Dataset schema (per-record fields):
- licenses: sequence, length 1–3
- version: string, 677 distinct values
- tree_hash: string, length 40
- path: string, 1 distinct value
- type: string, 2 distinct values
- size: string, length 2–8
- text: string, length 25–67.1M
- package_name: string, length 2–41
- repo: string, length 33–86
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1686
import ..UncertainValues.TheoreticalDistributionScalarValue
import ..UncertainValues.UncertainScalarBinomialDistributed
import ..UncertainValues.UncertainScalarBetaBinomialDistributed
import ..UncertainValues: AbstractUncertainValue
import Distributions.pdf
import ..SamplingConstraints: SamplingConstraint, constrain

function get_density(uv::AbstractUncertainValue)
    some_sample = resample(uv, 10000)
    xmin = minimum(some_sample) * 0.97
    xmax = maximum(some_sample) * 1.03
    step = (xmax - xmin) / 300
    xvals = xmin:step:xmax + step
    density = pdf.(uv.distribution, xvals)

    xvals, density ./ sum(density)
end

function get_density(uv::UncertainScalarBinomialDistributed)
    some_sample = resample(uv, 10000)
    xmin = minimum(some_sample)
    xmax = maximum(some_sample)
    xvals = xmin:1:xmax
    density = pdf.(uv.distribution, xvals)

    xvals, density ./ sum(density)
end

function get_density(uv::UncertainScalarBetaBinomialDistributed)
    some_sample = resample(uv, 10000)
    xmin = minimum(some_sample)
    xmax = maximum(some_sample)
    xvals = xmin:1:xmax
    density = pdf.(uv.distribution, xvals)

    xvals, density ./ sum(density)
end

@recipe function plot_theoretical(uv::TheoreticalDistributionScalarValue,
        density = true, n_samples = 1000)
    if density
        @series begin
            get_density(uv)
        end
    else
        @series begin
            label --> ""
            resample(uv, n_samples)
        end
    end
end

@recipe function plot_theoretical(uv::TheoreticalDistributionScalarValue,
        constraint::SamplingConstraint, density = true, n_samples = 1000)
    cuv = constrain(uv, constraint)

    if density
        @series begin
            get_density(cuv)
        end
    else
        @series begin
            label --> ""
            resample(cuv, n_samples)
        end
    end
end
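A minimal usage sketch for these recipes, assuming `UncertainData` and `Plots` are loaded. The `UncertainValue(Normal, ...)` constructor is the one used in docstring examples elsewhere in this package; the exact plotting calls below follow standard RecipesBase dispatch and should be treated as illustrative:

```julia
using Distributions, UncertainData, Plots

uv = UncertainValue(Normal, 2.0, 0.5)

plot(uv)                    # density representation (density = true is the default)
plot(uv, false, 500)        # 500 raw draws instead of the density
plot(uv, TruncateStd(1.5))  # density of the value constrained to ±1.5 std
```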
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
633
import ..UncertainDatasets: AbstractUncertainValueDataset
import ..UncertainStatistics: mean, std, median, quantile

@recipe function f(udata::Vector{AbstractUncertainValue},
        quants::Vector{Float64} = [0.33, 0.67])
    n_points = length(udata)

    for i = 1:n_points
        med = median(udata[i], 10000)
        lower = quantile(udata[i], minimum(quants), 10000)
        upper = quantile(udata[i], maximum(quants), 10000)

        @series begin
            seriescolor --> :black
            label --> ""
            yerror --> ([med - lower], [upper - med])
            [i], [med]
        end
    end
end
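A hedged usage sketch for this recipe. Dispatch is on `Vector{AbstractUncertainValue}`, so the element type is annotated explicitly here; the exact call pattern is an assumption:

```julia
using Distributions, UncertainData, Plots

uvals = AbstractUncertainValue[UncertainValue(Normal, i, 0.5) for i in 1:20]

# Medians with asymmetric error bars spanning the 20th-80th percentiles
plot(uvals, [0.2, 0.8])
```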
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
4703
using Reexport

@reexport module Resampling

import Interpolations
import Interpolations: Linear, BoundaryCondition
import ..bin
import ..UVAL_COLLECTION_TYPES
import ..UncertainDatasets: AbstractUncertainValueDataset,
    UncertainIndexValueDataset, AbstractUncertainDataset
import ..UncertainValues: UncertainValue, AbstractUncertainValue

function resample end

###################################
# Resampling schemes
###################################
include("resampling_schemes/AbstractUncertainDataResampling.jl")
include("resampling_schemes/BinnedResamplings.jl")
include("resampling_schemes/ConstrainedResampling.jl")
include("resampling_schemes/ConstrainedValueResampling.jl")
include("resampling_schemes/ConstrainedIndexValueResampling.jl")
include("resampling_schemes/SequentialResampling.jl")
include("resampling_schemes/SequentialInterpolatedResampling.jl")
include("resampling_schemes/binned_resamplings.jl")
include("resampling_schemes/InterpolateAndBin.jl")
include("resampling_schemes/RandomSequences.jl")

# Extend some methods to allow easier resampling.
include("resampling_with_schemes/constrain_with_schemes.jl")

###################################
# Resampling uncertain values
###################################

# Uncertain values based on distributions
include("uncertain_values/resample_uncertainvalues_distributions.jl")

# With constraints
include("uncertain_values/resample_uncertainvalues_theoretical.jl")
include("uncertain_values/resample_uncertainvalues_theoretical_withconstraints.jl")
include("uncertain_values/resample_uncertainvalues_kde.jl")
include("uncertain_values/resample_uncertainvalues_kde_withconstraints.jl")
include("uncertain_values/resample_certainvalues.jl")
include("uncertain_values/resample_uncertainvalues_populations.jl")
include("uncertain_values/resample_measurements.jl")

#########################################
# Resampling tuples of uncertain values
#########################################
include("uncertain_tuples/uncertain_tuples.jl")

#########################################
# Resampling vectors of uncertain values
#########################################
include("uncertain_values/resampling_vector_uncertainvalues.jl")

###################################
# Resampling uncertain datasets
###################################

# Element-wise resampling for all subtypes of AbstractUncertainValueDataset
include("uncertain_dataset/resample_abstractuncertainvaluedataset_elwise.jl")

# Specialized resampling for each type of dataset.
include("uncertain_dataset/resample_uncertaindataset_index.jl")
include("uncertain_dataset/resample_uncertaindataset_value.jl")
include("uncertain_dataset/resample_uncertaindataset_indexvalue.jl")

# Resampling vectors of uncertain values
include("uncertain_vectors/resample_uncertain_vectors.jl")

#########################################
# Ordered resampling
#########################################
include("ordered_resampling/resample_sequential.jl")
include("ordered_resampling/strictlyincreasing.jl")
include("ordered_resampling/strictlydecreasing.jl")

#########################################
# Resampling with interpolation
#########################################
include("resampling_with_interpolation/resample_linear_interpolation.jl")

################################
# Apply function with resampling
################################
include("apply_func.jl")

################################
# Resampling in-place
################################
include("resampling_inplace.jl")

################################
# Resampling with schemes
################################
include("resampling_with_schemes/resampling_schemes_binned.jl")
include("resampling_with_schemes/resampling_schemes_interpolated_binned.jl")
include("resampling_with_schemes/resampling_schemes_constrained.jl")
include("resampling_with_schemes/resampling_schemes_sequential.jl")

################################
# Interpolation
################################
include("binning/bin_BinnedResampling.jl")
include("binning/bin_BinnedWeightedResampling.jl")

export resample, resample!, resample_elwise, bin

end # module

"""
	Resampling

A module defining resampling methods for uncertain values defined in the
`UncertainValues` module and uncertain datasets defined in the
`UncertainDatasets` module.
"""
Resampling
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2788
""" resample(f::Function, x::AbstractUncertainValue, n::Int, args...; kwargs...) Draw an `n`-element sample from `x` according to its furnishing distribution, then call `f(x_draw, args...; kwargs...)` on the length-`n` draws. """ function resample(f::Function, x::T, n::Int, args...; kwargs...) where T <: AbstractUncertainValue draw = resample(x, n) f(draw, args...; kwargs...) end """ resample(f::Function, x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int, args...; kwargs...) Draw an `n`-element sample from `x` according to its furnishing distribution, and draw an `n`-element sample from `y` according to its furnishing distribution. Then, call `f(x_draw, y_draw, args...; kwargs...)` on the length-`n` draws. """ function resample(f::Function, x::T1, y::T2, n::Int, args...; kwargs...) where {T1 <: AbstractUncertainValue, T2 <: AbstractUncertainValue} draw_x = float.(resample(x, n)) draw_y = float.(resample(y, n)) f(draw_x, draw_y, args...; kwargs...) end """ resample(f::Function, x::UVAL_COLLECTION_TYPES, n::Int, args...; kwargs...) Resample the elements of `x` according to their furnishing uncertain values, yielding a length-`l` realisation of `x` if `length(x) = l`. The elements of `x` are resampled independently, assuming no sequential dependence between the elements. Then, call `f(x, args...; kwargs...)` on the length-`l` sample with the given `args` and `kwargs`. This process is repeated `n` times, yielding a length-`n` distribution of evaluations of `f`. """ function resample(f::Function, x::T, n::Int, args...; kwargs...) where { T <: UVAL_COLLECTION_TYPES} draws = resample(x, n) [f(draw, args...; kwargs...) for draw in draws] end """ resample(f::Function, x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int, args...; kwargs...) Resample the elements of `x` according to their furnishing uncertain values, yielding a length-`l` realisation of `x` if `length(x) = l`. Then, do the same for `y`. The elements of `x` and `y` are resampled independently, assuming no sequential dependence between the elements of neither `x` nor `y`. Then, call `f(x_draw, y_draw, args..., kwargs...)` on the length-`l` samples `x_draw` and `y_draw`. This process is repeated `n` times, yielding a length-`n` distribution of evaluations of `f`. """ function resample(f::Function, x::TX, y::TY, n::Int, args...; kwargs...) where { TX <: UVAL_COLLECTION_TYPES, TY <: UVAL_COLLECTION_TYPES} f_vals = zeros(Float64, n) for i = 1:n draw_x = float.(resample(x)) draw_y = float.(resample(y)) f_vals[i] = f(draw_x, draw_y, args...; kwargs...) end return f_vals end export resample
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5823
import StaticArrays: FieldVector, MVector
import ..UncertainDatasets: AbstractUncertainIndexValueDataset

"""
    resample!(v::AbstractArray{T, 1}, x::AbstractUncertainValue)
    resample!(v::MVector{N, T}, x::AbstractUncertainValue) where {N, T}
    resample!(v::FieldVector{N, T}, x::AbstractUncertainValue) where {N, T}

    resample!(v::MVector{N, T}, x::Vararg{AbstractUncertainValue, N}) where {N, T}
    resample!(v::FieldVector{N, T}, x::Vararg{AbstractUncertainValue, N}) where {N, T}

    resample!(v::AbstractArray{T, 1}, x::UVAL_COLLECTION_TYPES) where T
    resample!(v::AbstractArray{T, 2}, x::UVAL_COLLECTION_TYPES) where T

    resample!(idxs::AbstractArray{T, 1}, vals::AbstractArray{T, 1},
        x::AbstractUncertainIndexValueDataset) where T
    resample!(idxs::AbstractArray{T, 2}, vals::AbstractArray{T, 2},
        x::AbstractUncertainIndexValueDataset) where T

Resample an uncertain value `x`, or a collection of uncertain values `x`,
into a pre-allocated container `v`.

## Uncertain values

- If `x` is a single uncertain value, and `v` is vector-like, then fill
  `v` with `N` draws of `x`. Works with vectors of length `N`,
  `MVector{N, T}`s or `FieldVector{N, T}`s.

## Uncertain collections

Uncertain collections may be a `Vector{AbstractUncertainValue}` of length `N`,
an `AbstractUncertainValueDataset`, or an `NTuple{N, AbstractUncertainValue}`.
See also [`UVAL_COLLECTION_TYPES`](@ref).

- If `x` is a collection of uncertain values and `v` is vector-like,
  then fill `v[i]` with a draw of `x[i]` for `i = 1:N`.
- If `x` is a collection of uncertain values and `v` is a 2D-array, then
  fill the `i`-th column of `v` with `length(x)` draws of the `i`-th
  uncertain value in `x`.

## Uncertain index-value collections

- If two mutable vector-like containers, `idxs` and `vals`, are provided along
  with an uncertain index-value dataset `x`, then fill `idxs[i]` with a random
  draw from `x.indices[i]` and fill `vals[i]` with a random draw from
  `x.values[i]`.
- If two mutable matrix-like containers, `idxs` and `vals`, are provided along
  with an uncertain index-value dataset `x` (where the number of columns in
  both `idxs` and `vals` matches `length(x)`), then fill the `i`-th column of
  `idxs` with `size(idxs, 1)` draws from `x.indices[i]`, and fill the `i`-th
  column of `vals` with `size(idxs, 1)` draws from `x.values[i]`.
"""
function resample! end

function resample!(v, x::AbstractUncertainValue)
    v[:] = rand(x, length(v))
    return v
end

function resample!(v, x::UVAL_COLLECTION_TYPES)
    for i in eachindex(v)
        @inbounds v[i] = rand(x[i])
    end
    return v
end

function resample!(v::AbstractArray{T, 2}, x::UVAL_COLLECTION_TYPES) where T
    # The i-th column is filled with random values from the i-th uncertain
    # value in the collection.
    n_draws = size(v, 1)
    n_vals = length(x)
    for i in 1:n_vals
        v[:, i] = rand(x[i], n_draws)
    end
    return v
end

function resample!(idxs::Vector{T}, vals::Vector{T},
        x::AbstractUncertainIndexValueDataset) where T
    if !(length(idxs) == length(vals) == length(x))
        error("`length(idxs) == length(vals) == length(x)` evaluated to false")
    end

    for i in eachindex(idxs)
        @inbounds idxs[i] = rand(x.indices[i])
        @inbounds vals[i] = rand(x.values[i])
    end
    return idxs, vals
end

function resample!(idxs::AbstractArray{T, 2}, vals::AbstractArray{T, 2},
        x::AbstractUncertainIndexValueDataset) where T
    if !(size(idxs, 2) == size(vals, 2) == length(x))
        error("`size(idxs, 2) == size(vals, 2) == length(x)` evaluated to false")
    end

    n_draws = size(idxs, 1)
    n_uvals = length(x)

    # The i-th column in `idxs` is filled with random values from the i-th
    # uncertain index in the collection, and likewise for `vals`.
    for i in 1:n_uvals
        @inbounds idxs[:, i] = rand(x.indices[i], n_draws)
        @inbounds vals[:, i] = rand(x.values[i], n_draws)
    end
    return idxs, vals
end

#################################################################################
# Multiple draws of a single uncertain value into mutable vector-like containers.
#################################################################################
function resample!(v::MVector{N, T}, x::AbstractUncertainValue) where {N, T}
    v[:] = resample(x, N)
    return v
end

function resample!(v::FieldVector{N, T}, x::AbstractUncertainValue) where {N, T}
    v[:] = resample(x, N)
    return v
end

##########################################################################################
# A single draw of `N` uncertain values into a length-`N` mutable vector-like container.
##########################################################################################
function resample!(v::Vector{T}, x::NTuple{N, AbstractUncertainValue}) where {N, T}
    if length(v) != N
        throw(ArgumentError("length(v) == $(length(v)), has to match the number of uncertain values ($N)"))
    end

    for i = 1:N
        @inbounds v[i] = resample(x[i])
    end
    return v
end

function resample!(v::MVector{N, T}, x::Vararg{AbstractUncertainValue, N}) where {N, T}
    @inbounds for i = 1:N
        v[i] = resample(x[i])
    end
    return v
end

function resample!(v::MVector{N, T}, x::NTuple{N, AbstractUncertainValue}) where {N, T}
    for i = 1:N
        @inbounds v[i] = resample(x[i])
    end
    return v
end

function resample!(v::FieldVector{N, T}, x::Vararg{AbstractUncertainValue, N}) where {N, T}
    @inbounds for i = 1:N
        v[i] = resample(x[i])
    end
    return v
end

function resample!(v::FieldVector{N, T}, x::NTuple{N, AbstractUncertainValue}) where {N, T}
    for i = 1:N
        @inbounds v[i] = resample(x[i])
    end
    return v
end
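A brief allocation-free usage sketch of the single-value method above (`UncertainValue(Normal, ...)` as in docstring examples elsewhere in this package):

```julia
using Distributions, UncertainData

x = UncertainValue(Normal, 0.0, 1.0)
v = zeros(10_000)

for _ in 1:100
    resample!(v, x)  # refill `v` with 10_000 fresh draws, reusing the buffer
    # ... compute summary statistics on `v` here ...
end
```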
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
7178
import KernelDensity: UnivariateKDE, default_bandwidth, kde
import Distributions: Distribution, Normal

"""
    bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedResampling{UncertainScalarKDE}) -> UncertainIndexValueDataset
    bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedResampling{UncertainScalarPopulation}) -> UncertainIndexValueDataset

Resample every element of `x` the number of times given by `binning.n`. After
resampling, distribute the values according to their indices, into the bins
given by `binning.left_bin_edges`.

## Returns

Returns an `UncertainIndexValueDataset`. Indices are assumed to be uniformly
distributed within each bin, and are represented as `CertainValue`s at the
bin centers. Values of the dataset have different representations depending
on what `binning` is:

- If `binning isa BinnedResampling{UncertainScalarKDE}`, then values in each
  bin are represented by a kernel density estimate to the distribution of the
  resampled values whose resampled indices fall in that bin.
- If `binning isa BinnedResampling{UncertainScalarPopulation}`, then values
  in each bin are represented by equiprobable populations consisting of the
  resampled values whose resampled indices fall in the bins.
"""
function bin(x::AbstractUncertainIndexValueDataset, binning::BinnedResampling); end

"""
    bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedResampling{RawValues}) -> Tuple(Vector, Vector{Vector})

Resample every element of `x` the number of times given by `binning.n`. After
resampling, distribute the values according to their indices, into the `N`
bins given by `binning.left_bin_edges`.

## Returns

Return a tuple containing the `N` different bin centers and an `N`-length
vector of resampled values whose resampled indices fall in the `N` different
bins.

## Example

```julia
# Some example data with unevenly spaced time indices
npts = 300
time, vals = sort(rand(1:1000, npts)), rand(npts)

# Add uncertainties to indices and values, and represent as
# UncertainIndexValueDataset
utime = [UncertainValue(Normal, t, 10) for t in time]
uvals = [UncertainValue(Normal, v, 0.1) for v in vals]
udata = UncertainIndexValueDataset(utime, uvals)

# Bin the data into 25-time-step-wide time bins ranging from time indices
# 100 to 900, and return a vector of raw values for each bin. Do this by
# resampling each uncertain data point 10000 times and distributing those
# draws among the bins.
left_bin_edges = 100:25:900
n_draws = 10000
binning = BinnedResampling(RawValues, left_bin_edges, n_draws)

bin_centers, bin_draws = bin(udata, binning)
```
"""
function bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedResampling{RawValues};
        nan_threshold = 0.0)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, binning.n)
    vals = fill(NaN, binning.n)
    perminds = zeros(Int, binning.n)
    sorted_idxs = fill(NaN, binning.n)
    sorted_vals = fill(NaN, binning.n)

    bin_edges = binning.left_bin_edges
    n_bins = length(bin_edges) - 1

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(binning.left_bin_edges)
    s = step(binning.left_bin_edges)

    # Empty vectors that will contain draws.
    binvecs = [Vector{Float64}(undef, 0) for i = 1:n_bins]
    #[sizehint!(bv, resampling.n*n_bins) for bv in binvecs]

    @inbounds for (j, (idx, val)) in enumerate(zip(x.indices, x.values))
        # Resample the j-th index and j-th value
        resample!(idxs, idx)
        resample!(vals, val)

        # Get the vector that sorts the index vector, and use that to
        # sort the draws.
        sortperm!(perminds, idxs)
        sorted_idxs .= idxs[perminds]
        sorted_vals .= vals[perminds]

        # The vectors above are sorted, so this can be done faster
        for i in 1:n_bins
            inbin = findall(bin_edges[i] .<= sorted_idxs .<= bin_edges[i+1])
            if length(inbin) > nan_threshold
                append!(binvecs[i], sorted_vals[inbin])
            end
        end
    end

    bin_centers = bin_edges[1:end-1] .+ step(bin_edges)/2

    return bin_centers, binvecs
end

function bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedResampling{UncertainScalarPopulation};
        nan_threshold = 0)

    # Get bin centers and a vector for each bin containing the values
    # falling in that bin.
    left_bin_edges = binning.left_bin_edges
    n = binning.n
    n_bins = length(left_bin_edges) - 1
    bin_centers, binvecs = bin(x, BinnedResampling(RawValues, left_bin_edges, n))

    # Represent the values in each bin as an equiprobable population
    estimated_value_dists = Vector{Union{CertainValue, UncertainScalarPopulation}}(undef, n_bins)
    binvec_lengths = length.(binvecs)

    for i in 1:n_bins
        L = binvec_lengths[i]
        # If the bin contains enough values, represent it as a population.
        # Otherwise, set it to NaN.
        if L > nan_threshold
            probs = Weights(repeat([1/L], L))
            estimated_value_dists[i] = UncertainScalarPopulation(binvecs[i], probs)
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(bin_centers)
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end

function bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedResampling{UncertainScalarKDE};
        nan_threshold = 0,
        kernel::Type{D} = Normal,
        bw_factor = 4,
        npoints::Int = 2048) where {D <: Distribution}

    # Get bin centers and a vector for each bin containing the values
    # falling in that bin.
    left_bin_edges = binning.left_bin_edges
    n = binning.n
    n_bins = length(left_bin_edges) - 1
    bin_centers, binvecs = bin(x, BinnedResampling(RawValues, left_bin_edges, n))

    # Estimate the distribution in each bin by kernel density estimation
    estimated_value_dists = Vector{Union{CertainValue, UncertainScalarKDE}}(undef, n_bins)
    binvec_lengths = length.(binvecs)

    for i in 1:n_bins
        L = binvec_lengths[i]
        # If the bin contains enough values, represent it as a KDE estimate.
        # Otherwise, set it to NaN.
        if L > nan_threshold
            bw = default_bandwidth(binvecs[i]) / bw_factor

            # Kernel density estimation
            KDE = kde(binvecs[i], npoints = npoints, kernel = kernel, bandwidth = bw)

            # The x values at which the density is estimated.
            xrange = KDE.x

            # Normalise the estimated density
            density = KDE.density ./ sum(KDE.density)

            estimated_value_dists[i] = UncertainScalarKDE(KDE, binvecs[i], xrange, Weights(density))
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(bin_centers)
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end

export bin
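A hedged usage sketch continuing the docstring example above (`udata` as constructed there), summarising each bin as an equiprobable population instead of returning raw values:

```julia
# Represent each bin of `udata` as an equiprobable population of the draws
# that landed in it.
binning = BinnedResampling(UncertainScalarPopulation, 100:25:900, 10_000)
binned = bin(udata, binning)  # -> UncertainIndexValueDataset
```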
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
7703
import KernelDensity: UnivariateKDE, default_bandwidth, kde
import Distributions: Distribution

"""
    bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedWeightedResampling{UncertainScalarKDE}) -> UncertainIndexValueDataset
    bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedWeightedResampling{UncertainScalarPopulation}) -> UncertainIndexValueDataset

Resample every element of `x` a number of times. After resampling, distribute
the values, according to their indices, among the bins whose left edges are
given by `binning.left_bin_edges`. In total, `length(x)*binning.n` draws are
distributed among the bins. The number of times `x[i]` is resampled is
proportional to `binning.weights[i]` (probability weights are normalised to 1).

## Returns

Returns an `UncertainIndexValueDataset`. Indices are assumed to be uniformly
distributed within each bin, and are represented as `CertainValue`s at the
bin centers. Values of the dataset have different representations depending
on what `binning` is:

- If `binning isa BinnedWeightedResampling{UncertainScalarKDE}`, then values
  in each bin are represented by a kernel density estimate to the distribution
  of the resampled values whose resampled indices fall in that bin.
- If `binning isa BinnedWeightedResampling{UncertainScalarPopulation}`, then
  values in each bin are represented by equiprobable populations consisting
  of the resampled values whose resampled indices fall in the bins.
"""
function bin(x::AbstractUncertainIndexValueDataset, binning::BinnedWeightedResampling); end

"""
    bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedWeightedResampling{RawValues}) -> Tuple(Vector, Vector{Vector})

Resample every element of `x` a number of times. After resampling, distribute
the values, according to their indices, among the `N` bins whose left edges
are given by `binning.left_bin_edges`. In total, `length(x)*binning.n` draws
are distributed among the bins. The number of times `x[i]` is resampled is
proportional to `binning.weights[i]` (probability weights are normalised to 1).

## Returns

Return a tuple containing the `N` different bin centers and an `N`-length
vector of resampled values whose resampled indices fall in the `N` different
bins.

## Example

```julia
using Plots, UncertainData

# Some example data with unevenly spaced time indices
function ar1(n::Int, x0 = 0.5, p = 0.3)
    vals = zeros(n)
    vals[1] = x0
    [vals[i] = vals[i - 1]*p + rand()*0.5 for i = 2:n]
    return vals
end

npts = 50
time, vals = sort(rand(1:1000, npts)), ar1(npts)

# Add uncertainties to indices and values, and represent as
# UncertainIndexValueDataset
utime = [UncertainValue(Normal, t, 5) for t in time]
uvals = [UncertainValue(Normal, v, 0.03) for v in vals]
udata = UncertainIndexValueDataset(utime, uvals)

# Bin the data into 40-time-step-wide time bins ranging from time indices
# 100 to 900, and return a vector of raw values for each bin. Do this by
# resampling each uncertain data point on average 5000 times and
# distributing those draws among the bins.
time_grid = 100:40:900
n_draws = 5000

# Let odd-indexed values be three times as likely to be
# sampled compared to even-indexed values.
wts = Weights([i % 2 == 0 ? 1 : 3 for i = 1:length(udata)])

binning = BinnedWeightedResampling(RawValues, time_grid, wts, n_draws)
bin_centers, bin_draws = bin(udata, binning);
```
"""
function bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedWeightedResampling{RawValues};
        nan_threshold = 0.0)

    # Determine how many times each element of `x` should be resampled,
    # based on the provided (normalised) weights.
    wts = binning.weights ./ sum(binning.weights)
    n_total_draws = binning.n * length(x)
    Ns = ceil.(Int, n_total_draws .* wts)

    # Separately convert indices and values to weighted populations
    pop_inds = UncertainValue(x.indices.indices, wts)
    pop_vals = UncertainValue(x.values.values, wts)

    bin_edges = binning.left_bin_edges
    n_bins = length(bin_edges) - 1

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(binning.left_bin_edges)
    s = step(binning.left_bin_edges)

    # Empty vectors that will contain draws.
    binvecs = [Vector{Float64}(undef, 0) for i = 1:n_bins]
    #[sizehint!(bv, n_total_draws) for bv in binvecs]

    for (i, N) in enumerate(Ns)
        # Pre-allocate some arrays into which we resample the values of the
        # index and value populations.
        idxs = fill(NaN, N)
        vals = fill(NaN, N)
        perminds = zeros(Int, N)
        sorted_idxs = fill(NaN, N)
        sorted_vals = fill(NaN, N)

        # Resample the i-th index and i-th value
        resample!(idxs, pop_inds[i])
        resample!(vals, pop_vals[i])

        # Get the vector that sorts the index vector, and use that to
        # sort the draws.
        sortperm!(perminds, idxs)
        sorted_idxs .= idxs[perminds]
        sorted_vals .= vals[perminds]

        # The vectors above are sorted, so this can be done faster
        for b in 1:n_bins
            inbin = findall(bin_edges[b] .<= sorted_idxs .<= bin_edges[b+1])
            if length(inbin) > nan_threshold
                append!(binvecs[b], sorted_vals[inbin])
            end
        end
    end

    bin_centers = bin_edges[1:end-1] .+ step(bin_edges)/2

    return bin_centers, binvecs
end

function bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedWeightedResampling{UncertainScalarKDE};
        nan_threshold = 0.0)

    bin_edges = binning.left_bin_edges
    n = binning.n
    wts = binning.weights

    binning = BinnedWeightedResampling(RawValues, bin_edges, wts, n)
    bin_centers, binvecs = bin(x, binning)

    # Estimate the distribution in each bin by kernel density estimation
    n_bins = length(binning.left_bin_edges) - 1
    estimated_value_dists = Vector{Union{CertainValue, UncertainScalarKDE}}(undef, n_bins)

    for i in 1:n_bins
        if length(binvecs[i]) > nan_threshold
            estimated_value_dists[i] = UncertainValue(binvecs[i])
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(UncertainValue.(bin_edges[1:end-1] .+ step(bin_edges)/2))
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end

function bin(x::AbstractUncertainIndexValueDataset,
        binning::BinnedWeightedResampling{UncertainScalarPopulation};
        nan_threshold = 0.0)

    bin_edges = binning.left_bin_edges
    n = binning.n
    wts = binning.weights

    binning = BinnedWeightedResampling(RawValues, bin_edges, wts, n)
    bin_centers, binvecs = bin(x, binning)

    # Represent the values in each bin as an equiprobable population
    n_bins = length(binning.left_bin_edges) - 1
    estimated_value_dists = Vector{Union{CertainValue, UncertainScalarPopulation}}(undef, n_bins)

    for i in 1:n_bins
        if length(binvecs[i]) > nan_threshold
            L = length(binvecs[i])
            estimated_value_dists[i] = UncertainValue(binvecs[i], repeat([1 / L], L))
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(UncertainValue.(bin_edges[1:end-1] .+ step(bin_edges)/2))
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end
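A short hedged sketch showing the summarised variants of the weighted binning scheme, reusing `udata` and `wts` from the docstring example above:

```julia
# Summarise each bin as a KDE distribution, or as an equiprobable population
binning_kde = BinnedWeightedResampling(UncertainScalarKDE, 100:40:900, wts, 5000)
binned_kde = bin(udata, binning_kde)   # -> UncertainIndexValueDataset

binning_pop = BinnedWeightedResampling(UncertainScalarPopulation, 100:40:900, wts, 5000)
binned_pop = bin(udata, binning_pop)
```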
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3433
import ..SamplingConstraints:
    SequentialSamplingConstraint,
    OrderedSamplingAlgorithm

const AUD = AbstractUncertainDataset
const SC = Union{SamplingConstraint, Vector{S}} where S <: SamplingConstraint
const SEQ = SequentialSamplingConstraint{O} where O <: OrderedSamplingAlgorithm
const XD = Union{AbstractUncertainDataset, Vector{<:AbstractUncertainValue}}

"""
    resample(x, ssc::SequentialSamplingConstraint)
    resample(x, ssc::SequentialSamplingConstraint, c)
    resample(x::UncertainIndexValueDataset, ssc::SequentialSamplingConstraint, ic, vc)

Sample `x` element-wise such that the samples obey the sequential constraints
given by `ssc`. Alternatively, apply constraint(s) `c` to `x` *before*
sequential sampling is performed.

A check is performed before sampling to ensure that such a sequence exists.
Before the check is performed, the distributions in `x` are truncated
element-wise to the quantiles provided by `c` to ensure they have finite
supports.

If `x` is an uncertain index-value dataset, then the sequential constraint is
only applied to the indices. If one set of additional constraints is added to
an uncertain index-value dataset, then they are applied to both the indices
and the values. It is also possible to give separate index constraints `ic`
and value constraints `vc`.

If constraints `c`, or `ic` and `vc`, are given, then `c`/`ic`/`vc` must be
either a single constraint, or a vector of constraints that are applied
element-wise to the distributions in `x`.

    resample!(s, x, ssc::SequentialSamplingConstraint, lqs, uqs)

The same as above, but store the sampled values in a pre-allocated vector `s`,
where `length(x) == length(s)`. This avoids excessive memory allocations
during repeated resampling. This requires pre-computing the element-wise
lower and upper quantiles `lqs` and `uqs` for the initial truncation step.
This method *does not* check for the existence of a strictly increasing
sequence in `x`. To check that, use [`sequence_exists`](@ref).

See also: [`sequence_exists`](@ref), [`StrictlyIncreasing`](@ref),
[`StrictlyDecreasing`](@ref), [`StartToEnd`](@ref).

## Examples

```julia
N = 100
t = [UncertainValue(Normal, i, 2) for i in 1:N];
resample(t, StrictlyIncreasing(StartToEnd()))
```

```julia
N = 100
t = [UncertainValue(Normal, i, 2) for i in 1:N];

# Verify that an increasing sequence through `t` exists
c = StrictlyIncreasing(StartToEnd())
exists, lqs, uqs = sequence_exists(t, c)

# Pre-allocate sample vector
s = zeros(Float64, N)

if exists
    for i = 1:100
        resample!(s, t, c, lqs, uqs)
        # Do something with s
        # ...
    end
end
```
"""
function resample(x::XD, ssc::SEQ)
    exists, lqs, uqs = sequence_exists(x, ssc)
    exists || error("Sequence does not exist")
    _draw(x, ssc, lqs, uqs)
end

function resample!(s, x::XD, ssc::SEQ, lqs, uqs)
    _draw!(s, x, ssc, lqs, uqs)
end

resample(udata::XD, ssc::SEQ, c::Union{SC, AbstractVector{<:SC}}) =
    resample(constrain(udata, c), ssc)

resample(udata::UncertainIndexValueDataset, ssc::SEQ) =
    resample(udata.indices, ssc), resample(udata.values)

resample(udata::UncertainIndexValueDataset, ssc::SEQ, c::SC) =
    resample(constrain(udata.indices, c), ssc), resample(constrain(udata.values, c))

resample(udata::UncertainIndexValueDataset, ssc::SEQ, ic::SC, vc::SC) =
    resample(constrain(udata.indices, ic), ssc), resample(constrain(udata.values, vc))
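A hedged sketch combining a regular constraint with sequential sampling, per the `resample(x, ssc, c)` method above:

```julia
using Distributions, UncertainData

t = [UncertainValue(Normal, i, 2) for i in 1:100]

# Truncate each element to its 5th-95th percentile range, then draw a
# strictly increasing realisation through the truncated distributions.
draw = resample(t, StrictlyIncreasing(StartToEnd()), TruncateQuantiles(0.05, 0.95))
```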
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1695
import ..SamplingConstraints:
    StartToEnd,
    StrictlyDecreasing,
    sequence_exists,
    constrain,
    TruncateMinimum,
    TruncateMaximum,
    TruncateRange,
    TruncateQuantiles
import IntervalArithmetic: interval

function _draw!(s, x, c::StrictlyDecreasing{<:StartToEnd}, mins, maxs)
    L = length(x)
    # TODO: add slight margin?
    for i = 1:L
        if i == 1
            lo = maximum(mins[2:end])
            truncated_distribution = truncate(x[i], TruncateMinimum(lo))
            s[1] = resample(truncated_distribution)
        end

        if 1 < i < L
            hi = min(s[i - 1], maxs[i])
            lo = max(mins[i], maximum(mins[i+1:end]))
            lo <= hi || error("Truncation range invalid for point $i. Got lo > hi ($lo > $hi), which should be impossible.")
            truncated_distribution = truncate(x[i], TruncateRange(lo, hi))
            s[i] = resample(truncated_distribution)
        end

        if i == L
            hi = min(s[i - 1], maxs[i])
            truncated_distribution = truncate(x[i], TruncateMaximum(hi))
            s[end] = resample(truncated_distribution)
        end
    end

    return s
end

"""
    _draw(x, c::StrictlyDecreasing{StartToEnd}, mins, maxs)

Sample `x` in a strictly decreasing manner, given pre-computed minimum and
maximum values for each distribution in `x`. Implicitly assumes a strictly
decreasing sequence exists, but does not check that condition.
"""
function _draw(x, c::StrictlyDecreasing{<:StartToEnd}, mins, maxs)
    L = length(x)
    samples = zeros(Float64, L) # a vector to hold the element-wise samples
    _draw!(samples, x, c, mins, maxs)
end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1798
import ..SamplingConstraints:
    SamplingConstraint,
    SequentialSamplingConstraint,
    OrderedSamplingAlgorithm,
    StartToEnd,
    StrictlyIncreasing,
    sequence_exists,
    TruncateMinimum,
    TruncateMaximum,
    TruncateRange,
    TruncateQuantiles,
    constrain

export resample

import IntervalArithmetic: interval

function _draw!(s, x, c::StrictlyIncreasing{<:StartToEnd}, mins, maxs)
    L = length(x)
    # TODO: add slight margin?
    for i = 1:L
        if i == 1
            hi = minimum(maxs[2:end])
            truncated_distribution = truncate(x[i], TruncateMaximum(hi))
            s[i] = resample(truncated_distribution)
        end

        if 1 < i < L
            lo = max(s[i - 1], mins[i])
            hi = min(maxs[i], minimum(maxs[i+1:end]))
            lo <= hi || error("Truncation range invalid for point $i. Got lo > hi ($lo > $hi), which should be impossible.")
            truncated_distribution = truncate(x[i], TruncateRange(lo, hi))
            s[i] = resample(truncated_distribution)
        end

        if i == L
            lo = max(s[i - 1], mins[i])
            truncated_distribution = truncate(x[i], TruncateMinimum(lo))
            s[i] = resample(truncated_distribution)
        end
    end

    return s
end

"""
    _draw(x, c::StrictlyIncreasing{StartToEnd}, mins, maxs)

Sample `x` in a strictly increasing manner, given pre-computed minimum and
maximum values for each distribution in `x`. Implicitly assumes a strictly
increasing sequence exists, but does not check that condition.
"""
function _draw(x, c::StrictlyIncreasing{<:StartToEnd}, mins, maxs)
    L = length(x)
    samples = zeros(Float64, L) # a vector to hold the element-wise samples
    _draw!(samples, x, c, mins, maxs)
end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
268
# TODO: inherit from StatsBase.AbstractResampling when implemented there

"""
    AbstractUncertainDataResampling

An abstract type for all resampling schemes in this package.
"""
abstract type AbstractUncertainDataResampling end

export AbstractUncertainDataResampling
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
832
""" AbstractBinnedResampling Resampling schemes where data are binned. """ abstract type AbstractBinnedResampling <: AbstractUncertainDataResampling end """ AbstractBinnedUncertainValueResampling <: AbstractUncertainDataResampling Resampling schemes where data are binned and the data in each bin are represented as an uncertain value. """ abstract type AbstractBinnedUncertainValueResampling <: AbstractBinnedResampling end """ AbstractBinnedSummarisedResampling <: AbstractUncertainDataResampling Resampling schemes where data are binned and the data in each bin are summarised to a single value (e.g. the mean). """ abstract type AbstractBinnedSummarisedResampling <: AbstractBinnedResampling end export AbstractBinnedResampling, AbstractBinnedSummarisedResampling, AbstractBinnedUncertainValueResampling
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5689
import ..UncertainDatasets: AbstractUncertainIndexValueDataset
import ..SamplingConstraints: SamplingConstraint

"""
    ConstrainedIndexValueResampling(constraints::NTuple{N_DATASETS, NTuple{N_VARIABLES, Union{SamplingConstraint, Vector{<:SamplingConstraint}}}}, n::Int)

Indicates that resampling should be performed with constraints on a set of
uncertain index-value datasets. See examples for usage.

## Fields

- `constraints`. The constraints for the datasets. The constraints are
  represented as a tuple of length `N_DATASETS`, where the `i`-th tuple
  element is itself an `N_VARIABLES`-length tuple containing the constraints
  for the `N_VARIABLES` different variables. See "Indexing" below for details.
  Constraints for each individual variable must be supplied as either a single
  sampling constraint, or as a vector of sampling constraints with length
  matching the length of the variable
  (`Union{SamplingConstraint, Vector{<:SamplingConstraint}}`). For example,
  if the `j`-th variable of the `i`-th dataset contains 352 observations,
  then `constraints[i, j]` must be either a single sampling constraint
  (e.g. `TruncateStd(1.1)`) or a vector of 352 different sampling constraints
  (e.g. `[TruncateStd(1.0 + rand()) for i = 1:352]`).
- `n::Int`. The number of draws.

## Indexing

Assume `c` is an instance of `ConstrainedIndexValueResampling`. Then

- `c[i]` returns the `NTuple` of constraints for the `i`-th dataset, and
- `c[i, j]` returns the constraint(s) for the `j`-th variable of the `i`-th dataset.

## Example

### Defining `ConstrainedIndexValueResampling`s

Assume we want to constrain three separate uncertain index-value datasets,
with different sampling constraints for the indices and the values of each
dataset.

```julia
# (index constraints, value constraints) for the 1st, 2nd and 3rd datasets
c1 = (TruncateStd(1), TruncateStd(1.1))
c2 = (TruncateStd(0.5), TruncateQuantiles(0.1, 0.8))
c3 = (TruncateQuantiles(0.05, 0.95), TruncateQuantiles(0.33, 0.67))

c = ConstrainedIndexValueResampling(c1, c2, c3)
```

Now,

- `c[2]` returns the `NTuple` of constraints for the 2nd dataset, and
- `c[1, 2]` returns the constraint(s) for the 2nd variable of the 1st dataset.

### Controlling the number of draws

The number of draws defaults to 1 if not specified. To indicate that more
than one draw should be performed, just input the number of draws before
supplying the constraints to the constructor.

```
c1 = (TruncateStd(1), TruncateStd(1.1))
c2 = (TruncateStd(0.5), TruncateQuantiles(0.1, 0.8))

# A single draw
c_single = ConstrainedIndexValueResampling(c1, c2)

# Multiple (300) draws
c_multiple = ConstrainedIndexValueResampling(300, c1, c2)
```

### Detailed example

Let's say we have two uncertain index-value datasets `x` and `y`. We want to
constrain the furnishing distributions/populations for both the time indices
and the values, both for `x` and for `y`. For `x`, truncate the indices at
`0.8` times the standard deviation around their mean, and for `y`, truncate
the indices at `1.5` times the standard deviation around their mean. Next,
truncate `x`'s values at roughly their 20th percentile range, and truncate
`y`'s values at roughly their 80th percentile range.

All this information can be combined in a `ConstrainedIndexValueResampling`
instance. This instance can be passed on to any function that accepts
uncertain index-value datasets, to indicate that resampling should be
performed on truncated versions of the distributions/populations furnishing
the datasets.

```julia
# some noise, so we don't truncate all furnishing distributions/populations
# at exactly the same quantiles.
r = Uniform(0, 0.01)

constraints_x_inds = TruncateStd(0.8)
constraints_y_inds = TruncateStd(1.5)

constraints_x_vals = [TruncateQuantiles(0.4 + rand(r), 0.6 + rand(r)) for i = 1:length(x)];
constraints_y_vals = [TruncateQuantiles(0.1 + rand(r), 0.9 + rand(r)) for i = 1:length(x)];

cs_x = (constraints_x_inds, constraints_x_vals)
cs_y = (constraints_y_inds, constraints_y_vals)

resampling = ConstrainedIndexValueResampling(cs_x, cs_y)
```
"""
struct ConstrainedIndexValueResampling{N_VARIABLES, N_DATASETS} <: AbstractUncertainDataResampling
    constraints::NTuple{N_DATASETS, NTuple{N_VARIABLES, Union{SamplingConstraint, Vector{<:SamplingConstraint}}}}
    n::Int
end

ConstrainedIndexValueResampling(constraints::NTuple{N_VARIABLES, Union{SamplingConstraint, Vector{<:SamplingConstraint}}}...) where N_VARIABLES =
    ConstrainedIndexValueResampling((constraints...,), 1)

ConstrainedIndexValueResampling(n::Int, constraints::NTuple{N_VARIABLES, Union{SamplingConstraint, Vector{<:SamplingConstraint}}}...) where N_VARIABLES =
    ConstrainedIndexValueResampling((constraints...,), n)

function Base.show(io::IO, constraints::ConstrainedIndexValueResampling{N_VARIABLES, N_DATASETS}) where {N_VARIABLES, N_DATASETS}
    s = "$(typeof(constraints)) for $N_DATASETS set(s) of index-value constraints (each a $N_VARIABLES-tuple) where n=$(constraints.n)"
    println(io, s)
end

Base.eachindex(c::ConstrainedIndexValueResampling) = Base.OneTo(length(c.constraints))
Base.length(c::ConstrainedIndexValueResampling) = length(c.constraints)
Base.firstindex(c::ConstrainedIndexValueResampling) = 1
Base.lastindex(c::ConstrainedIndexValueResampling) = length(c.constraints)
Base.getindex(c::ConstrainedIndexValueResampling, i) = c.constraints[i]
Base.getindex(c::ConstrainedIndexValueResampling, i, j) = c.constraints[i][j]
Base.iterate(c::ConstrainedIndexValueResampling, state = 1) = iterate(c.constraints, state)

export ConstrainedIndexValueResampling
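Continuing the detailed example above, a quick sketch of how the indexing methods defined below the struct behave:

```julia
resampling[1]       # (index constraints, value constraints) for `x`
resampling[2, 1]    # index constraint for `y`, i.e. TruncateStd(1.5)
length(resampling)  # 2 datasets
```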
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1387
import ..SamplingConstraints: SamplingConstraint

"""
    ConstrainedResampling(constraint::NTuple{N, Union{SamplingConstraint, Vector{<:SamplingConstraint}}})

Indicates that resampling should be performed with constraints on the
uncertain values.
"""
struct ConstrainedResampling{N} <: AbstractUncertainDataResampling
    constraints::NTuple{N, Union{SamplingConstraint, Vector{<:SamplingConstraint}}}
end

ConstrainedResampling(constraints::Union{SamplingConstraint, Vector{<:SamplingConstraint}}...) =
    ConstrainedResampling(constraints)

ConstrainedResampling(constraints::Vector{Union{SamplingConstraint, Vector{<:SamplingConstraint}}}) =
    ConstrainedResampling((constraints...,))

Base.length(resampling::ConstrainedResampling) = length(resampling.constraints)
Base.getindex(resampling::ConstrainedResampling, i) = resampling.constraints[i]
Base.firstindex(resampling::ConstrainedResampling) = 1
Base.lastindex(resampling::ConstrainedResampling) = length(resampling)

function summarise(resampling::ConstrainedResampling)
    _type = typeof(resampling)
    constraint_types = [typeof(c) for c in resampling.constraints]
    strs = ["$constraint" for constraint in constraint_types]
    return "$_type" * "(" * join(strs, ", ") * ")"
end

Base.show(io::IO, resampling::ConstrainedResampling) = print(io, summarise(resampling))

export ConstrainedResampling
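A minimal usage sketch, assuming the `TruncateStd`/`TruncateQuantiles` constraints used elsewhere in this section:

```julia
# One constraint per variable/dataset; indexing and length work as defined above
cr = ConstrainedResampling(TruncateStd(1.0), TruncateQuantiles(0.1, 0.9))
length(cr)  # 2
cr[2]       # TruncateQuantiles(0.1, 0.9)
```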
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3861
CONSTRAINT_TYPES = Union{T, Vector{T}} where T <: SamplingConstraint

"""
    ConstrainedValueResampling{N_DATASETS}

Indicates that resampling should be done with constraints on the furnishing
distributions/populations.

## Fields

- `constraints`. The constraints for the datasets. The constraints are
  represented as a tuple of length `N_DATASETS`, where the `i`-th tuple
  element contains the constraints for that dataset. Constraints for each
  dataset must be supplied as either a single sampling constraint, or as a
  vector of sampling constraints with length matching the length of the
  dataset (`Union{SamplingConstraint, Vector{<:SamplingConstraint}}`).
  For example, if the `i`-th dataset contains 352 observations, then
  `constraints[i]` must be either a single sampling constraint
  (e.g. `TruncateStd(1.1)`) or a vector of 352 different sampling constraints
  (e.g. `[TruncateStd(1.0 + rand()) for i = 1:352]`).
- `n::Int`. The number of draws.

## Example

Assume we have three collections of uncertain values, each of length `L = 50`.
These should be resampled `250` times. Before resampling, however, the
distributions/populations furnishing the uncertain values should be
truncated:

- For the first collection, truncate each value at `1.5` times its standard
  deviation around its mean. This could simulate measurement errors from an
  instrument that yields stable measurements whose errors are normally
  distributed, but for which we are not interested in outliers or values
  beyond `1.5` standard deviations for our analyses.
- For the second collection, truncate each value at the `80`th percentile
  range. This could simulate measurement errors from an instrument that
  yields stable measurements whose errors are not normally distributed, so
  that confidence intervals are better to use than standard deviations. In
  this case, we're not interested in outliers, and therefore exclude values
  smaller than the `10`th percentile and larger than the `90`th percentile
  of the data.
- For the third collection, truncate the `i`-th value at a fraction of its
  standard deviation around its mean that grows slightly with `i`, so that
  the truncation ranges from `0.5` to `0.5 + L/100` standard deviations.
  This could simulate, for example, an instrument whose measurement error
  increases over time.

```julia
L = 50
constraints_d1 = TruncateStd(1.5)
constraints_d2 = TruncateQuantiles(0.1, 0.9)
constraints_d3 = [TruncateStd(0.5 + i/100) for i = 1:L]
```
"""
struct ConstrainedValueResampling{N} <: AbstractUncertainDataResampling
    constraints::Tuple{Vararg{CONSTRAINT_TYPES, N}}
    n::Int
end

# It's tedious for the user to always provide a tuple, so unpack a variable
# number of constraints into a tuple, then call the original constructor.
function ConstrainedValueResampling(constraints::CONSTRAINT_TYPES...)
    ConstrainedValueResampling((constraints...,), 1)
end

function ConstrainedValueResampling(n::Int, constraints::CONSTRAINT_TYPES...)
    ConstrainedValueResampling((constraints...,), n)
end

Broadcast.broadcastable(c::ConstrainedValueResampling) = Ref(c)

Base.length(c::ConstrainedValueResampling) = length(c.constraints)
Base.firstindex(c::ConstrainedValueResampling) = 1
Base.lastindex(c::ConstrainedValueResampling) = length(c)
Base.getindex(c::ConstrainedValueResampling, i) = c.constraints[i]
Base.iterate(c::ConstrainedValueResampling, state = 1) = iterate(c.constraints, state)
Base.eachindex(c::ConstrainedValueResampling) = Base.OneTo(length(c.constraints))

function Base.show(io::IO, constraints::ConstrainedValueResampling{N_DATASETS}) where {N_DATASETS}
    s = "$(typeof(constraints)) for $N_DATASETS set(s) of value constraints, where n=$(constraints.n)"
    println(io, s)
end

export ConstrainedValueResampling
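Completing the docstring example above: the three constraint sets and the draw count (`250`, as stated there) combine into a single scheme via the varargs constructor:

```julia
resampling = ConstrainedValueResampling(250, constraints_d1, constraints_d2, constraints_d3)
resampling.n   # 250
resampling[3]  # the vector of 50 per-element TruncateStd constraints
```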
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1414
""" InterpolateAndBin{L}(f::Function, left_bin_edges, intp::L, intp_grid, extrapolation_bc::Union{<:Real, Interpolations.BoundaryCondition}) Indicates that a dataset consisting of both indices and values should first be interpolated to the `intp_grid` grid using the provided `intp` scheme (e.g. `Linear()`). After interpolating, assign the interpolated values to the bins defined by `left_bin_edges` and summarise the values falling in each bin using the summary function `f` (e.g. `mean`). ## Example ```julia using UncertainData, Interpolations, StatsBase # Assume we have the following unevenly spaced data with some `NaN` values T = 100 time = sample(1.0:T*5, T, ordered = true, replace = false) y1 = rand(T) time[rand(1:T, 10)] .= NaN y1[rand(1:T, 10)] .= NaN # We want to first intepolate the dataset linearly to a regular time grid # with steps of `0.1` time units. intp = Linear() intp_grid = 0:0.1:1000 extrapolation_bc = Flat(OnGrid()) # Then, bin the dataset in time bins `50` time units wide, collect all # values in each bin and summarise them using `f`. f = mean left_bin_edges = 0:50:1000 r = InterpolateAndBin(f, left_bin_edges, intp, intp_grid, extrapolation_bc) ``` """ struct InterpolateAndBin{L} f::Function left_bin_edges intp::L # Linear intp_grid extrapolation_bc::Union{<:Real, Interpolations.BoundaryCondition} end export InterpolateAndBin
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
458
""" RandomSequences(n::Int, sequence_length) Indicates that resampling should be performed on discrete, continuous subsets of a dataset. The lengths of each of the `n` subsets is indicated by `sequence_length`, which should be an integer (fixed sequence length) or an iterable of integers (sequences may have different lengths). """ struct RandomSequences <: AbstractUncertainDataResampling n::Int sequence_length end export RandomSequences
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1264
import ..SamplingConstraints: SequentialSamplingConstraint
import ..InterpolationAndGrids: InterpolationGrid

"""
    SequentialInterpolatedResampling{SequentialSamplingConstraint, InterpolationGrid}

Indicates that resampling should be done by first resampling sequentially,
then interpolating the sample to an interpolation grid.

## Fields

- `sequential_constraint::SequentialSamplingConstraint`. The sequential
  sampling constraint, for example `StrictlyIncreasing()`.
- `grid::InterpolationGrid`. The grid onto which the resampled draw
  (generated according to the sequential constraint) is interpolated,
  for example `RegularGrid(0, 100, 2.5)`.

## Examples

For example, `SequentialInterpolatedResampling(StrictlyIncreasing(), RegularGrid(0:2:100))`
indicates a sequential draw that is then interpolated to the grid `0:2:100`.
"""
struct SequentialInterpolatedResampling{S, G} <: AbstractUncertainDataResampling
    sequential_constraint::S
    grid::G
end

function Base.show(io::IO, resampling::SequentialInterpolatedResampling{S, G}) where {S, G}
    print(io, "SequentialInterpolatedResampling{$S, $(resampling.grid)}")
end

export SequentialInterpolatedResampling
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
714
import ..SamplingConstraints: SequentialSamplingConstraint

"""
    SequentialResampling{SequentialSamplingConstraint}

Indicates that resampling should be done by resampling sequentially.

## Fields

- `sequential_constraint::SequentialSamplingConstraint`. The sequential
  sampling constraint, for example `StrictlyIncreasing()`.

## Examples

```julia
SequentialResampling(StrictlyIncreasing())
```
"""
struct SequentialResampling{S} <: AbstractUncertainDataResampling
    sequential_constraint::S
end

function Base.show(io::IO, resampling::SequentialResampling{S}) where {S}
    print(io, "SequentialResampling{$S}")
end

export SequentialResampling
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
7357
import ..UncertainValues: UncertainScalarPopulation, UncertainScalarKDE """ RawValues Indicates that instead of summarising each bin, vectors of raw values should be returned for a binned resampling. """ struct RawValues end const BINREPR = Union{UncertainScalarKDE, UncertainScalarPopulation, RawValues} """ BinnedResampling(left_bin_edges, n::Int; bin_repr = UncertainScalarKDE) BinnedResampling(UncertainScalarKDE, left_bin_edges, n::Int) BinnedResampling(UncertainScalarPopulation, left_bin_edges, n::Int) BinnedResampling(RawValues, left_bin_edges, n::Int) Indicates that binned resampling should be performed. ## Fields - `left_bin_edges`. The left edgepoints of the bins. Either a range or some custom type which implements `minimum` and `step` methods. - `n`. The number of draws. Each point in the dataset is sampled `n` times. If there are `m` points in the dataset, then the total number of draws is `n*m`. - `bin_repr`. A type of uncertain value indicating how each bin should be summarised (`UncertainScalarKDE` for kernel density estimated distributions in each bin, `UncertainScalarPopulation` to represent values in each bin as an equiprobable population) or not summarise but return raw values falling in each bin (`RawValues`). ## Examples ```julia using UncertainData # Resample on a grid from 0 to 200 in steps of 20 grid = 0:20:200 # The number of samples per point in the dataset n_draws = 10000 # Create the resampling scheme. Use kernel density estimates to distribution # in each bin. resampling = BinnedResampling(grid, n_draws, bin_repr = UncertainScalarKDE) # Represent each bin as an equiprobably population resampling = BinnedResampling(grid, n_draws, bin_repr = UncertainScalarPopulation) # Keep raw values for each bin (essentially the same as UncertainScalarPopulation, # but avoids storing an additional vector of weights for the population members). resampling = BinnedResampling(grid, n_draws, bin_repr = RawValues) ``` """ Base.@kwdef struct BinnedResampling{R, B} <: AbstractBinnedUncertainValueResampling where {R <: BINREPR, B} bin_repr::Type{R} = UncertainScalarKDE left_bin_edges::B n::Int end BinnedResampling(left_bin_edges, n::Int; bin_repr = UncertainScalarKDE) = BinnedResampling(bin_repr, left_bin_edges, n) function Base.show(io::IO, b::BinnedResampling{R, B}) where {R, B} T = typeof(b) println(io, "$(T.name){bin_repr: $R, left_bin_edges: $B, n=$(b.n)}") end """ BinnedWeightedResampling(left_bin_edges, weights, n::Int; bin_repr = UncertainScalarKDE) BinnedWeightedResampling(UncertainScalarKDE, left_bin_edges, weights, n::Int) BinnedWeightedResampling(UncertainScalarPopulation, left_bin_edges, weights, n::Int) BinnedWeightedResampling(RawValues, left_bin_edges, weights, n::Int) Indicates that binned resampling should be performed, but weighting each point in the dataset differently. ## Fields - `left_bin_edges`. The left edgepoints of the bins. Either a range or some custom type which implements `minimum` and `step` methods. - `weights`. The relative probability weights assigned to each point. - `n`. The total number of draws. These are distributed among the points of the dataset according to `weights`. - `bin_repr`. A type of uncertain value indicating how each bin should be summarised (`UncertainScalarKDE` for kernel density estimated distributions in each bin, `UncertainScalarPopulation` to represent values in each bin as an equiprobable population) or not summarise but return raw values falling in each bin (`RawValues`). 
## Examples ```julia using UncertainData, StatsBase # Resample on a grid from 0 to 200 in steps of 20 grid = 0:20:200 # Assume our dataset has 50 points. We'll assign random weights to them. wts = Weights(rand(50)) # The total number of draws (on average 1000000/50 = 20000 draws per point # if weights are equal) n_draws = 10000000 # Create the resampling scheme. Use kernel density estimates to distribution # in each bin. resampling = BinnedWeightedResampling(grid, wts, n_draws, bin_repr = UncertainScalarKDE) # Represent each bin as an equiprobably population resampling = BinnedWeightedResampling(grid, wts, n_draws, bin_repr = UncertainScalarPopulation) # Keep raw values for each bin (essentially the same as UncertainScalarPopulation, # but avoids storing an additional vector of weights for the population members). resampling = BinnedWeightedResampling(grid, wts n_draws, bin_repr = RawValues) ``` """ Base.@kwdef struct BinnedWeightedResampling{R, B, W} <: AbstractBinnedUncertainValueResampling where {R <: BINREPR, B, W} bin_repr::Type{R} = UncertainScalarKDE left_bin_edges::B weights::W n::Int end BinnedWeightedResampling(left_bin_edges, weights, n::Int; bin_repr = UncertainScalarKDE) = BinnedWeightedResampling(bin_repr, left_bin_edges, weights, n) function Base.show(io::IO, b::BinnedWeightedResampling{R, B, W}) where {R, B, W} T = typeof(b) println(io, "$(T.name){bin_repr: $R, left_bin_edges: $B, weights: $W, n=$(b.n)}") end """ BinnedMeanResampling Binned resampling where each bin is summarised using the mean of all draws falling in that bin. ## Fields - `left_bin_edges`. The left edgepoints of the bins. Either a range or some custom type which implements `minimum` and `step` methods. - `n`. The number of draws. Each point in the dataset is sampled `n` times. If there are `m` points in the dataset, then the total number of draws is `n*m`. ## Examples ```julia using UncertainData # Resample on a grid from 0 to 200 in steps of 20 grid = 0:20:200 # The number of samples per point in the dataset n_draws = 10000 # Create the resampling scheme resampling = BinnedMeanResampling(grid, n_draws) ``` """ struct BinnedMeanResampling{B} <: AbstractBinnedSummarisedResampling left_bin_edges::B n::Int end function Base.show(io::IO, b::BinnedMeanResampling{B}) where {B} T = typeof(b) println(io, "$(T.name){left_bin_edges=$(b.left_bin_edges), n=$(b.n)}") end """ BinnedMeanWeightedResampling Binned resampling where each bin is summarised using the mean of all draws falling in that bin. Points in the dataset are sampled with probabilities according to `weights`. ## Fields - `left_bin_edges`. The left edgepoints of the bins. Either a range or some custom type which implements `minimum` and `step` methods. - `weights`. The relative probability weights assigned to each point. - `n`. The total number of draws. These are distributed among the points of the dataset according to `weights`. ## Examples ```julia using UncertainData, StatsBase # Resample on a grid from 0 to 200 in steps of 20 grid = 0:20:200 # Assume our dataset has 50 points. We'll assign random weights to them. 
wts = Weights(rand(50))

# The total number of draws (on average 10000000/50 = 200000 draws per point
# if the weights are equal)
n_draws = 10000000

# Create the resampling scheme
resampling = BinnedMeanWeightedResampling(grid, wts, n_draws)
```
"""
struct BinnedMeanWeightedResampling{B} <: AbstractBinnedSummarisedResampling
    left_bin_edges::B
    weights
    n::Int
end

export
    BinnedResampling,
    BinnedWeightedResampling,
    BinnedMeanResampling,
    BinnedMeanWeightedResampling,
    RawValues
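# A minimal usage sketch (added for illustration; `Weights` comes from StatsBase,
# and the grid/draw counts are arbitrary choices, not package defaults):
#
#   grid = 0:10:100
#   BinnedResampling(grid, 1000)                        # KDE summary per bin
#   BinnedResampling(RawValues, grid, 1000)             # raw draws per bin
#   BinnedMeanResampling(grid, 1000)                    # mean per bin
#   BinnedWeightedResampling(grid, Weights(rand(50)), 10^6)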
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3865
import ..InterpolationAndGrids: linear_interpolation, InterpolationGrid

import ..UncertainDatasets: UncertainIndexValueDataset, AbstractUncertainValueDataset

import ..SamplingConstraints: TruncateQuantiles

"""
    resample(udata::UncertainIndexValueDataset,
        grid::InterpolationGrid;
        trunc::TruncateQuantiles = TruncateQuantiles(0.001, 0.999))

Draw a realization of `udata`, then interpolate the data values to `grid`.

To avoid very large spans of interpolation, the uncertain indices are truncated to some
large quantile range. Values are not truncated.
"""
function resample(udata::UncertainIndexValueDataset,
        grid::InterpolationGrid;
        trunc::TruncateQuantiles = TruncateQuantiles(0.001, 0.999))

    # Constrain data so that all furnishing distributions for the indices have finite
    # supports.
    constrained_udata = UncertainIndexValueDataset(
        constrain(udata.indices, trunc),
        udata.values
    )

    # Resample the dataset
    inds, vals = resample(constrained_udata)

    # Interpolate to the provided grid
    intp = linear_interpolation(inds, vals, extrapolation_bc = grid.extrapolation_bc)

    # Return grid indices and the interpolated points.
    range(grid), intp(grid.min:grid.step:grid.max)
end

"""
    resample(udata::UncertainIndexValueDataset,
        sequential_constraint::SequentialSamplingConstraint,
        grid::InterpolationGrid;
        trunc::TruncateQuantiles = TruncateQuantiles(0.001, 0.999))

Draw a realization of `udata`, enforcing a `sequential_constraint` on the indices.
Then, interpolate the values of the realization to the provided grid of indices
(`grid`).

To avoid very large spans of interpolation, the uncertain indices are truncated to some
large quantile range. Values are not truncated.
"""
function resample(udata::UncertainIndexValueDataset,
        sequential_constraint::SequentialSamplingConstraint,
        grid::InterpolationGrid;
        trunc::TruncateQuantiles = TruncateQuantiles(0.001, 0.999))

    # Constrain data so that all furnishing distributions for the indices have finite
    # supports.
    constrained_udata = UncertainIndexValueDataset(
        constrain(udata.indices, trunc),
        udata.values
    )

    # Resample the dataset with the sequential constraint.
    inds, vals = resample(constrained_udata, sequential_constraint)

    # Interpolate to the desired grid.
    intp = linear_interpolation(inds, vals, extrapolation_bc = grid.extrapolation_bc)

    # Return grid indices and the interpolated points.
    range(grid), intp(grid.min:grid.step:grid.max)
end

function resample(udata::UncertainIndexValueDataset,
        sequential_constraint::SequentialSamplingConstraint,
        grid::InterpolationGrid, n::Int;
        trunc::TruncateQuantiles = TruncateQuantiles(0.001, 0.999))

    # Constrain data so that all furnishing distributions for the indices have finite
    # supports.
    constrained_udata = UncertainIndexValueDataset(
        constrain(udata.indices, trunc),
        udata.values
    )

    # Keep the grid indices in a separate variable instead of overwriting `grid`,
    # which is still needed below for its `extrapolation_bc`, `min`, `step` and
    # `max` fields.
    grid_inds = range(grid)
    resampled_vals = zeros(Float64, length(grid_inds), n)

    for i = 1:n
        # Resample using the sequential constraint, then interpolate
        # the realization to the provided grid.
        inds, vals = resample(constrained_udata, sequential_constraint)
        intp = linear_interpolation(inds, vals, extrapolation_bc = grid.extrapolation_bc)

        # Each interpolated realization is a column in `resampled_vals`
        resampled_vals[:, i] = intp(grid.min:grid.step:grid.max)
    end

    # Return grid indices and the interpolated points. The interpolated
    # points now live in a matrix where each column is a realization.
    grid_inds, resampled_vals
end

export resample
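# Usage sketch (added for illustration). `RegularGrid` is the `InterpolationGrid`
# constructor used elsewhere in the package documentation; `X` stands for a
# hypothetical `UncertainIndexValueDataset`:
#
#   grid_inds, intp_vals = resample(X, RegularGrid(0:2:100))
#   grid_inds, intp_mat  = resample(X, StrictlyIncreasing(), RegularGrid(0:2:100), 50)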
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1572
import ..SamplingConstraints: constrain import ..UncertainDatasets: UncertainIndexValueDataset, UncertainIndexDataset, ConstrainedUncertainIndexDataset, ConstrainedUncertainValueDataset, AbstractUncertainValueDataset, AbstractUncertainIndexValueDataset function constrain(x::UncertainIndexDataset, resampling::ConstrainedValueResampling{1}) ConstrainedUncertainIndexDataset(constrain(x.indices, resampling.constraints[1])) end function constrain(x::AbstractUncertainValueDataset, resampling::ConstrainedValueResampling{1}) ConstrainedUncertainValueDataset(constrain(x.values, resampling.constraints[1])) end function constrain(x::AbstractUncertainIndexValueDataset, resampling_inds::ConstrainedValueResampling{N1}, resampling_vals::ConstrainedValueResampling{N2}) where {N1, N2} inds = constrain(x.indices, resampling_inds) vals = constrain(x.values, resampling_vals) UncertainIndexValueDataset(inds, vals) end function constrain(x::AbstractUncertainIndexValueDataset, resampling::ConstrainedIndexValueResampling{2, 1}) inds = constrain(x.indices, resampling[1, 1]) vals = constrain(x.values, resampling[1, 2]) UncertainIndexValueDataset(inds, vals) end function constrain(x::AbstractUncertainIndexValueDataset, constraints_inds::CONSTRAINT_TYPES, constraints_vals::CONSTRAINT_TYPES) inds = constrain(x.indices, constraints_inds) vals = constrain(x.values, constraints_vals) UncertainIndexValueDataset(inds, vals) end export constrain
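# Usage sketch (added for illustration): constrain the indices of a hypothetical
# index-value dataset `X` to ±0.5 standard deviations and its values to the
# 5th-95th percentile range, mirroring the scheme constructors documented in
# this package:
#
#   resampling = ConstrainedIndexValueResampling((TruncateStd(0.5), TruncateQuantiles(0.05, 0.95)))
#   X_constrained = constrain(X, resampling)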
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
18079
import KernelDensity: UnivariateKDE
import ..UncertainValues: UncertainScalarKDE, UncertainScalarPopulation

"""
    resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedMeanWeightedResampling)

Transform index-irregularly spaced uncertain data onto a regular index-grid and
take the mean of the values in each bin. Resamples the data points in `x`
according to `resampling.weights`.

Distributions in each index bin are obtained by resampling all index values in `x`
`resampling.n` times, in proportions obeying `resampling.weights`, and mapping those
index draws to the bins. Simultaneously, the values in `x` are resampled and placed
in the corresponding bins. Finally, the mean in each bin is calculated. In total,
`length(x)*resampling.n` draws are distributed among the bins to form the final
mean estimate.

Returns a vector of mean values, one for each bin.

Assumes that the points in `x` are independent.

## Example

```julia
vars = (1, 2)
npts, tstep = 100, 10
d_xind = Uniform(2.5, 15.5)
d_yind = Uniform(2.5, 15.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)

X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
    d_xind = d_xind, d_yind = d_yind,
    d_xval = d_xval, d_yval = d_yval);

n_draws = 10000 # total number of draws
time_grid = 0:50:1000
wts = Weights(rand(length(X))) # some random weights

# Resample both X and Y so that they are both at the same time indices,
# and take the mean of each bin.
resampled_dataset = resample(X, BinnedMeanWeightedResampling(time_grid, wts, n_draws))
resampled_dataset = resample(Y, BinnedMeanWeightedResampling(time_grid, wts, n_draws))
```
"""
function resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedMeanWeightedResampling)

    # Represent the entire dataset as a weighted population and sample from that
    pop_inds = UncertainValue(x.indices.indices, resampling.weights)
    pop_vals = UncertainValue(x.values.values, resampling.weights)

    # Pre-allocate an array representing each bin, and an array keeping track
    # of the values falling in that bin.
    n_bins = length(resampling.left_bin_edges) - 1
    bin_sums = fill(0.0, n_bins)
    bin_sums_n_entries = fill(0.0, n_bins)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, resampling.n)
    vals = fill(NaN, resampling.n)

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(resampling.left_bin_edges)
    s = step(resampling.left_bin_edges)

    for (j, (pop_idx, pop_val)) in enumerate(zip(pop_inds, pop_vals))
        # Sample the j-th idx-value pair `resampling.n` times and
        # accumulate the values in the correct bin sum. Also keep
        # track of how many values there are in each bin.
        resample!(idxs, pop_idx)
        resample!(vals, pop_val)

        @inbounds for i = 1:resampling.n
            arr_idx = ceil(Int, (idxs[i] - mini) / s)

            # Because the indices of `x` are uncertain values
            # with potentially infinite support, we need to check
            # that the value falls inside the grid
            if 0 < arr_idx <= n_bins
                bin_sums[arr_idx] += vals[i]
                bin_sums_n_entries[arr_idx] += 1.0
            end
        end
    end

    # Return bin averages (entries with 0s are represented as NaNs)
    bin_avgs = bin_sums ./ bin_sums_n_entries
    bin_avgs[isapprox.(bin_avgs, 0.0)] .= NaN

    return bin_avgs
end

"""
    resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedMeanResampling)

Transform index-irregularly spaced uncertain data onto a regular index-grid and
take the mean of the values in each bin.
Distributions in each index bin are obtained by resampling all index values in `x`
`resampling.n` times, and mapping those index draws to the bins. Simultaneously, the
values in `x` are resampled and placed in the corresponding bins. Finally, the mean
in each bin is calculated. In total, `length(x)*resampling.n` draws are distributed
among the bins to form the final mean estimate.

Returns a vector of mean values, one for each bin.

Assumes that the points in `x` are independent.

## Example

```julia
vars = (1, 2)
npts, tstep = 100, 10
d_xind = Uniform(2.5, 15.5)
d_yind = Uniform(2.5, 15.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)

X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
    d_xind = d_xind, d_yind = d_yind,
    d_xval = d_xval, d_yval = d_yval);

n_draws = 10000 # draws per uncertain value
time_grid = 0:50:1000

# Resample both X and Y so that they are both at the same time indices,
# and take the mean of each bin.
resampled_dataset = resample(X, BinnedMeanResampling(time_grid, n_draws))
resampled_dataset = resample(Y, BinnedMeanResampling(time_grid, n_draws))
```
"""
function resample(x::AbstractUncertainIndexValueDataset, resampling::BinnedMeanResampling)

    # Represent the entire dataset as an equally-weighted population and sample from that
    pop_inds = UncertainValue(x.indices.indices, [1 for _ in x.indices])
    pop_vals = UncertainValue(x.values.values, [1 for _ in x.values])

    # Pre-allocate an array representing each bin, and an array keeping track
    # of the values falling in that bin.
    n_bins = length(resampling.left_bin_edges) - 1
    bin_sums = fill(0.0, n_bins)
    bin_sums_n_entries = fill(0.0, n_bins)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, resampling.n)
    vals = fill(NaN, resampling.n)

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(resampling.left_bin_edges)
    s = step(resampling.left_bin_edges)

    for (j, (pop_idx, pop_val)) in enumerate(zip(pop_inds, pop_vals))
        # Sample the j-th idx-value pair `resampling.n` times and
        # accumulate the values in the correct bin sum. Also keep
        # track of how many values there are in each bin.
        resample!(idxs, pop_idx)
        resample!(vals, pop_val)

        @inbounds for i = 1:resampling.n
            arr_idx = ceil(Int, (idxs[i] - mini) / s)

            # Because the indices of `x` are uncertain values
            # with potentially infinite support, we need to check
            # that the value falls inside the grid
            if 0 < arr_idx <= n_bins
                bin_sums[arr_idx] += vals[i]
                bin_sums_n_entries[arr_idx] += 1.0
            end
        end
    end

    # Return bin averages (entries with 0s are represented as NaNs)
    bin_avgs = bin_sums ./ bin_sums_n_entries
    bin_avgs[isapprox.(bin_avgs, 0.0)] .= NaN

    return bin_avgs
end

"""
    resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedResampling{UncertainScalarKDE};
        nan_threshold = 0.0)

Transform index-irregularly spaced uncertain data onto a regular index-grid.

Distributions in each index bin are obtained by resampling all index values in `x`
`resampling.n` times, and mapping those index draws to the bins. Simultaneously, the
values in `x` are resampled and placed in the corresponding bins. In total,
`length(x)*resampling.n` draws are distributed among the bins to form the final KDEs.

Returns an `UncertainIndexValueDataset`. The distribution of values in the `i`-th bin
is approximated by a kernel density estimate (KDE) over the draws falling in the
`i`-th bin.

Assumes that the points in `x` are independent.
## Example

```julia
vars = (1, 2)
npts, tstep = 100, 10
d_xind = Uniform(2.5, 15.5)
d_yind = Uniform(2.5, 15.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)

X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
    d_xind = d_xind, d_yind = d_yind,
    d_xval = d_xval, d_yval = d_yval);

n_draws = 10000 # draws per uncertain value
time_grid = 0:50:1000
resampling = BinnedResampling(time_grid, n_draws)

# Resample both X and Y so that they are both at the same time indices.
resampled_dataset = resample(X, resampling)
resampled_dataset = resample(Y, resampling)
```
"""
function resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedResampling{UncertainScalarKDE};
        nan_threshold = 0.0)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, resampling.n)
    vals = fill(NaN, resampling.n)

    perminds = zeros(Int, resampling.n)
    sorted_idxs = fill(NaN, resampling.n)
    sorted_vals = fill(NaN, resampling.n)

    bin_edges = resampling.left_bin_edges
    n_bins = length(bin_edges) - 1

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(resampling.left_bin_edges)
    s = step(resampling.left_bin_edges)

    # Empty vectors that will contain draws.
    binvecs = [Vector{Float64}(undef, 0) for i = 1:n_bins]
    #[sizehint!(bv, resampling.n*n_bins) for bv in binvecs]

    @inbounds for (j, (idx, val)) in enumerate(zip(x.indices, x.values))
        # Resample the j-th index and j-th value
        resample!(idxs, idx)
        resample!(vals, val)

        # Get the vector that sorts the index vector, and use that to
        # sort the draws.
        sortperm!(perminds, idxs)
        sorted_idxs .= idxs[perminds]
        sorted_vals .= vals[perminds]

        # The vectors above are already sorted, so this can be done faster
        for i in 1:n_bins
            inbin = findall(bin_edges[i] .<= sorted_idxs .<= bin_edges[i+1])

            if length(inbin) > nan_threshold
                append!(binvecs[i], sorted_vals[inbin])
            end
        end
    end

    # Estimate distributions in each bin by kernel density estimation
    estimated_value_dists = Vector{Union{CertainValue, UncertainScalarKDE}}(undef, n_bins)

    for i in 1:n_bins
        if length(binvecs[i]) > nan_threshold
            estimated_value_dists[i] = UncertainValue(binvecs[i])
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(UncertainValue.(bin_edges[1:end-1] .+ step(bin_edges)/2))
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end

function resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedResampling{UncertainScalarPopulation};
        nan_threshold = 0.0)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, resampling.n)
    vals = fill(NaN, resampling.n)

    perminds = zeros(Int, resampling.n)
    sorted_idxs = fill(NaN, resampling.n)
    sorted_vals = fill(NaN, resampling.n)

    bin_edges = resampling.left_bin_edges
    n_bins = length(bin_edges) - 1

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(resampling.left_bin_edges)
    s = step(resampling.left_bin_edges)

    # Empty vectors that will contain draws.
    binvecs = [Vector{Float64}(undef, 0) for i = 1:n_bins]
    #[sizehint!(bv, resampling.n*n_bins) for bv in binvecs]

    @inbounds for (j, (idx, val)) in enumerate(zip(x.indices, x.values))
        # Resample the j-th index and j-th value
        resample!(idxs, idx)
        resample!(vals, val)

        # Get the vector that sorts the index vector, and use that to
        # sort the draws.
        sortperm!(perminds, idxs)
        sorted_idxs .= idxs[perminds]
        sorted_vals .= vals[perminds]

        # The vectors above are already sorted, so this can be done faster
        for i in 1:n_bins
            inbin = findall(bin_edges[i] .<= sorted_idxs .<= bin_edges[i+1])

            if length(inbin) > nan_threshold
                append!(binvecs[i], sorted_vals[inbin])
            end
        end
    end

    # Represent each bin as an equiprobable population of the draws falling in it
    estimated_value_dists = Vector{Union{CertainValue, UncertainScalarPopulation}}(undef, n_bins)

    for i in 1:n_bins
        if length(binvecs[i]) > nan_threshold
            L = length(binvecs[i])
            estimated_value_dists[i] = UncertainValue(binvecs[i], repeat([1 / L], L))
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(UncertainValue.(bin_edges[1:end-1] .+ step(bin_edges)/2))
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end

function resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedResampling{RawValues};
        nan_threshold = 0.0)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, resampling.n)
    vals = fill(NaN, resampling.n)

    perminds = zeros(Int, resampling.n)
    sorted_idxs = fill(NaN, resampling.n)
    sorted_vals = fill(NaN, resampling.n)

    bin_edges = resampling.left_bin_edges
    n_bins = length(bin_edges) - 1

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(resampling.left_bin_edges)
    s = step(resampling.left_bin_edges)

    # Empty vectors that will contain draws.
    binvecs = [Vector{Float64}(undef, 0) for i = 1:n_bins]
    #[sizehint!(bv, resampling.n*n_bins) for bv in binvecs]

    @inbounds for (j, (idx, val)) in enumerate(zip(x.indices, x.values))
        # Resample the j-th index and j-th value
        resample!(idxs, idx)
        resample!(vals, val)

        # Get the vector that sorts the index vector, and use that to
        # sort the draws.
        sortperm!(perminds, idxs)
        sorted_idxs .= idxs[perminds]
        sorted_vals .= vals[perminds]

        # The vectors above are already sorted, so this can be done faster
        for i in 1:n_bins
            inbin = findall(bin_edges[i] .<= sorted_idxs .<= bin_edges[i+1])

            if length(inbin) > nan_threshold
                append!(binvecs[i], sorted_vals[inbin])
            end
        end
    end

    bin_centers = bin_edges[1:end-1] .+ step(bin_edges)/2

    return bin_centers, binvecs
end

"""
    resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedWeightedResampling;
        nan_threshold = 0.0)

Transform index-irregularly spaced uncertain data onto a regular index-grid.

Distributions in each index bin are obtained by resampling all index values in `x`
`resampling.n` times, sampled according to probabilities `resampling.weights`, and
mapping those index draws to the bins. Simultaneously, the values in `x` are
resampled and placed in the corresponding bins. In total, `length(x)*resampling.n`
draws are distributed among the bins to form the final KDEs.

Returns an `UncertainIndexValueDataset`. The distribution of values in the `i`-th bin
is approximated by a kernel density estimate (KDE) over the draws falling in the
`i`-th bin.

Assumes that the points in `x` are independent.
## Example

```julia
vars = (1, 2)
npts, tstep = 100, 10
d_xind = Uniform(2.5, 15.5)
d_yind = Uniform(2.5, 15.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)

X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
    d_xind = d_xind, d_yind = d_yind,
    d_xval = d_xval, d_yval = d_yval);

left_bin_edges = 0:50:1000
n_draws = 10000
wts = Weights(rand(length(X)))
resampling = BinnedWeightedResampling(left_bin_edges, wts, n_draws)

resampled_dataset = resample(X, resampling)
```
"""
function resample(x::AbstractUncertainIndexValueDataset,
        resampling::BinnedWeightedResampling;
        nan_threshold = 0.0)

    pop_inds = UncertainValue(x.indices.indices, resampling.weights)
    pop_vals = UncertainValue(x.values.values, resampling.weights)

    # Pre-allocate some arrays into which we resample the values of the
    # index and value populations.
    idxs = fill(NaN, resampling.n)
    vals = fill(NaN, resampling.n)

    perminds = zeros(Int, resampling.n)
    sorted_idxs = fill(NaN, resampling.n)
    sorted_vals = fill(NaN, resampling.n)

    bin_edges = resampling.left_bin_edges
    n_bins = length(bin_edges) - 1

    # Used to compute the index of the bin into which a draw belongs
    mini = minimum(resampling.left_bin_edges)
    s = step(resampling.left_bin_edges)

    # Empty vectors that will contain draws.
    binvecs = [Vector{Float64}(undef, 0) for i = 1:n_bins]
    #[sizehint!(bv, resampling.n*n_bins) for bv in binvecs]

    @inbounds for (j, (idx, val)) in enumerate(zip(pop_inds, pop_vals))
        # Resample the j-th index and j-th value
        resample!(idxs, idx)
        resample!(vals, val)

        # Get the vector that sorts the index vector, and use that to
        # sort the draws.
        sortperm!(perminds, idxs)
        sorted_idxs .= idxs[perminds]
        sorted_vals .= vals[perminds]

        # The vectors above are already sorted, so this can be done faster
        for i in 1:n_bins
            inbin = findall(bin_edges[i] .<= sorted_idxs .<= bin_edges[i+1])

            if length(inbin) > nan_threshold
                append!(binvecs[i], sorted_vals[inbin])
            end
        end
    end

    # Estimate distributions in each bin by kernel density estimation
    estimated_value_dists = Vector{AbstractUncertainValue}(undef, n_bins)

    for i in 1:n_bins
        if length(binvecs[i]) > nan_threshold
            estimated_value_dists[i] = UncertainValue(binvecs[i])
        else
            estimated_value_dists[i] = UncertainValue(NaN)
        end
    end

    new_inds = UncertainIndexDataset(UncertainValue.(bin_edges[1:end-1] .+ step(bin_edges)/2))
    new_vals = UncertainValueDataset(estimated_value_dists)

    UncertainIndexValueDataset(new_inds, new_vals)
end
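# Usage sketch (added for illustration): the `BinnedResampling{RawValues}` method
# above returns bin centers and the raw draws falling in each bin, instead of an
# `UncertainIndexValueDataset` (`X` is a hypothetical index-value dataset):
#
#   bin_centers, bin_draws = resample(X, BinnedResampling(RawValues, 0:50:1000, 1000))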
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3525
""" resample(x::AbstractUncertainValueDataset, resampling::ConstrainedValueResampling) Resample `x` by first constraining the supports of the distributions/populations furnishing the uncertain values, then drawing samples from the limited supports. Sampling is done without assuming any sequential dependence between the elements of `x`, such no that no dependence is introduced in the draws beyond what is potentially already present in the collection of values. ## Example ```julia # Some example data N = 50 x_uncertain = [UncertainValue(Normal, x, rand(Uniform(0.1, 0.8))) for x in rand(N)] y_uncertain = [UncertainValue(Normal, y, rand(Uniform(0.1, 0.8))) for y in rand(N)] x = UncertainValueDataset(x_uncertain) y = UncertainValueDataset(y_uncertain) # Resample with different constraints resample(x, ConstrainedValueResampling(TruncateStd(1.5)) resample(y, ConstrainedValueResampling(TruncateStd(0.5)) resample(y, ConstrainedValueResampling(TruncateQuantiles(0.2, 0.8)) ``` """ function resample(x::AbstractUncertainValueDataset, resampling::ConstrainedValueResampling{1}) constrained_x = constrain(x, resampling.constraints[1]) resample(constrained_x, resampling.n) end """ resample(x::AbstractUncertainIndexValueDataset, resampling::ConstrainedIndexValueResampling) Resample `x` by first constraining the supports of the distributions/populations furnishing the uncertain indices and values, then drawing samples from the limited supports. Sampling is done without assuming any sequential dependence between the elements of `x`, such no that no dependence is introduced in the draws beyond what is potentially already present in the collection of values. ## Example ```julia # Some example data N = 50 x_uncertain = [UncertainValue(Normal, x, rand(Uniform(0.1, 0.8))) for x in rand(N)] y_uncertain = [UncertainValue(Normal, y, rand(Uniform(0.1, 0.8))) for y in rand(N)] x = UncertainValueDataset(x_uncertain) y = UncertainValueDataset(y_uncertain) time_uncertain = [UncertainValue(Normal, i, 1) for i = 1:length(x)]; time_certain = [CertainValue(i) for i = 1:length(x)]; timeinds_x = UncertainIndexDataset(time_uncertain) timeinds_y = UncertainIndexDataset(time_certain) X = UncertainIndexValueDataset(timeinds_x, x) Y = UncertainIndexValueDataset(timeinds_y, y); ########################### # Define resampling scheme ########################### # Truncate each of the indices for x at 0.8 their standard deviation around the mean constraints_x_inds = TruncateStd(0.8) # Truncate each of the indices for y at 1.5 their standard deviation around the mean constraints_y_inds = TruncateStd(1.5) # Truncate each of the values of x at the 20th percentile range constraints_x_vals = [TruncateQuantiles(0.4, 0.6) for i = 1:N]; # Truncate each of the values of x at the 80th percentile range constraints_y_vals = [TruncateQuantiles(0.1, 0.9) for i = 1:N]; cs_x = (constraints_x_inds, constraints_x_vals) cs_y = (constraints_y_inds, constraints_y_vals) ########### # Resample ########### resample(X, ConstrainedIndexValueResampling(cs_x)) resample(Y, ConstrainedIndexValueResampling(cs_y)) ``` """ function resample(x::AbstractUncertainIndexValueDataset, resampling::ConstrainedIndexValueResampling{2, 1}) constrained_inds = constrain(x.indices, resampling.constraints[1][1]) constrained_vals = constrain(x.values, resampling.constraints[1][2]) d = UncertainIndexValueDataset(constrained_inds, constrained_vals) resample(d, resampling.n) end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2365
import ..InterpolationAndGrids: interpolate_and_bin

"""
    resample(udata::AbstractUncertainIndexValueDataset,
        regularization_scheme::InterpolateAndBin{Linear})

Draw a single realisation of `udata` and interpolate-and-bin the data according to
the provided regularization scheme. Assumes points in `udata` are independent and
sorts the draw according to the index values before interpolating. See also
[`InterpolateAndBin`](@ref).

## Example

```julia
npts = 50
y = rand(npts)
N = Normal(0, 1)

for t in 3:npts
    y[t] = 0.7*y[t-1] - 0.35*y[t-2] + rand(N)
end

# Assume data are unevenly spaced
time = sample(1.0:npts*5, npts, ordered = true, replace = false)

# Assign some uncertainties to both time indices and values and gather
# in an UncertainIndexValueDataset
utime = UncertainValue.(Normal.(time, 2))
uy = UncertainValue.(Normal.(y, 0.1))
udata = UncertainIndexValueDataset(utime, uy)

# Interpolation-and-binning scheme. First interpolate to a very fine grid,
# then gather the points falling in each of the coarser bins and summarise
# each bin using the mean of the points in each bin.
left_bin_edges = 0:10:npts*5
r = InterpolateAndBin(mean, left_bin_edges, Linear(), 0:0.1:1000, Flat(OnGrid()))

# The binned time axis:
time_binned = left_bin_edges[1:end-1] .+ step(left_bin_edges)/2

# Get a corresponding set of resampled (interpolated + binned) values
y_binned = resample(udata, r)

# Plot some interpolated+binned draws
p = plot(xlabel = "time", ylabel = "value")

for i = 1:100
    plot!(time_binned, resample(udata, r), lw = 0.3, α = 0.2, ms = 0.1, c = :red,
        marker = stroke(0.1), label = "")
end

plot!(time, y, c = :black, lw = 1, ms = 2, marker = stroke(2.0, :black), label = "")
plot!(udata, c = :black, lw = 1, ms = 2, marker = stroke(0.1, :black), [0.05, 0.95], [0.05, 0.95])
vline!(left_bin_edges, c = :black, α = 0.3, lw = 0.3, label = "")
```
"""
function resample(udata::AbstractUncertainIndexValueDataset,
        regularization_scheme::InterpolateAndBin{Linear})

    inds, vals = resample(udata)
    sortidxs = sortperm(inds)

    r = regularization_scheme
    vals_binned = interpolate_and_bin(r.f, r.left_bin_edges,
        inds[sortidxs], vals[sortidxs], r.intp, r.intp_grid, r.extrapolation_bc)

    return vals_binned
end

export resample
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2898
""" resample(x::AbstractUncertainIndexValueDataset, resampling::SequentialResampling) Resample `x` according to a sequential resampling constraint. This way of resampling introduces some serial dependence between the elements of `x` - *beyond* what might already be present in the dataset. This is because imposing a sequential constraint (e.g. `StrictlyIncreasing`) to the `i`-th value of the dataset imposes constraints on what is possible to sample from the `i+1`th value. ## Example ```julia # Some example data N = 50 x_uncertain = [UncertainValue(Normal, x, rand(Uniform(0.1, 0.8))) for x in rand(N)] y_uncertain = [UncertainValue(Normal, y, rand(Uniform(0.1, 0.8))) for y in rand(N)] x = UncertainValueDataset(x_uncertain) y = UncertainValueDataset(y_uncertain) time_uncertain = [UncertainValue(Normal, i, 1) for i = 1:length(x)]; time_certain = [CertainValue(i) for i = 1:length(x)]; timeinds_x = UncertainIndexDataset(time_uncertain) timeinds_y = UncertainIndexDataset(time_certain) X = UncertainIndexValueDataset(timeinds_x, x) Y = UncertainIndexValueDataset(timeinds_y, y); # Resample seq_resampling = SequentialResampling(StrictlyIncreasing()) resample(X, seq_resampling) ``` """ function resample(x::AbstractUncertainIndexValueDataset, resampling::SequentialResampling{S}) where {S} resample(x, resampling.sequential_constraint) end """ resample(x::AbstractUncertainIndexValueDataset, resampling::SequentialInterpolatedResampling) Resample `x` according to a sequential resampling constraint, then interpolate the draw(s) to some specified grid. This way of resampling introduces some serial dependence between the elements of `x` - *beyond* what might already be present in the dataset. This is because imposing a sequential constraint (e.g. `StrictlyIncreasing`) to the `i`-th value of the dataset imposes constraints on what is possible to sample from the `i+1`th value. ## Example ```julia # Some example data N = 50 x_uncertain = [UncertainValue(Normal, x, rand(Uniform(0.1, 0.8))) for x in rand(N)] y_uncertain = [UncertainValue(Normal, y, rand(Uniform(0.1, 0.8))) for y in rand(N)] x = UncertainValueDataset(x_uncertain) y = UncertainValueDataset(y_uncertain) time_uncertain = [UncertainValue(Normal, i, 1) for i = 1:length(x)]; time_certain = [CertainValue(i) for i = 1:length(x)]; timeinds_x = UncertainIndexDataset(time_uncertain) timeinds_y = UncertainIndexDataset(time_certain) X = UncertainIndexValueDataset(timeinds_x, x) Y = UncertainIndexValueDataset(timeinds_y, y); # Resample seqintp_resampling = SequentialInterpolatedResampling(StrictlyIncreasing(), RegularGrid(0:2:N)) resample(X, seqintp_resampling) ``` """ function resample(x::AbstractUncertainIndexValueDataset, resampling::SequentialInterpolatedResampling{S, G}) where {S, G} resample(x, resampling.sequential_constraint, resampling.grid) end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1901
import ..UncertainDatasets: UVAL_COLLECTION_TYPES
import ..SamplingConstraints: SamplingConstraint

"""
    resample_elwise(uvd::UVAL_COLLECTION_TYPES)

Resample each element in `uvd` once. The i-th entry in the returned vector is
a 1-element vector containing one unique draw of `uvd[i]`.
"""
function resample_elwise(uvd::UVAL_COLLECTION_TYPES)
    [resample(uvd[i], 1) for i = 1:length(uvd)]
end

"""
    resample_elwise(uvd::UVAL_COLLECTION_TYPES, n::Int)

Resample each element in `uvd` `n` times. The i-th entry in the returned vector
is an `n`-element vector consisting of `n` unique draws of `uvd[i]`.
"""
function resample_elwise(uvd::UVAL_COLLECTION_TYPES, n::Int)
    [resample(uvd[i], n) for i = 1:length(uvd)]
end

"""
    resample_elwise(uvd::UVAL_COLLECTION_TYPES,
        constraint::Union{SamplingConstraint, Vector{SamplingConstraint}},
        n::Int)

Resample each element in `uvd` `n` times. The i-th entry in the returned vector
is an `n`-element vector consisting of `n` unique draws of `uvd[i]`, drawn after
first truncating the support of `uvd[i]` according to the provided `constraint`(s).
"""
resample_elwise(uvd::UVAL_COLLECTION_TYPES,
    constraint::Union{SamplingConstraint, Vector{SamplingConstraint}}, n::Int)

function resample_elwise(uvd::UVAL_COLLECTION_TYPES, constraint::SamplingConstraint, n::Int)
    [resample(uvd[i], constraint, n) for i = 1:length(uvd)]
end

function resample_elwise(uvd::UVAL_COLLECTION_TYPES, constraint::Vector{<:SamplingConstraint}, n::Int)
    Lc, Luv = length(constraint), length(uvd)
    if Lc != Luv
        error("""The number of constraints must match the number of uncertain values
            in the dataset. Got $Lc constraints, but needed $Luv.""")
    end

    [resample(uvd[i], constraint[i], n) for i = 1:length(uvd)]
end
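# Usage sketch (added for illustration):
#
#   uvals = UncertainValueDataset([UncertainValue(Normal, i, 0.5) for i = 1:10])
#   resample_elwise(uvals, 100)                   # 10 vectors of 100 draws each
#   resample_elwise(uvals, TruncateStd(1.0), 100) # as above, truncated at ±1 std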
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
7590
import ..UncertainDatasets.UncertainIndexDataset
import ..UncertainDatasets.AbstractUncertainIndexDataset

##############################
# Resample the entire dataset
##############################

"""
    resample(uvd::AbstractUncertainIndexDataset)

Draw a realisation of an uncertain index dataset according to the distributions
of the uncertain values comprising it.
"""
function resample(uvd::DT) where {DT <: AbstractUncertainIndexDataset}
    L = length(uvd)
    [resample(uvd.indices[i]) for i in 1:L]
end

"""
    resample(uv::AbstractUncertainIndexDataset,
        constraint::Vector{<:SamplingConstraint})

Draw a realisation of an uncertain index dataset according to the uncertain values
comprising it, while constraining the distributions furnishing the values in the
dataset according to the provided sampling constraints.
"""
function resample(uv::DT, constraint::Vector{<:SamplingConstraint}) where {
        DT <: AbstractUncertainIndexDataset}
    [resample(uv.indices[i], constraint[i]) for i in 1:length(uv)]
end

function resample(uv::DT, constraint::SamplingConstraint) where {
        DT <: AbstractUncertainIndexDataset}
    [resample(uv.indices[i], constraint) for i in 1:length(uv)]
end

function resample(uv::DT, constraint::SamplingConstraint, n::Int) where {
        DT <: AbstractUncertainIndexDataset}
    [[resample(uv.indices[i], constraint) for i in 1:length(uv)] for k = 1:n]
end

function resample(uv::DT, constraint::Vector{<:SamplingConstraint}, n::Int) where {
        DT <: AbstractUncertainIndexDataset}
    [[resample(uv.indices[i], constraint[i]) for i in 1:length(uv)] for k = 1:n]
end

##############################
# Element-wise resampling
##############################

"""
    resample_elwise(uvd::AbstractUncertainIndexDataset, n::Int)

Resample each element in `uvd` `n` times. The i-th entry in the returned
vector is an `n`-element vector consisting of `n` unique draws of `uvd[i]`.
"""
function resample_elwise(uvd::DT, n::Int) where {DT <: AbstractUncertainIndexDataset}
    [resample(uvd[i], n) for i = 1:length(uvd)]
end

"""
    resample(uvd::AbstractUncertainIndexDataset, n::Int)

Draw `n` realisations of an uncertain index dataset according to the distributions
of the uncertain values comprising it.
"""
function resample(uvd::DT, n::Int) where {DT <: AbstractUncertainIndexDataset}
    L = length(uvd)
    [[resample(uvd.indices[i]) for i in 1:L] for k in 1:n]
end
# """ # function resample(uv::DT, constraint::NoConstraint, n::Int) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [[resample(uv.indices[i], constraint) for i in 1:L] for k = 1:n] # end # """ # resample(uv::UncertainIndexDataset, # constraint::TruncateLowerQuantile) -> Vector{Float64} # Draw a realisation of an `UncertainIndexDataset` where each uncertain value # is truncated according to `constraint`. In the case of `TruncateLowerQuantile`, # the supports of the distributions are truncated below at some quantile. # """ # function resample(uv::DT, constraint::TruncateLowerQuantile) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [resample(uv.indices[i], constraint) for i in 1:L] # end # """ # resample(uv::UncertainIndexDataset, constraint::TruncateLowerQuantile, # n::Int) -> Vector{Vector{Float64}} # Draw `n` realisation of an `UncertainIndexDataset` where each uncertain value # is truncated according to `constraint`. In the case of `TruncateLowerQuantile`, # the supports of the distributions are truncated below at some quantile. # """ # function resample(uv::DT, constraint::TruncateLowerQuantile, n::Int) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [[resample(uv.indices[i], constraint) for i in 1:L] for k = 1:n] # end # """ # resample(uv::UncertainIndexDataset, # constraint::TruncateUpperQuantile) -> Vector{Float64} # Draw a realisation of an `UncertainIndexDataset` where each uncertain value # is truncated according to `constraint`. In the case of `TruncateLowerQuantile`, # the supports of the distributions are truncated above at some quantile. # """ # function resample(uv::DT, constraint::TruncateUpperQuantile) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [resample(uv.indices[i], constraint) for i in 1:L] # end # """ # resample(uv::UncertainIndexDataset, constraint::TruncateUpperQuantile, # n::Int) -> Vector{Vector{Float64}} # Draw `n` realisation of an `UncertainIndexDataset` where each uncertain value # is truncated according to `constraint`. In the case of `TruncateUpperQuantile`, # the supports of the distributions are truncated above at some quantile. # """ # function resample(uv::DT, constraint::TruncateUpperQuantile, n::Int) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [[resample(uv.indices[i], constraint) for i in 1:L] for k = 1:n] # end # """ # resample(uv::UncertainIndexDataset, # constraint::TruncateQuantiles) -> Vector{Float64} # Draw a realisation of an `UncertainIndexDataset` where each uncertain value # is truncated according to `constraint`. In the case of `TruncateLowerQuantile`, # the supports of the distributions are truncated at some quantile range. # """ # function resample(uv::UncertainIndexDataset, constraint::TruncateQuantiles) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [resample(uv.indices[i], constraint) for i in 1:L] # end # """ # resample(uv::UncertainIndexDataset, n::Int, # constraint::TruncateUpperQuantile) -> Vector{Vector{Float64}} # Draw `n` realisation of an `UncertainIndexDataset` where each uncertain value # is truncated according to `constraint`. In the case of `TruncateUpperQuantile`, # the supports of the distributions are truncated above at some quantile. 
# """ # function resample(uv::DT, constraint::TruncateQuantiles, n::Int) where { # DT <: AbstractUncertainIndexDataset} # L = length(uv) # [[resample(uv.indices[i], constraint) for i in 1:L] for k = 1:n] # end # """ # resample_elwise(uvd::UncertainIndexDataset, constraint::SamplingConstraint, n::Int) # Resample each element in `uvals` `n` times. The i-th entry in the returned # vector is a `n`-element vector consisting of `n` unique draws of `uvals[i]`, drawn # after first truncating the support of `uvals[i]` according to the provided `constraint`. # """ # function resample_elwise(uvd::DT, constraint::SamplingConstraint, n::Int) where { # DT <: AbstractUncertainIndexDataset} # [resample(uvd[i], constraint, n) for i = 1:length(uvd)] # end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
12555
import ..UncertainDatasets: UncertainIndexValueDataset
import ..SamplingConstraints: SamplingConstraint

# Some summary documentation for the online documentation

"""
    resample(udata::UncertainIndexValueDataset,
        constraint::Union{SamplingConstraint, Vector{SamplingConstraint}}) -> Tuple{Vector{Float64}, Vector{Float64}}

Resample an uncertain index-value dataset in an element-wise manner.

Enforces the provided sampling `constraint` to all uncertain values in the dataset,
both indices and data values.

If a single constraint is provided, then that constraint will be applied to all values.
If a vector of constraints (as many as there are values) is provided, then the
constraints are applied element-wise to both the indices and the data values.
"""
resample(udata::UncertainIndexValueDataset,
    constraint::Union{SamplingConstraint, Vector{SamplingConstraint}})

"""
    resample(udata::UncertainIndexValueDataset,
        constraint::Union{SamplingConstraint, Vector{SamplingConstraint}},
        n::Int) -> Tuple{Vector{Float64}, Vector{Float64}}

Resample `n` realizations of an uncertain index-value dataset in an element-wise
manner.

Enforces the provided sampling `constraint` to all uncertain values in the dataset,
both indices and data values.

If a single constraint is provided, that constraint will be applied to all values.
If a vector of constraints (as many as there are values) is provided, then the
constraints are applied element-wise to both the indices and the data values.
"""
resample(udata::UncertainIndexValueDataset,
    constraint::Union{SamplingConstraint, Vector{SamplingConstraint}},
    n::Int)

"""
    resample(udata::UncertainIndexValueDataset,
        constraint_idxs::Union{SamplingConstraint, Vector{SamplingConstraint}},
        constraint_vals::Union{SamplingConstraint, Vector{SamplingConstraint}}) -> Tuple{Vector{Float64}, Vector{Float64}}

Resample an uncertain index-value dataset in an element-wise manner.

Enforces separate sampling constraints to the indices and to the data values.

If a single constraint is provided, that constraint will be applied to all values.
If a vector of constraints (as many as there are values) is provided, then the
constraints are applied element-wise.
"""
resample(udata::UncertainIndexValueDataset,
    constraint_idxs::Union{SamplingConstraint, Vector{SamplingConstraint}},
    constraint_vals::Union{SamplingConstraint, Vector{SamplingConstraint}})

"""
    resample(udata::UncertainIndexValueDataset,
        constraint_idxs::Union{SamplingConstraint, Vector{SamplingConstraint}},
        constraint_vals::Union{SamplingConstraint, Vector{SamplingConstraint}},
        n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}}

Resample `n` realizations of an uncertain index-value dataset in an element-wise
manner.

Enforces separate sampling constraints to the indices and to the data values.

If a single constraint is provided, that constraint will be applied to all values.
If a vector of constraints (as many as there are values) is provided, then the
constraints are applied element-wise.
"""
resample(udata::UncertainIndexValueDataset,
    constraint_idxs::Union{SamplingConstraint, Vector{SamplingConstraint}},
    constraint_vals::Union{SamplingConstraint, Vector{SamplingConstraint}},
    n::Int)

###########################################
# The documentation for the actual methods
###########################################

"""
    resample(udata::UncertainIndexValueDataset) -> Tuple{Vector{Float64}, Vector{Float64}}

Resample an uncertain index-value dataset in an element-wise manner.
""" function resample(udata::UncertainIndexValueDataset) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx) values[i] = resample(val) end indices, values end """ resample(udata::UncertainIndexValueDataset, constraint::SamplingConstraint) -> Tuple{Vector{Float64}, Vector{Float64}} Resample an uncertain index-value dataset element-wise in an element-wise manner. Enforces the provided sampling `constraint` to all uncertain values in the dataset, both indices and data values. """ function resample(udata::UncertainIndexValueDataset, constraint::SamplingConstraint) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx, constraint) values[i] = resample(val, constraint) end indices, values end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::SamplingConstraint, constraint_vals::SamplingConstraint) -> Tuple{Vector{Float64}, Vector{Float64}} Resample an uncertain index-value dataset in an element-wise manner. Enforces the same sampling constraint `constraint_idxs` to all index values, and the `constraint_vals` sampling constraint to all data values. """ function resample(udata::UncertainIndexValueDataset, constraint_idxs::SamplingConstraint, constraint_vals::SamplingConstraint) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx, constraint_idxs) values[i] = resample(val, constraint_vals) end indices, values end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint) -> Tuple{Vector{Float64}, Vector{Float64}} Resample an uncertain index-value dataset in an element-wise manner. Enforces a unique sampling constraint `constraint_idxs[i]` to the i-th index value, while using the same sampling constraint `constraint_vals` on all data values. """ function resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{<:SamplingConstraint}, constraint_vals::SamplingConstraint) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx, constraint_idxs[i]) values[i] = resample(val, constraint_vals) end indices, values end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint) -> Tuple{Vector{Float64}, Vector{Float64}} Resample an uncertain index-value dataset in an element-wise manner. Enforces a unique sampling constraint `constraint_idxs[i]` to both the i-th index value and to the i-th data value. """ function resample(udata::UncertainIndexValueDataset, constraint::Vector{<:SamplingConstraint}) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx, constraint[i]) values[i] = resample(val, constraint[i]) end indices, values end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint) -> Tuple{Vector{Float64}, Vector{Float64}} Resample an uncertain index-value dataset in an element-wise manner. Enforces a unique sampling constraint `constraint_idxs[i]` to the i-th index value. Also enforces a unique sampling constraint `constraint_vals[i]` to the i-th data value. 
""" function resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{<:SamplingConstraint}, constraint_vals::Vector{<:SamplingConstraint}) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx, constraint_idxs[i]) values[i] = resample(val, constraint_vals[i]) end indices, values end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint) -> Tuple{Vector{Float64}, Vector{Float64}} Resample an uncertain index-value dataset in an element-wise manner. Enforces the same sampling constraint `constraint_idxs` on all index values, while using the sampling constraint `constraint_vals[i]` to the i-th data value. """ function resample(udata::UncertainIndexValueDataset, constraint_idxs::SamplingConstraint, constraint_vals::Vector{<:SamplingConstraint}) n_vals = length(udata) indices = zeros(Float64, n_vals) values = zeros(Float64, n_vals) for i = 1:n_vals idx, val = udata[i] indices[i] = resample(idx, constraint_idxs) values[i] = resample(val, constraint_vals[i]) end indices, values end """ resample(udata::UncertainIndexValueDataset, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations an uncertain index-value dataset in an element-wise manner. """ function resample(udata::UncertainIndexValueDataset, n::Int) [resample(udata) for i = 1:n] end """ resample(udata::UncertainIndexValueDataset, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations an uncertain index-value dataset in an element-wise manner. Enforces the provided sampling `constraint` to all uncertain values in the dataset, both indices and data values. """ function resample(udata::UncertainIndexValueDataset, constraint::SamplingConstraint, n::Int) [resample(udata, constraint) for i = 1:n] end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::SamplingConstraint, constraint_vals::SamplingConstraint, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations of an uncertain index-value dataset in an element-wise manner. Enforces the same sampling constraint `constraint_idxs` to all index values, and the `constraint_vals` sampling constraint to all data values. """ function resample(udata::UncertainIndexValueDataset, constraint_idxs::SamplingConstraint, constraint_vals::SamplingConstraint, n::Int) [resample(udata, constraint_idxs, constraint_vals) for i = 1:n] end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations of an uncertain index-value dataset in an element-wise manner. Enforces a unique sampling constraint `constraint_idxs[i]` to both the i-th index value and to the i-th data value. """ function resample(udata::UncertainIndexValueDataset, constraint::Vector{<:SamplingConstraint}, n::Int) [resample(udata, constraint) for i = 1:n] end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations of an uncertain index-value dataset in an element-wise manner. Enforces a unique sampling constraint `constraint_idxs[i]` to the i-th index value, while using the same sampling constraint `constraint_vals` on all data values. 
""" function resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{<:SamplingConstraint}, constraint_vals::SamplingConstraint, n::Int) [resample(udata, constraint_idxs, constraint_vals) for i = 1:n] end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations of an uncertain index-value dataset in an element-wise manner. Enforces the same sampling constraint `constraint_idxs` on all index values, while using the sampling constraint `constraint_vals[i]` to the i-th data value. """ function resample(udata::UncertainIndexValueDataset, constraint_idxs::SamplingConstraint, constraint_vals::Vector{<:SamplingConstraint}, n::Int) [resample(udata, constraint_idxs, constraint_vals) for i = 1:n] end """ resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{SamplingConstraint}, constraint_vals::SamplingConstraint, n::Int) -> Vector{Tuple{Vector{Float64}, Vector{Float64}}} Resample `n` realizations of an uncertain index-value dataset in an element-wise manner. Enforces a unique sampling constraint `constraint_idxs[i]` to the i-th index value. Also enforces a unique sampling constraint `constraint_vals[i]` to the i-th data value. """ function resample(udata::UncertainIndexValueDataset, constraint_idxs::Vector{<:SamplingConstraint}, constraint_vals::Vector{<:SamplingConstraint}, n::Int) [resample(udata, constraint_idxs, constraint_vals) for i = 1:n] end export resample
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
6214
import ..UncertainDatasets: AbstractUncertainValueDataset, UncertainValueDataset, UVAL_COLLECTION_TYPES

import ..SamplingConstraints:
    NoConstraint,
    TruncateLowerQuantile,
    TruncateUpperQuantile,
    TruncateQuantiles

"""
    resample(x::UVAL_COLLECTION_TYPES, constraint::SamplingConstraint) -> Vector{T} where T

Resample `x` (a collection of uncertain values) once, applying the provided sampling
`constraint`.

Returns a `length(x)`-element vector. The `i`-th element of this vector is generated
by truncating the `i`-th uncertain value by the sampling `constraint`, then drawing
a single random number from the truncated value.

See also [`UVAL_COLLECTION_TYPES`](@ref).

## Example

```julia
# Generate some uncertain values where the `i`-th value is given by a normal
# distribution with mean `i` and a standard deviation drawn from a uniform
# distribution on `[0, 1]`.
uvals = [UncertainValue(Normal(i, rand())) for i = 1:100]

# Truncate each distribution at ±0.5 standard deviations, then resample.
resample(uvals, TruncateStd(0.5))
```
"""
function resample(uv::UVAL_COLLECTION_TYPES, constraint::SamplingConstraint)
    [resample(uv.values[i], constraint) for i in 1:length(uv)]
end

"""
    resample(x::UVAL_COLLECTION_TYPES, constraint::Vector{<:SamplingConstraint}) -> Vector{T} where T

Resample `x` (a collection of uncertain values) once, applying the provided sampling
`constraint`s. The number of constraints must match the number of elements in `x`.

Returns a `length(x)`-element vector. The `i`-th element of this vector is generated
by truncating the `i`-th uncertain value by the `i`-th sampling `constraint`, then
drawing a single random number from the truncated value.

See also [`UVAL_COLLECTION_TYPES`](@ref).

## Example

```julia
# Generate some uncertain values where the `i`-th value is given by a normal
# distribution with mean `i` and a standard deviation drawn from a uniform
# distribution on `[0, 1]`.
uvals = [UncertainValue(Normal(i, rand())) for i = 1:100]

# Truncate the first 50 distributions at ±0.5 standard deviations and the rest
# at ±1.0 standard deviations, then resample element-wise.
constraints = [i <= 50 ? TruncateStd(0.5) : TruncateStd(1.0) for i = 1:100]
resample(uvals, constraints)
```
"""
function resample(uv::UVAL_COLLECTION_TYPES, constraint::Vector{<:SamplingConstraint})
    [resample(uv.values[i], constraint[i]) for i in 1:length(uv)]
end

"""
    resample(x::UVAL_COLLECTION_TYPES, constraint::SamplingConstraint, n::Int) -> Vector{Vector{T}} where T

Resample `x` (a collection of uncertain values) `n` times, applying the provided
sampling `constraint`.

Returns an `n`-element vector of `length(x)`-element vectors. Each of these vectors
is an independent draw from `x`. The `i`-th element of each draw is generated by
truncating the `i`-th uncertain value by the sampling `constraint`, then drawing a
single random number from the truncated value.

See also [`UVAL_COLLECTION_TYPES`](@ref).

## Example

```julia
# Generate some uncertain values where the `i`-th value is given by a normal
# distribution with mean `i` and a standard deviation drawn from a uniform
# distribution on `[0, 1]`.
uvals = [UncertainValue(Normal(i, rand())) for i = 1:100]

# Truncate each distribution at the 90th percentile range, then draw ten
# independent realisations of the collection subject to that constraint.
resample(uvals, TruncateQuantiles(0.05, 0.95), 10)
```
"""
function resample(uv::UVAL_COLLECTION_TYPES, constraint::SamplingConstraint, n::Int)
    [[resample(uv.values[i], constraint) for i in 1:length(uv)] for k = 1:n]
end

"""
    resample(x::UVAL_COLLECTION_TYPES, constraint::Vector{<:SamplingConstraint}, n::Int) -> Vector{Vector{T}} where T

Resample `x` (a collection of uncertain values) `n` times, applying the provided
sampling `constraint`s.

Returns an `n`-element vector of `length(x)`-element vectors. Each of these vectors
is an independent draw from `x`. The `i`-th element of each draw is generated by
truncating the `i`-th uncertain value by the `i`-th sampling `constraint`, then
drawing a single random number from the truncated value.

See also [`UVAL_COLLECTION_TYPES`](@ref).

## Example

```julia
# Generate some uncertain values where the `i`-th value is given by a normal
# distribution with mean `i` and a standard deviation drawn from a uniform
# distribution on `[0, 1]`.
uvals = [UncertainValue(Normal(i, rand())) for i = 1:100]

# Truncate the first 50 elements at `± 0.5` standard deviations, and the
# last 50 elements at `± 1.2` standard deviations.
constraints = [i <= 50 ? TruncateStd(0.5) : TruncateStd(1.2) for i = 1:100]

# Apply the constraints element-wise, then draw ten independent realisations
# of the collection subject to those constraints.
resample(uvals, constraints, 10)
```
"""
function resample(uv::UVAL_COLLECTION_TYPES, constraint::Vector{<:SamplingConstraint}, n::Int)
    [[resample(uv.values[i], constraint[i]) for i in 1:length(uv)] for k = 1:n]
end

"""
    resample(x::UVAL_COLLECTION_TYPES) -> Vector{T} where T

Resample `x` (a collection of uncertain values) once by drawing a single random
number from each of the uncertain values in `x`.

See also [`UVAL_COLLECTION_TYPES`](@ref).

## Example

```julia
# Generate some uncertain values represented by gamma distributions
uvals = [UncertainValue(Gamma(i, rand())) for i = 1:100]

# Resample the collection once
resample(uvals)
```
"""
function resample(uvd::UVAL_COLLECTION_TYPES)
    L = length(uvd)
    [resample(uvd.values[i]) for i in 1:L]
end

"""
    resample(uvd::UVAL_COLLECTION_TYPES, n::Int) -> Vector{Vector{T}}

Draw `n` realisations of an uncertain value dataset according to the distributions
of the uncertain values comprising it.

See also [`UVAL_COLLECTION_TYPES`](@ref).

## Example

```julia
# Generate some uncertain values represented by gamma distributions
uvals = [UncertainValue(Gamma(i, rand())) for i = 1:100]

# Resample the collection a hundred times
resample(uvals, 100)
```
"""
function resample(uvd::UVAL_COLLECTION_TYPES, n::Int)
    L = length(uvd)
    [[resample(uvd.values[i]) for i in 1:L] for k in 1:n]
end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
567
# Resample a tuple of numbers and/or uncertain values once, drawing a single
# value from each element and converting the result to a float.
function resample(uvals::NTuple{N, Union{Real, AbstractUncertainValue}}) where N
    Tuple(convert(AbstractFloat, resample(uval)) for uval in uvals)
end

# Resample a tuple of numbers and/or uncertain values `n` times.
function resample(uvals::NTuple{N, Union{Real, AbstractUncertainValue}}, n::Int) where N
    [resample(uvals) for i = 1:n]
end

# Element-wise resampling: one draw per element, returned as a vector.
function resample_elwise(uvals::NTuple{N, Union{Real, AbstractUncertainValue}}) where N
    [resample(uval) for uval in uvals]
end

# Element-wise resampling: `n` draws per element; the `i`-th entry of the
# returned vector holds the `n` draws of `uvals[i]`.
function resample_elwise(uvals::NTuple{N, Union{Real, AbstractUncertainValue}}, n::Int) where N
    [[resample(uval) for i = 1:n] for uval in uvals]
end
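# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` and `Distributions` are loaded). Mixed tuples of plain
# numbers and uncertain values can be resampled jointly:
#
#   uval = UncertainValue(Normal(0, 1))
#   resample((2.0, uval, 5))         # -> e.g. (2.0, -0.31, 5.0)
#   resample((2.0, uval, 5), 10)     # -> 10 such tuples
#   resample_elwise((2.0, uval), 3)  # -> [[2.0, 2.0, 2.0], [x₁, x₂, x₃]]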
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
810
import ..SamplingConstraints: SamplingConstraint
import ..UncertainValues: CertainValue

# Plain numbers and certain values resample to themselves; sampling
# constraints have no effect, because there is nothing to truncate.
resample(x::Number) = x
resample(v::CertainValue) = v.value
resample(v::CertainValue, n::Int) = [v.value for i = 1:n]
resample(v::CertainValue, s::SamplingConstraint) = v.value
resample(v::CertainValue, s::SamplingConstraint, n::Int) = [v.value for i = 1:n]

constraints = [
    :(NoConstraint),
    :(TruncateLowerQuantile),
    :(TruncateUpperQuantile),
    :(TruncateQuantiles),
    :(TruncateMaximum),
    :(TruncateMinimum),
    :(TruncateRange),
    :(TruncateStd)
]

# Generate concrete methods for each constraint type, so that dispatch on a
# specific constraint also hits the no-op behaviour above.
for constraint in constraints
    funcs = quote
        resample(x::CertainValue, constraint::$(constraint)) = x.value
        resample(x::CertainValue, constraint::$(constraint), n::Int) = [x.value for i = 1:n]
    end
    eval(funcs)
end
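# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` is loaded):
#
#   v = UncertainValue(2.2)           # a CertainValue
#   resample(v)                       # -> 2.2
#   resample(v, TruncateStd(1), 3)    # -> [2.2, 2.2, 2.2]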
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
329
import Measurements: Measurement
import ..UncertainValues: UncertainValue
import Distributions: Normal

# A `Measurement` `val ± err` is interpreted as a normal distribution with
# mean `val` and standard deviation `err`, then resampled.
resample(m::Measurement{T}) where T = resample(UncertainValue(Normal, m.val, m.err))

function resample(m::Measurement{T}, n::Int) where T
    uval = UncertainValue(Normal, m.val, m.err)
    [resample(uval) for i = 1:n]
end
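# Usage sketch (editor's addition, not part of the library; assumes
# `Measurements` is loaded):
#
#   m = measurement(2.2, 0.3)   # equivalently: 2.2 ± 0.3
#   resample(m)                 # one draw from Normal(2.2, 0.3)
#   resample(m, 1000)           # 1000 draws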
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2725
import ..UncertainValues:
    TheoreticalDistributionScalarValue,
    AbstractUncertainTwoParameterScalarValue,
    AbstractUncertainThreeParameterScalarValue,
    UncertainScalarTheoreticalTwoParameter,
    UncertainScalarTheoreticalThreeParameter,
    UncertainScalarNormallyDistributed,
    UncertainScalarUniformlyDistributed,
    UncertainScalarBetaDistributed,
    UncertainScalarBetaPrimeDistributed,
    UncertainScalarBetaBinomialDistributed,
    UncertainScalarBinomialDistributed,
    UncertainScalarGammaDistributed,
    UncertainScalarFrechetDistributed

import Distributions

# Generic resampling for uncertain values furnished by theoretical distributions.
resample(uv::TheoreticalDistributionScalarValue) = rand(uv.distribution)
resample(uv::TheoreticalDistributionScalarValue, n::Int) = rand(uv.distribution, n)

# Custom resample methods for each type of uncertain scalars based on
# distributions (in case we want to implement custom sampling for some of them)
resample(uv::UncertainScalarTheoreticalThreeParameter) = rand(uv.distribution)
resample(uv::UncertainScalarTheoreticalTwoParameter) = rand(uv.distribution)
resample(uv::UncertainScalarNormallyDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarUniformlyDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarBetaDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarBetaPrimeDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarBetaBinomialDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarGammaDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarFrechetDistributed) = rand(uv.distribution)
resample(uv::UncertainScalarBinomialDistributed) = rand(uv.distribution)

resample(uv::UncertainScalarTheoreticalThreeParameter, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarTheoreticalTwoParameter, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarNormallyDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarUniformlyDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarBetaDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarBetaPrimeDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarBetaBinomialDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarGammaDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarFrechetDistributed, n::Int) = rand(uv.distribution, n)
resample(uv::UncertainScalarBinomialDistributed, n::Int) = rand(uv.distribution, n)

resample(x::Distributions.Truncated) = rand(x)
resample(x::Distributions.Truncated, n::Int) = rand(x, n)
resample(x::Distributions.Distribution) = rand(x)
resample(x::Distributions.Distribution, n::Int) = rand(x, n)
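# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` is loaded):
#
#   uval = UncertainValue(0, 0.2, Normal)   # Normal(0, 0.2)
#   resample(uval)        # a single draw
#   resample(uval, 100)   # 100 draws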
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
476
import ..UncertainValues.AbstractUncertainScalarKDE

"""
    resample(uv::AbstractUncertainScalarKDE)

Resample an uncertain value whose distribution is approximated using a
kernel density estimate once.
"""
resample(uv::AbstractUncertainScalarKDE) = rand(uv)

"""
    resample(uv::AbstractUncertainScalarKDE, n::Int)

Resample an uncertain value whose distribution is approximated using a
kernel density estimate `n` times.
"""
resample(uv::AbstractUncertainScalarKDE, n::Int) = rand(uv, n)
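# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` and `Distributions` are loaded). A plain vector of numbers
# triggers kernel density estimation:
#
#   uv = UncertainValue(rand(Normal(), 1000))   # -> UncertainScalarKDE
#   resample(uv, 50)                            # 50 draws from the KDE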
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
18466
import ..UncertainValues: getquantileindex, support, AbstractUncertainScalarKDE

import StatsBase: sample, sample!, Weights
import Distributions: Uniform

import ..SamplingConstraints:
    NoConstraint,
    TruncateLowerQuantile,
    TruncateUpperQuantile,
    TruncateQuantiles,
    TruncateMinimum,
    TruncateMaximum,
    TruncateRange,
    TruncateStd,
    fallback

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::NoConstraint)

Resample without constraints (use the full distribution representing the value).

## Example

```julia
some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

# Resample the uncertain value by resampling the full distribution once.
resample(uncertainval, NoConstraint())
```
"""
resample(uv::AbstractUncertainScalarKDE, constraint::NoConstraint) = resample(uv)

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::NoConstraint, n::Int)

Resample without constraints (use the full distribution representing the value).

## Example

```julia
some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

# Resample the uncertain value by resampling the full distribution n times
resample(uncertainval, NoConstraint(), n)
```
"""
resample(uv::AbstractUncertainScalarKDE, constraint::NoConstraint, n::Int) = resample(uv, n)

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateLowerQuantile)

Resample `uv` by first truncating below the kernel density estimate of the
distribution furnishing the value at some lower quantile, then resampling it once.

## Example

```julia
using UncertainData

some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

constraint = TruncateLowerQuantile(0.16)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateLowerQuantile)
    # Find the index of the kernel density estimated distribution
    # corresponding to the lower quantile at which we want to truncate.
    idx_lower_quantile = getquantileindex(uv, constraint.lower_quantile)

    # Box width
    δ = step(uv.range)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_lower_quantile:end] |> collect
    wts = Weights(uv.pdf[idx_lower_quantile:end])

    # Sample a box
    sampled_val = sample(range, wts)

    # Sample uniformly from within the box
    rand(Uniform(sampled_val, sampled_val + δ))
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateLowerQuantile, n::Int)

Resample `uv` by first truncating below the kernel density estimate of the
distribution furnishing the value at some lower quantile, then resampling it `n` times.

## Example

```julia
using UncertainData

some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

constraint = TruncateLowerQuantile(0.16)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 500 times.
resample(uncertainval, constraint, 500)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateLowerQuantile, n::Int)
    # Find the index of the kernel density estimated distribution
    # corresponding to the lower quantile at which we want to truncate.
    idx_lower_quantile = getquantileindex(uv, constraint.lower_quantile)

    # Box width
    δ = step(uv.range)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_lower_quantile:end] |> collect
    wts = Weights(uv.pdf[idx_lower_quantile:end])

    # Sample n boxes according to estimated weights (pdf)
    sampled_vals = Vector{Float64}(undef, n)
    sample!(range, wts, sampled_vals)

    # Sample uniformly from within each box
    [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n]
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateUpperQuantile)

Resample `uv` by first truncating above the kernel density estimate of the
distribution furnishing the value at some upper quantile, then resampling it once.

## Example

```julia
using UncertainData

some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

constraint = TruncateUpperQuantile(0.78)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateUpperQuantile)
    # Find the index of the kernel density estimated distribution
    # corresponding to the upper quantile at which we want to truncate.
    idx_upper_quantile = getquantileindex(uv, constraint.upper_quantile)

    # Box width
    δ = step(uv.range)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[1:idx_upper_quantile] |> collect
    wts = Weights(uv.pdf[1:idx_upper_quantile])

    # Sample a box
    sampled_val = sample(range, wts)

    # Sample uniformly from within the box
    rand(Uniform(sampled_val, sampled_val + δ))
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateUpperQuantile, n::Int)

Resample `uv` by first truncating above the kernel density estimate of the
distribution furnishing the value at some upper quantile, then resampling it `n` times.

## Example

```julia
using UncertainData

some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

constraint = TruncateUpperQuantile(0.78)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 500 times.
resample(uncertainval, constraint, 500)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateUpperQuantile, n::Int)
    # Find the index of the kernel density estimated distribution
    # corresponding to the upper quantile at which we want to truncate.
    idx_upper_quantile = getquantileindex(uv, constraint.upper_quantile)

    # Box width
    δ = step(uv.range)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[1:idx_upper_quantile] |> collect
    wts = Weights(uv.pdf[1:idx_upper_quantile])

    # Sample n boxes according to estimated weights (pdf)
    sampled_vals = Vector{Float64}(undef, n)
    sample!(range, wts, sampled_vals)

    # Sample uniformly from within each box
    [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n]
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateQuantiles)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value both above and below at some quantile range,
then resampling it once.

## Example

```julia
using UncertainData

some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

constraint = TruncateQuantiles(0.1, 0.9)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateQuantiles)
    # Find the indices of the kernel density estimated distribution
    # corresponding to the quantiles at which we want to truncate.
    idx_lower_quantile = getquantileindex(uv, constraint.lower_quantile)
    idx_upper_quantile = getquantileindex(uv, constraint.upper_quantile)

    # Box width
    δ = step(uv.range)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_lower_quantile:idx_upper_quantile] |> collect
    wts = Weights(uv.pdf[idx_lower_quantile:idx_upper_quantile])

    # Sample a box
    sampled_val = sample(range, wts)

    # Sample uniformly from within the box
    rand(Uniform(sampled_val, sampled_val + δ))
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateQuantiles, n::Int)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value both above and below at some quantile range,
then resampling it `n` times.

## Example

```julia
using UncertainData

some_sample = rand(Normal(), 1000)

# Calling UncertainValue with a single vector of numbers triggers KDE estimation
uncertainval = UncertainValue(some_sample) # -> UncertainScalarKDE

constraint = TruncateQuantiles(0.1, 0.9)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 500 times.
resample(uncertainval, constraint, 500)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateQuantiles, n::Int)
    # Find the indices of the kernel density estimated distribution
    # corresponding to the quantiles at which we want to truncate.
    idx_upper_quantile = getquantileindex(uv, constraint.upper_quantile)
    idx_lower_quantile = getquantileindex(uv, constraint.lower_quantile)

    # Box width
    δ = step(uv.range)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_lower_quantile:idx_upper_quantile] |> collect
    wts = Weights(uv.pdf[idx_lower_quantile:idx_upper_quantile])

    # Sample n boxes according to estimated weights (pdf)
    sampled_vals = Vector{Float64}(undef, n)
    sample!(range, wts, sampled_vals)

    # Sample uniformly from within each box
    [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n]
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value at some maximum value, then resampling it once.

## Example

```julia
# Uncertain value represented by a normal distribution with mean = 0 and
# standard deviation = 1.
uncertainval = UncertainValue(rand(Normal(0, 1), 1000))

constraint = TruncateMaximum(0.8) # accept no values larger than 0.8

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum)
    # Box width
    δ = step(uv.range)

    upper_bound = constraint.max
    idx_max = findlast(uv.range .<= upper_bound)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[1:idx_max] |> collect
    wts = Weights(uv.pdf[1:idx_max])

    # Sample a box
    sampled_val = sample(range, wts)

    # Sample uniformly from within the box
    rand(Uniform(sampled_val, sampled_val + δ))
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum, n::Int)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value at some maximum value, then resampling it `n` times.

## Example

```julia
# Uncertain value represented by a normal distribution with mean = 0 and
# standard deviation = 1.
uncertainval = UncertainValue(rand(Normal(0, 1), 1000))

constraint = TruncateMaximum(0.8) # accept no values larger than 0.8

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 700 times.
resample(uncertainval, constraint, 700)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum, n::Int)
    # Box width
    δ = step(uv.range)

    upper_bound = constraint.max
    idx_max = findlast(uv.range .<= upper_bound)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[1:idx_max] |> collect
    wts = Weights(uv.pdf[1:idx_max])

    # Sample n boxes according to estimated pdf
    sampled_vals = Vector{Float64}(undef, n)
    sample!(range, wts, sampled_vals)

    # Sample uniformly from within each box
    [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n]
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value at some minimum value, then resampling it once.

## Example

```julia
# Uncertain value represented by a normal distribution with mean = 0 and
# standard deviation = 1.
uncertainval = UncertainValue(rand(Normal(0, 1), 1000))

constraint = TruncateMinimum(0.2) # accept no values smaller than 0.2

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum)
    # Box width
    δ = step(uv.range)

    lower_bound = constraint.min
    idx_min = findfirst(uv.range .>= lower_bound)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_min:end] |> collect
    wts = Weights(uv.pdf[idx_min:end])

    # Sample a box
    sampled_val = sample(range, wts)

    # Sample uniformly from within the box
    rand(Uniform(sampled_val, sampled_val + δ))
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum, n::Int)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value at some minimum value, then resampling it `n` times.

## Example

```julia
# Uncertain value represented by a normal distribution with mean = 0 and
# standard deviation = 1.
uncertainval = UncertainValue(rand(Normal(0, 1), 1000))

constraint = TruncateMinimum(0.2) # accept no values smaller than 0.2

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 700 times.
resample(uncertainval, constraint, 700)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum, n::Int)
    # Box width
    δ = step(uv.range)

    lower_bound = constraint.min
    idx_min = findfirst(uv.range .>= lower_bound)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_min:end] |> collect
    wts = Weights(uv.pdf[idx_min:end])

    # Sample n boxes according to estimated pdf
    sampled_vals = Vector{Float64}(undef, n)
    sample!(range, wts, sampled_vals)

    # Sample uniformly from within each box
    [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n]
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateRange)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value at some minimum and maximum values,
then resampling it once.

## Example

```julia
# Uncertain value represented by a normal distribution with mean = 0 and
# standard deviation = 1.
uncertainval = UncertainValue(rand(Normal(0, 1), 1000))

# Only accept values in the range [-0.9, 1.2]
constraint = TruncateRange(-0.9, 1.2)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateRange)
    # Box width
    δ = step(uv.range)

    lower_bound = constraint.min
    upper_bound = constraint.max

    idx_min = findfirst(uv.range .>= lower_bound)
    idx_max = findlast(uv.range .<= upper_bound)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_min:idx_max] |> collect
    wts = Weights(uv.pdf[idx_min:idx_max])

    # Sample a box
    sampled_val = sample(range, wts)

    # Sample uniformly from within the box
    rand(Uniform(sampled_val, sampled_val + δ))
end

"""
    resample(uv::AbstractUncertainScalarKDE, constraint::TruncateRange, n::Int)

Resample `uv` by first truncating the kernel density estimate of the
distribution furnishing the value at some minimum and maximum values,
then resampling it `n` times.

## Example

```julia
# Uncertain value represented by a normal distribution with mean = 0 and
# standard deviation = 1.
uncertainval = UncertainValue(rand(Normal(0, 1), 1000))

# Only accept values in the range [-0.9, 1.2]
constraint = TruncateRange(-0.9, 1.2)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 300 times.
resample(uncertainval, constraint, 300)
```
"""
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateRange, n::Int)
    # Box width
    δ = step(uv.range)

    lower_bound = constraint.min
    upper_bound = constraint.max

    idx_min = findfirst(uv.range .>= lower_bound)
    idx_max = findlast(uv.range .<= upper_bound)

    # Subset the values and weights (values of the pdf at those values)
    range = uv.range[idx_min:idx_max] |> collect
    wts = Weights(uv.pdf[idx_min:idx_max])

    # Sample n boxes according to estimated pdf
    sampled_vals = Vector{Float64}(undef, n)
    sample!(range, wts, sampled_vals)

    # Sample uniformly from within each box
    [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n]
end

# Resampling UncertainScalarKDE with TruncateStd is undefined, so fall back to quantiles
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateStd)
    resample(uv, fallback(uv, constraint))
end

# Resampling UncertainScalarKDE with TruncateStd is undefined, so fall back to quantiles
function resample(uv::AbstractUncertainScalarKDE, constraint::TruncateStd, n::Int)
    resample(uv, fallback(uv, constraint), n)
end
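# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` and `Distributions` are loaded). Constrained resampling of a
# KDE-estimated value:
#
#   uv = UncertainValue(rand(Normal(), 1000))
#   resample(uv, TruncateQuantiles(0.1, 0.9), 100)  # 100 draws from the middle 80%
#   resample(uv, TruncateRange(-0.5, 0.5))          # one draw from [-0.5, 0.5]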
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
695
import ..UncertainValues.AbstractUncertainValue
import ..UncertainValues.TheoreticalFittedUncertainScalar
import ..UncertainValues.UncertainScalarTheoreticalFit
import ..UncertainValues.FittedDistribution

########################################################################
# Resampling without constraints
########################################################################
resample(ue::UncertainScalarTheoreticalFit) = rand(ue.distribution.distribution)
resample(ue::UncertainScalarTheoreticalFit, n::Int) = rand(ue.distribution.distribution, n)

resample(fd::FittedDistribution) = rand(fd.distribution)
resample(fd::FittedDistribution, n::Int) = rand(fd.distribution, n)
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1133
import ..UncertainValues: AbstractScalarPopulation
import Base.rand
import StatsBase.sample

function resample(p::AbstractScalarPopulation)
    rand(p)
end

function resample(p::AbstractScalarPopulation, n::Int)
    rand(p, n)
end

function resample(p::AbstractScalarPopulation, constraint::SamplingConstraint)
    rand(constrain(p, constraint))
end

function resample(p::AbstractScalarPopulation, constraint::SamplingConstraint, n::Int)
    rand(constrain(p, constraint), n)
end

constraints = [
    :(NoConstraint),
    :(TruncateLowerQuantile),
    :(TruncateUpperQuantile),
    :(TruncateQuantiles),
    :(TruncateMaximum),
    :(TruncateMinimum),
    :(TruncateRange),
    :(TruncateStd)
]

# Generate methods specialised on each concrete constraint type: constrain the
# population first, then sample the constrained population.
for constraint in constraints
    funcs = quote
        function resample(p::AbstractScalarPopulation{T, PW}, constraint::$(constraint)) where {T, PW}
            rand(constrain(p, constraint))
        end

        function resample(p::AbstractScalarPopulation{T, PW}, constraint::$(constraint), n::Int) where {T, PW}
            rand(constrain(p, constraint), n)
        end
    end
    eval(funcs)
end
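# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` is loaded and that `UncertainScalarPopulation(values, weights)`
# constructs a weighted population):
#
#   pop = UncertainScalarPopulation([1.0, 2.0, 3.0], [0.2, 0.3, 0.5])
#   resample(pop, 100)                       # 100 weighted draws
#   resample(pop, TruncateMaximum(2.5), 10)  # draws never exceed 2.5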
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2127
import ..UncertainValues:
    AbstractUncertainValue,
    FittedDistribution,
    TheoreticalFittedUncertainScalar,
    UncertainScalarTheoreticalFit

import Distributions: Truncated
import StatsBase: quantile, std, mean
import Distributions: support

########################################################################
# Resampling without constraints
########################################################################
"""
    resample(uv::AbstractUncertainValue)

Sample the uncertain value once, drawing values from the entire support of the
probability distribution furnishing it.
"""
resample(uv::AbstractUncertainValue) = rand(uv.distribution)

"""
    resample(uv::AbstractUncertainValue, n::Int)

Sample the uncertain value `n` times, drawing values from the entire support
of the probability distribution furnishing it.
"""
resample(uv::AbstractUncertainValue, n::Int) = rand(uv.distribution, n)

"""
    resample(fd::FittedDistribution)

Resample a `FittedDistribution` instance once.
"""
resample(fd::FittedDistribution) = rand(fd.distribution)

"""
    resample(fd::FittedDistribution, n::Int)

Resample a `FittedDistribution` instance `n` times.
"""
resample(fd::FittedDistribution, n::Int) = rand(fd.distribution, n)

"""
    resample(ue::UncertainScalarTheoreticalFit)

Resample an `UncertainScalarTheoreticalFit` instance once.
"""
resample(ue::UncertainScalarTheoreticalFit) = rand(ue.distribution.distribution)

"""
    resample(ue::UncertainScalarTheoreticalFit, n::Int)

Resample an `UncertainScalarTheoreticalFit` instance `n` times.
"""
resample(ue::UncertainScalarTheoreticalFit, n::Int) = rand(ue.distribution.distribution, n)

"""
    resample(uv::TheoreticalFittedUncertainScalar)

Resample a `TheoreticalFittedUncertainScalar` instance once.
"""
resample(uv::TheoreticalFittedUncertainScalar) = rand(uv.distribution.distribution)

"""
    resample(uv::TheoreticalFittedUncertainScalar, n::Int)

Resample a `TheoreticalFittedUncertainScalar` instance `n` times.
"""
resample(uv::TheoreticalFittedUncertainScalar, n::Int) = rand(uv.distribution.distribution, n)
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
13488
import ..UncertainValues.AbstractUncertainValue
import ..SamplingConstraints:
    SamplingConstraint,
    NoConstraint,
    TruncateLowerQuantile,
    TruncateUpperQuantile,
    TruncateQuantiles,
    TruncateMinimum,
    TruncateMaximum,
    TruncateRange,
    TruncateStd

import Distributions: truncated

########################################################################
# Resampling with constraints
########################################################################
"""
Resample an uncertain value by first truncating its furnishing distribution
with the provided sampling `constraint`.
"""
function resample(uv::AbstractUncertainValue, constraint::SamplingConstraint) end

"""
    resample(uv::AbstractUncertainValue, constraint::NoConstraint)

Resample an uncertain value without constraints (use the full furnishing
distribution).

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)

# Resample the uncertain value by resampling the full distribution once.
resample(uncertainval, NoConstraint())
```
"""
resample(uv::AbstractUncertainValue, constraint::NoConstraint) = resample(uv)

"""
    resample(uv::AbstractUncertainValue, constraint::NoConstraint, n::Int)

Resample an uncertain value without constraints (use the full furnishing
distribution).

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)

# Resample the uncertain value by resampling the full distribution 1000 times.
resample(uncertainval, NoConstraint(), 1000)
```
"""
resample(uv::AbstractUncertainValue, constraint::NoConstraint, n::Int) = resample(uv, n)

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateLowerQuantile)

Resample by first truncating the distribution representing the value at a
lower quantile, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)
constraint = TruncateLowerQuantile(0.16)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateLowerQuantile)
    # Apply (another level of) truncation, then sample
    lower_bound = quantile(uv.distribution, constraint.lower_quantile)
    upper_bound = maximum(uv)

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateLowerQuantile, n::Int)

Resample by first truncating the distribution representing the value at a
lower quantile, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)
constraint = TruncateLowerQuantile(0.16)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateLowerQuantile, n::Int)
    # Apply (another level of) truncation, then sample
    lower_bound = quantile(uv.distribution, constraint.lower_quantile)
    upper_bound = maximum(uv)

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateUpperQuantile)

Resample by first truncating the distribution representing the value at an
upper quantile, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)
constraint = TruncateUpperQuantile(0.8)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateUpperQuantile)
    # Apply (another level of) truncation, then sample
    lower_bound = minimum(uv)
    upper_bound = quantile(uv.distribution, constraint.upper_quantile)

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateUpperQuantile, n::Int)

Resample by first truncating the distribution representing the value at an
upper quantile, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)
constraint = TruncateUpperQuantile(0.8)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateUpperQuantile, n::Int)
    # Apply (another level of) truncation, then sample
    lower_bound = minimum(uv)
    upper_bound = quantile(uv.distribution, constraint.upper_quantile)

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateQuantiles)

Resample by first truncating the distribution representing the value at a
pair of quantiles, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 1, Uniform)
constraint = TruncateQuantiles(0.2, 0.8)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateQuantiles)
    # Apply (another level of) truncation, then sample
    lower_bound = quantile(uv.distribution, constraint.lower_quantile)
    upper_bound = quantile(uv.distribution, constraint.upper_quantile)

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateQuantiles, n::Int)

Resample by first truncating the distribution representing the value at a
pair of quantiles, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 1, Uniform)
constraint = TruncateQuantiles(0.2, 0.8)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateQuantiles, n::Int)
    # Apply (another level of) truncation, then sample
    lower_bound = quantile(uv.distribution, constraint.lower_quantile)
    upper_bound = quantile(uv.distribution, constraint.upper_quantile)

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateStd, n::Int;
        n_draws::Int = 10000)

Resample by first truncating the distribution representing the value to some
multiple of its standard deviation around the mean.

## Example

```julia
uncertainval = UncertainValue(0, 0.8, Normal)

# Accept values only in the range mean ± 1.1 standard deviations
constraint = TruncateStd(1.1)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateStd, n::Int;
        n_draws::Int = 10000)
    # Estimate the mean and standard deviation from a single batch of draws,
    # truncate at mean ± nσ standard deviations, then sample.
    draws = resample(uv, n_draws)
    stdev = std(draws)
    m = mean(draws)
    lower_bound = m - constraint.nσ * stdev
    upper_bound = m + constraint.nσ * stdev

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateMinimum)

Resample by first truncating the distribution representing the value at some
minimum value, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)
constraint = TruncateMinimum(-0.5) # accept no values less than -0.5

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateMinimum)
    # Apply (another level of) truncation, then sample
    lower_bound = constraint.min
    upper_bound = maximum(uv)
    lower_bound > upper_bound ? error("lower bound > upper bound") : nothing

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateMinimum, n::Int)

Resample by first truncating the distribution representing the value at some
minimum value, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.2, Normal)
constraint = TruncateMinimum(-0.5)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateMinimum, n::Int)
    # Apply (another level of) truncation, then sample
    lower_bound = constraint.min
    upper_bound = maximum(uv)
    lower_bound > upper_bound ? error("lower bound > upper bound") : nothing

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateMaximum)

Resample by first truncating the distribution representing the value at some
maximum value, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.8, Normal)
constraint = TruncateMaximum(1.1) # accept no values larger than 1.1

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateMaximum)
    # Apply (another level of) truncation, then sample
    upper_bound = constraint.max
    lower_bound = minimum(uv)
    lower_bound > upper_bound ? error("lower bound > upper bound") : nothing

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateMaximum, n::Int)

Resample by first truncating the distribution representing the value at some
maximum value, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.8, Normal)
constraint = TruncateMaximum(1.1) # accept no values larger than 1.1

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateMaximum, n::Int)
    # Apply (another level of) truncation, then sample
    lower_bound = minimum(uv)
    upper_bound = constraint.max
    lower_bound > upper_bound ? error("lower bound > upper bound") : nothing

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateRange)

Resample by first truncating the distribution representing the value to some
range of values, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.8, Normal)
constraint = TruncateRange(-0.7, 1.1) # accept values only in range [-0.7, 1.1]

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateRange)
    # Apply (another level of) truncation, then sample
    lower_bound = constraint.min
    upper_bound = constraint.max
    lower_bound > upper_bound ? error("lower bound > upper bound") : nothing

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateRange, n::Int)

Resample by first truncating the distribution representing the value to some
range of values, then performing the resampling.

## Example

```julia
uncertainval = UncertainValue(0, 0.8, Normal)
constraint = TruncateRange(-0.7, 1.1) # accept values only in range [-0.7, 1.1]

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution 1000 times.
resample(uncertainval, constraint, 1000)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateRange, n::Int)
    # Apply (another level of) truncation, then sample
    lower_bound = constraint.min
    upper_bound = constraint.max
    lower_bound > upper_bound ? error("lower bound > upper bound") : nothing

    rand(truncated(uv.distribution, lower_bound, upper_bound), n)
end

"""
    resample(uv::AbstractUncertainValue, constraint::TruncateStd;
        n_draws::Int = 10000)

Resample by first truncating the distribution representing the value to some
multiple of its standard deviation around the mean.

## Example

```julia
uncertainval = UncertainValue(0, 0.8, Normal)

# Accept values only in the range mean ± 1.1 standard deviations
constraint = TruncateStd(1.1)

# Resample the uncertain value by truncating the distribution furnishing it,
# then resampling the new distribution once.
resample(uncertainval, constraint)
```
"""
function resample(uv::AbstractUncertainValue, constraint::TruncateStd;
        n_draws::Int = 10000)
    # Estimate the mean and standard deviation from a single batch of draws,
    # truncate at mean ± nσ standard deviations, then sample once.
    draws = resample(uv, n_draws)
    stdev = std(draws)
    m = mean(draws)
    lower_bound = m - constraint.nσ * stdev
    upper_bound = m + constraint.nσ * stdev

    rand(truncated(uv.distribution, lower_bound, upper_bound))
end

export resample
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1040
""" resample(uvals::Vector{AbstractUncertainValue}, c::SamplingConstrant) Treat `uvals` as a dataset and resample each value of `uvals` once, Returns an `length(uvals)`-element vector. """ resample(uvals::Vector{AbstractUncertainValue}, c::SamplingConstrant) = resample.(uvals, c) """ resample(uvals::Vector{AbstractUncertainValue}, n::Int) Treat `uvals` as a dataset and resample it `n` times. Returns `n` resampled draws of `uvals`, each being a `length(uvals)`-element vector. For each returned vector, the i-th element is a unique draw of `uvals[i]`. """ function resample(uvals::Vector{AbstractUncertainValue}, n::Int) [resample.(uvals) for i = 1:n] end """ resample_elwise(uvals::Vector{AbstractUncertainValue}, n::Int) Resample each element in `uvals` `n` times. The i-th entry in the returned vector is a `n`-element vector consisting of `n` unique draws of `uvals[i]`. """ function resample_elwise(uvals::Vector{AbstractUncertainValue}, n::Int) [resample(uvals[i], n) for i = 1:length(uvals)] end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
983
""" resample(uvals::Vector{AbstractUncertainValue}) Treat `uvals` as a dataset and resample each value of `uvals` once. Returns an `length(uvals)`-element vector. """ resample(uvals::Vector{AbstractUncertainValue}) = resample.(uvals) """ resample(uvals::Vector{AbstractUncertainValue}, n::Int) Treat `uvals` as a dataset and resample it `n` times. Returns `n` resampled draws of `uvals`, each being a `length(uvals)`-element vector. For each returned vector, the i-th element is a unique draw of `uvals[i]`. """ function resample(uvals::Vector{AbstractUncertainValue}, n::Int) [resample.(uvals) for i = 1:n] end """ resample_elwise(uvals::Vector{AbstractUncertainValue}, n::Int) Resample each element in `uvals` `n` times. The i-th entry in the returned vector is a `n`-element vector consisting of `n` unique draws of `uvals[i]`. """ function resample_elwise(uvals::Vector{AbstractUncertainValue}, n::Int) [resample(uvals[i], n) for i = 1:length(uvals)] end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1527
resample(uv::AbstractVector{<:Number}) = uv

# No constraints
function resample(uv::DT) where {DT <: AbstractVector{<:AbstractUncertainValue}}
    resample.(uv)
end

function resample(uv::DT, n::Int) where {DT <: AbstractVector{<:AbstractUncertainValue}}
    [resample.(uv) for i = 1:n]
end

# A vector of sampling constraints, applied element-wise
function resample(uv::DT, constraint::Vector{<:SamplingConstraint}) where {
        DT <: AbstractVector{<:AbstractUncertainValue}}
    [resample(uv[i], constraint[i]) for i in 1:length(uv)]
end

function resample(uv::DT, constraint::Vector{<:SamplingConstraint}, n::Int) where {
        DT <: AbstractVector{<:AbstractUncertainValue}}
    [[resample(uv[i], constraint[i]) for i in 1:length(uv)] for k = 1:n]
end

# A single sampling constraint applied to every element
function resample(uv::DT, constraint::SamplingConstraint) where {
        DT <: AbstractVector{<:AbstractUncertainValue}}
    [resample(uv[i], constraint) for i in 1:length(uv)]
end

function resample(uv::DT, constraint::SamplingConstraint, n::Int) where {
        DT <: AbstractVector{<:AbstractUncertainValue}}
    [[resample(uv[i], constraint) for i in 1:length(uv)] for k = 1:n]
end

# No-op constraint: sample each value from its full distribution.
function resample(uv::DT, constraint::NoConstraint) where {
        DT <: AbstractVector{<:AbstractUncertainValue}}
    return resample.(uv)
end

function resample(uv::DT, constraint::NoConstraint, n::Int) where {
        DT <: AbstractVector{<:AbstractUncertainValue}}
    return [resample.(uv) for i = 1:n]
end
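# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` and `Distributions` are loaded):
#
#   uvals = [UncertainValue(Normal(i, 0.5)) for i = 1:5]
#   resample(uvals, TruncateStd(1))                      # one constrained draw
#   resample(uvals, [TruncateStd(i/2) for i = 1:5], 10)  # element-wise constraints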
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
960
using Reexport

@reexport module SamplingConstraints
    import Distributions

    # Sampling constraint types
    include("constraint_definitions.jl")

    # Fallbacks when encountering incompatible sampling constraints
    include("fallback_constraints.jl")

    # Truncate values
    include("truncation/truncate.jl")

    # Constrain uncertain values
    include("constrain_uncertainvalue.jl")
    include("constrain_certain_value.jl")
    include("constrain_population.jl")

    # Constrain uncertain datasets
    include("constrain_uncertaindataset.jl")
    include("constrain_uncertainvaluedataset.jl")
    include("constrain_uncertainindexdataset.jl")

    include("ordered_sequences/ordered_sequences.jl")
end # module

"""
    SamplingConstraints

A module defining:

1. Sampling constraints for the data types in the `UncertainValues` and
    `UncertainDatasets` modules.
2. Functions for resampling those data types.
"""
SamplingConstraints
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
358
import ..UncertainValues: CertainValue

# Certain values are unaffected by sampling constraints, so constraining them
# simply returns the value itself.
constrain(v::CertainValue) = v
constrain(v::CertainValue, s::SamplingConstraint) = v
constrain(v::CertainValue, s::TruncateLowerQuantile) = v
constrain(v::CertainValue, s::TruncateUpperQuantile) = v
constrain(v::CertainValue, s::TruncateQuantiles) = v
constrain(v::CertainValue, s::TruncateStd) = v

export constrain
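# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` is loaded):
#
#   v = UncertainValue(2.2)                     # a CertainValue
#   constrain(v, TruncateQuantiles(0.1, 0.9))   # -> v, unchanged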
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
724
import ..UncertainValues: AbstractScalarPopulation, ConstrainedUncertainScalarPopulation
import StatsBase
import StatsBase: AbstractWeights

"""
    constrain(pop::AbstractScalarPopulation, constraint::SamplingConstraint,
        n::Int = 30000)

Constrain a scalar population (e.g. an `UncertainScalarPopulation`) by the
given sampling `constraint`. If the sampling constraint requires resampling
to compute, resample `n` times.
"""
function constrain(pop::AbstractScalarPopulation{T, PW}, constraint::SamplingConstraint,
        n::Int = 30000) where {T, PW}
    # Get the population members and weights that fulfill the constraint and
    # return them as a ConstrainedUncertainScalarPopulation
    truncate(pop, constraint, n)
end

export constrain
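# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` is loaded and that `UncertainScalarPopulation(values, weights)`
# constructs a weighted population):
#
#   pop = UncertainScalarPopulation([1.0, 2.0, 3.0], [0.2, 0.3, 0.5])
#   constrain(pop, TruncateMaximum(2.5))   # members above 2.5 are dropped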
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1972
import ..UncertainDatasets: ConstrainedUncertainDataset

"""
    constrain(udata::UncertainDataset,
        constraint::SamplingConstraint) -> ConstrainedUncertainDataset

Return an uncertain dataset by applying the `constraint` to each uncertain
value in `udata`.
"""
constrain(udata::UncertainDataset, constraint::SamplingConstraint) =
    ConstrainedUncertainDataset([constrain(uval, constraint) for uval in udata])

"""
    constrain(udata::UncertainDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint} -> ConstrainedUncertainDataset

Return an uncertain dataset by applying a different sampling constraint to
each uncertain value in `udata`.
"""
function constrain(udata::UncertainDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint}
    if length(udata) != length(constraints)
        error("Number of sampling constraints must match length of dataset.")
    end

    n_vals = length(udata)

    ConstrainedUncertainDataset([constrain(udata[i], constraints[i]) for i in 1:n_vals])
end

"""
    constrain(udata::ConstrainedUncertainDataset,
        constraint::SamplingConstraint) -> ConstrainedUncertainDataset

Return an uncertain dataset by applying the `constraint` to each uncertain
value in `udata`.
"""
constrain(udata::ConstrainedUncertainDataset, constraint::SamplingConstraint) =
    ConstrainedUncertainDataset([constrain(uval, constraint) for uval in udata])

"""
    constrain(udata::ConstrainedUncertainDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint} -> ConstrainedUncertainDataset

Return an uncertain dataset by applying a different sampling constraint to
each uncertain value in `udata`.
"""
function constrain(udata::ConstrainedUncertainDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint}
    if length(udata) != length(constraints)
        error("Number of sampling constraints must match length of dataset.")
    end

    n_vals = length(udata)

    ConstrainedUncertainDataset([constrain(udata[i], constraints[i]) for i in 1:n_vals])
end

export constrain
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2206
import ..UncertainDatasets: AbstractUncertainIndexDataset, UncertainIndexDataset
import ..UncertainDatasets: ConstrainedUncertainIndexDataset

"""
    constrain(udata::UncertainIndexDataset,
        constraint::SamplingConstraint) -> ConstrainedUncertainIndexDataset

Return an uncertain dataset by applying the `constraint` to each uncertain
value in `udata`.
"""
constrain(udata::UncertainIndexDataset, constraint::SamplingConstraint) =
    ConstrainedUncertainIndexDataset([constrain(uval, constraint) for uval in udata])

"""
    constrain(udata::UncertainIndexDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint} -> ConstrainedUncertainIndexDataset

Return an uncertain dataset by applying a different sampling constraint to
each uncertain value in `udata`.
"""
function constrain(udata::UncertainIndexDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint}
    if length(udata) != length(constraints)
        error("Number of sampling constraints must match length of dataset.")
    end

    n_vals = length(udata)

    ConstrainedUncertainIndexDataset([constrain(udata[i], constraints[i]) for i in 1:n_vals])
end

"""
    constrain(udata::ConstrainedUncertainIndexDataset,
        constraint::SamplingConstraint) -> ConstrainedUncertainIndexDataset

Return an uncertain dataset by applying the `constraint` to each uncertain
value in `udata`.
"""
constrain(udata::ConstrainedUncertainIndexDataset, constraint::SamplingConstraint) =
    ConstrainedUncertainIndexDataset([constrain(uval, constraint) for uval in udata])

"""
    constrain(udata::ConstrainedUncertainIndexDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint} -> ConstrainedUncertainIndexDataset

Return an uncertain dataset by applying a different sampling constraint to
each uncertain value in `udata`.
"""
function constrain(udata::ConstrainedUncertainIndexDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint}
    if length(udata) != length(constraints)
        error("Number of sampling constraints must match length of dataset.")
    end

    n_vals = length(udata)

    ConstrainedUncertainIndexDataset([constrain(udata[i], constraints[i]) for i in 1:n_vals])
end

export constrain, verify_constraints
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5835
#############################################################
# Constraining values
############################################################

"""
    constrain_kde_distribution(uv::AbstractUncertainScalarKDE,
        constraint::SamplingConstraint)

Return the `TruncatedUncertainScalarKDE` resulting from applying `constraint`
to `uv`.
"""
function constrain_kde_distribution(uv::AbstractUncertainScalarKDE,
        constraint::SamplingConstraint)
    range_subset, pdf_subset, idx_min, idx_max = truncate(uv, constraint)

    TruncatedUncertainScalarKDE(
        UnivariateKDE(range_subset, pdf_subset),
        uv.values,
        range_subset,
        Weights(pdf_subset)
    )
end

constrain_kde_distribution(uv::AbstractUncertainScalarKDE, constraint::NoConstraint) = uv

#############################################################
# Uncertain values represented by theoretical distributions
# with parameters fitted to empirical data
############################################################

"""
    constrain_theoretical_fitted_distribution(
        uv::TheoreticalFittedUncertainScalar,
        truncated_dist::Distribution)

Return a constrained (truncated) version of an uncertain value represented
by a theoretical distribution with parameters fitted to empirical data.
"""
function constrain_theoretical_fitted_distribution(
        uv::TheoreticalFittedUncertainScalar,
        truncated_dist::Distribution)

    ConstrainedUncertainScalarTheoreticalFit(FittedDistribution(truncated_dist), uv.values)
end

#############################################################
# Uncertain values represented by theoretical distributions
############################################################

"""
    constrain_theoretical_distribution(
        uv::AbstractUncertainOneParameterScalarValue,
        truncated_dist::Distribution)

Return a constrained (truncated) version of an uncertain value represented
by a one-parameter theoretical distribution.
"""
function constrain_theoretical_distribution(
        uv::AbstractUncertainOneParameterScalarValue,
        truncated_dist::Distribution)

    # Get parameters of original distribution
    p1 = fieldnames(typeof(uv))[2]

    ConstrainedUncertainScalarValueOneParameter(
        truncated_dist,
        getfield(uv, p1)
    )
end

"""
    constrain_theoretical_distribution(
        uv::AbstractUncertainTwoParameterScalarValue,
        truncated_dist::Distribution)

Return a constrained (truncated) version of an uncertain value represented
by a two-parameter theoretical distribution.
"""
function constrain_theoretical_distribution(
        uv::AbstractUncertainTwoParameterScalarValue,
        truncated_dist::Distribution)

    # Get parameters of original distribution
    p1, p2 = (fieldnames(typeof(uv))[2:3]...,)

    ConstrainedUncertainScalarValueTwoParameter(
        truncated_dist,
        getfield(uv, p1),
        getfield(uv, p2)
    )
end

"""
    constrain_theoretical_distribution(
        uv::AbstractUncertainThreeParameterScalarValue,
        truncated_dist::Distribution)

Return a constrained (truncated) version of an uncertain value represented
by a three-parameter theoretical distribution.
"""
function constrain_theoretical_distribution(
        uv::AbstractUncertainThreeParameterScalarValue,
        truncated_dist::Distribution)

    # Get parameters of original distribution
    p1, p2, p3 = (fieldnames(typeof(uv))[2:4]...,)

    ConstrainedUncertainScalarValueThreeParameter(
        truncated_dist,
        getfield(uv, p1),
        getfield(uv, p2),
        getfield(uv, p3)
    )
end

#############################################################
# Truncate uncertain values
############################################################

"""
    constrain(uv::AbstractUncertainValue, constraint::SamplingConstraint)

Apply the `constraint` and truncate the support of the distribution furnishing
the uncertain value `uv`. Returns a constrained uncertain value.
"""
constrain(uv::AbstractUncertainValue, constraint::SamplingConstraint)

"""
    constrain(uv::TheoreticalDistributionScalarValue,
        constraint::SamplingConstraint) -> TheoreticalDistributionScalarValue

Apply the `constraint` and truncate the support of an uncertain value `uv`
furnished by a theoretical distribution.
"""
function constrain(uv::TheoreticalDistributionScalarValue, constraint::SamplingConstraint)
    constrain_theoretical_distribution(uv, truncate(uv, constraint))
end

"""
    constrain(uv::TheoreticalFittedUncertainScalar,
        constraint::SamplingConstraint) -> ConstrainedUncertainScalarTheoreticalFit

Apply the `constraint` and truncate the support of an uncertain value `uv`
furnished by a theoretical distribution where parameters are fitted to
empirical data.
"""
function constrain(uv::TheoreticalFittedUncertainScalar, constraint::SamplingConstraint)
    truncated_dist = truncate(uv, constraint)
    constrain_theoretical_fitted_distribution(uv, truncated_dist)
end

"""
    constrain(uv::AbstractUncertainScalarKDE,
        constraint::SamplingConstraint) -> TruncatedUncertainScalarKDE

Apply the `constraint` and truncate the support of an uncertain value `uv`
represented by a kernel density estimate.
"""
function constrain(uv::AbstractUncertainScalarKDE, constraint::SamplingConstraint)
    constrain_kde_distribution(uv, constraint)
end

constrain(uvals::Vector{AbstractUncertainValue}, constraint::SamplingConstraint) =
    [constrain(uval, constraint) for uval in uvals]

function constrain(uvals::Vector{AbstractUncertainValue},
        constraints::Vector{SamplingConstraint})
    [constrain(uvals[i], constraints[i]) for i = 1:length(uvals)]
end

export
    ConstrainedUncertainScalarValueTwoParameter,
    ConstrainedUncertainScalarValueThreeParameter,
    truncate,
    constrain
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3244
import ..UncertainDatasets: AbstractUncertainValueDataset, UncertainValueDataset
import ..UncertainDatasets: ConstrainedUncertainValueDataset

"""
    constrain(udata::Vector{<:AbstractUncertainValue},
        constraint::SamplingConstraint) -> Vector{<:AbstractUncertainValue}

Return a vector of uncertain values by applying the `constraint` to each
uncertain value in `udata`.
"""
function constrain(udata::Vector{<:AbstractUncertainValue}, constraint::SamplingConstraint)
    [constrain(uval, constraint) for uval in udata]
end

"""
    constrain(udata::Vector{<:AbstractUncertainValue},
        constraints::Vector{T}) where {T<:SamplingConstraint} -> Vector{<:AbstractUncertainValue}

Return a vector of uncertain values by applying a different sampling
constraint to each uncertain value in `udata`.
"""
function constrain(udata::Vector{<:AbstractUncertainValue},
        constraints::Vector{T}) where {T <: SamplingConstraint}
    if length(udata) != length(constraints)
        error("Number of sampling constraints must match length of dataset.")
    end

    return [constrain(udata[i], constraints[i]) for i in 1:length(udata)]
end

"""
    constrain(udata::AbstractUncertainValueDataset,
        constraint::SamplingConstraint) -> ConstrainedUncertainValueDataset

Return an uncertain dataset by applying the `constraint` to each uncertain
value in `udata`.
"""
function constrain(udata::DT, constraint::SamplingConstraint) where {
        DT <: AbstractUncertainValueDataset}
    ConstrainedUncertainValueDataset([constrain(uval, constraint) for uval in udata])
end

"""
    constrain(udata::AbstractUncertainValueDataset,
        constraints::Vector{T}) where {T<:SamplingConstraint} -> ConstrainedUncertainValueDataset

Return an uncertain dataset by applying a different sampling constraint to
each uncertain value in `udata`.
"""
function constrain(udata::DT, constraints::Vector{T}) where {
        T <: SamplingConstraint, DT <: AbstractUncertainValueDataset}
    if length(udata) != length(constraints)
        error("Number of sampling constraints must match length of dataset.")
    end

    n_vals = length(udata)

    ConstrainedUncertainValueDataset([constrain(udata[i], constraints[i]) for i in 1:n_vals])
end

# """
#     constrain(udata::ConstrainedUncertainValueDataset,
#         constraint::SamplingConstraint) -> ConstrainedUncertainValueDataset
#
# Return an uncertain dataset by applying the `constraint` to each
# uncertain value in `udata`.
# """
# constrain(udata::ConstrainedUncertainValueDataset, constraint::SamplingConstraint) =
#     ConstrainedUncertainValueDataset([constrain(uval, constraint) for uval in udata])
#
# """
#     constrain(udata::ConstrainedUncertainValueDataset,
#         constraints::Vector{T}) where {T<:SamplingConstraint} -> ConstrainedUncertainValueDataset
#
# Return an uncertain dataset by applying a different sampling constraint to each uncertain
# value in `udata`.
# """
# function constrain(udata::ConstrainedUncertainValueDataset,
#         constraints::Vector{T}) where {T<:SamplingConstraint}
#     if length(udata) != length(constraints)
#         error("Number of sampling constraints must match length of dataset.")
#     end
#
#     n_vals = length(udata)
#
#     ConstrainedUncertainValueDataset([constrain(udata[i], constraints[i]) for i in 1:n_vals])
# end

export constrain, verify_constraints
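# Usage sketch (editor's addition, not part of the library; assumes
# `UncertainData` and `Distributions` are loaded):
#
#   uvals = [UncertainValue(Normal(i, 0.3)) for i = 1:4]
#   constrain(uvals, TruncateStd(1))                # same constraint for all
#   constrain(uvals, [TruncateStd(i) for i = 1:4])  # one constraint per value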
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
8711
import ..UncertainDatasets.AbstractUncertainValueDataset
import ..UncertainDatasets.UncertainDataset
import ..UncertainValues.UncertainScalarKDE

abstract type SamplingConstraint end

"""
    NoConstraint

A (non)constraint indicating that the distribution furnishing an uncertain value
should be sampled over its entire support.
"""
struct NoConstraint <: SamplingConstraint end

#########################################################################
# Sampling constraints for regular data (i.e. not age/depth/time index...,
# but the values associated to those indices).
#########################################################################

abstract type ValueSamplingConstraint <: SamplingConstraint end

"""
    TruncateLowerQuantile(lower_quantile::Float64)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated below at some quantile.
"""
struct TruncateLowerQuantile <: ValueSamplingConstraint
    lower_quantile::Float64
end

"""
    TruncateUpperQuantile(upper_quantile::Float64)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated above at some quantile.
"""
struct TruncateUpperQuantile <: ValueSamplingConstraint
    upper_quantile::Float64
end

"""
    TruncateQuantiles(lower_quantile::Float64, upper_quantile::Float64)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated to the quantile range `(lower_quantile, upper_quantile)`.
"""
struct TruncateQuantiles{T1<:Real, T2<:Real} <: ValueSamplingConstraint
    lower_quantile::T1
    upper_quantile::T2

    function TruncateQuantiles(lower_quantile::T1, upper_quantile::T2) where {T1, T2}
        err_msg = "Need 0 <= lower_quantile < upper_quantile <= 1"

        if !(0.0 <= lower_quantile < upper_quantile <= 1.0)
            throw(DomainError(err_msg * " (got lo = $lower_quantile, hi = $upper_quantile)"))
        else
            new{T1, T2}(lower_quantile, upper_quantile)
        end
    end
end

"""
    TruncateStd(nσ::Number)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated at the mean ± `nσ` (`n` standard deviations).

## Notes

- Beware when you apply the `TruncateStd` constraint to a (usually numeric) population
    with a small value range. With `nσ` small, you might end up with a population mean
    *between* the actual values, so that the range
    `[mean(pop) - nσ*std(pop), mean(pop) + nσ*std(pop)]` returns `nothing`.
"""
struct TruncateStd{T<:Number} <: ValueSamplingConstraint
    nσ::T

    function TruncateStd(nσ::T) where T
        if nσ <= 0
            err_str = "TruncateStd must be initialised with nσ strictly positive"
            throw(DomainError(err_str * " (got nσ = $nσ)"))
        else
            new{T}(nσ)
        end
    end
end

"""
    TruncateMinimum(min::Number)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated below at some specified minimum value.
"""
struct TruncateMinimum{T<:Number} <: ValueSamplingConstraint
    min::T
end

"""
    TruncateMaximum(max::Number)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated above at some specified maximum value.
"""
struct TruncateMaximum{T<:Number} <: ValueSamplingConstraint
    max::T
end

"""
    TruncateRange(min::Number, max::Number)

A constraint indicating that the distribution furnishing an uncertain value
should be truncated at some range `[min, max]`.
""" struct TruncateRange{T1, T2} <: ValueSamplingConstraint min::T1 max::T2 function TruncateRange(min::T1, max::T2) where {T1, T2} if min <= max # <= ties are allowed, because we may encounter CertainValue instances return new{T1, T2}(min, max) else err_msg = "Cannot create TruncateRange instance. Need min < max" throw(DomainError(err_msg * " (got min = $min, max = $max)")) end end end export SamplingConstraint, NoConstraint, ValueSamplingConstraint, TruncateLowerQuantile, TruncateUpperQuantile, TruncateQuantiles, TruncateMinimum, TruncateMaximum, TruncateRange, TruncateStd ######################################################################### # Sampling constraints for sample indices (time index, age, depth, etc...) # Often, these need to be sampled to obey some physical criteria (i.e., # observations are from physical samples lying above each other, so the order # of the observations cannot be mixed). ######################################################################### include("ordered_sequences/ordered_sequence_algorithms.jl") """ IndexSamplingConstraint An abstract type for sampling constraints valid only for indices. """ abstract type IndexSamplingConstraint <: SamplingConstraint end ######################################################################### # Sequential sampling constraints ######################################################################### abstract type SequentialSamplingConstraint{OA} end # Add the ordered sampling scheme to the seqential sampling constraints, # because that's all they affect. Defaults to `StartToEnd`. #(::Type{SSC})(args...; kwargs...) where SSC<:SequentialSamplingConstraint = SSC(StartToEnd(), args...; kwargs...) """ StrictlyIncreasing(algorithm::OrderedSamplingAlgorithm; n::Int = 50000, lq = 0.05, uq = 0.95) Sampling scheme indicating element-wise sampling such that the resulting values are strictly increasing in magnitude. Increasing sequential sampling is only guaranteed when distributions have finite support. Therefore, distributions are element-wise truncated to the lower and upper quantiles `lq` and `uq`. For each distribution, this is done by drawing `n` values from it, then finding the quantiles for that sample, and finally truncating the distribution to the empirical quantile range. `algorithm` is an instance of some `OrderedSamplingAlgorithm` (e.g. `StartToEnd`). `n` is the number of samples to draw when computing quantiles. Typically used when there are known, physical constraints on the measurements. For example, geochemical measurements of sediments at different depths of a sediment core are taken at physically separate depths in the core. Thus, the order of the indices cannot be flipped, and must be strictly decreasing/increasing. See also: [`StartToEnd`](@ref) """ struct StrictlyIncreasing{OA <: OrderedSamplingAlgorithm} <: SequentialSamplingConstraint{OA} ordered_sampling_algorithm::OA n::Int # number of samples to draw from samples lq::Float64 # lower quantile uq::Float64 # upper quantile function StrictlyIncreasing(algorithm::OA = StartToEnd(); n::Int = 10000, lq = 0.05, uq = 0.95) where OA if lq >= uq throw(ArgumentError("Need lq < uq. Got lq=$(lq) > uq=$(uq).")) end new{OA}(algorithm, n, lq, uq) end end """ StrictlyIncreasing(algorithm::OrderedSamplingAlgorithm; n::Int = 50000) Sampling scheme indicating element-wise sampling such that the resulting values are strictly decreasing in magnitude. Decreasing sequential sampling is only guaranteed when distributions have finite support. 
Therefore, distributions are element-wise truncated to the lower and upper quantiles `lq` and `uq`. For each distribution, this is done by drawing `n` values from it, then finding the quantiles for that sample, and finally truncating the distribution to the empirical quantile range. `algorithm` is an instance of some `OrderedSamplingAlgorithm` (e.g. `StartToEnd`). `n` is the number of samples to draw when computing quantiles. Typically used when there are known, physical constraints on the measurements. For example, geochemical measurements of sediments at different depths of a sediment core are taken at physically separate depths in the core. Thus, the order of the indices cannot be flipped, and must be strictly decreasing/increasing. See also: [`StartToEnd`](@ref) """ struct StrictlyDecreasing{OA <: OrderedSamplingAlgorithm} <: SequentialSamplingConstraint{OA} ordered_sampling_algorithm::OA n::Int # number of samples to draw from samples lq::Float64 # lower quantile uq::Float64 # upper quantile function StrictlyDecreasing(algorithm::OA = StartToEnd(); n::Int = 10000, lq = 0.05, uq = 0.95) where OA if lq >= uq throw(ArgumentError("Need lq < uq. Got lq=$(lq) > uq=$(uq).")) end new{OA}(algorithm, n, lq, uq) end end export IndexSamplingConstraint, SequentialSamplingConstraint, StrictlyIncreasing, StrictlyDecreasing
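A sketch of constructing a sequential constraint with the defaults above; such a constraint is typically passed on to the package's resampling routines:

t = [UncertainValue(Normal, i, 0.3) for i in 1:10]
c = StrictlyIncreasing(StartToEnd(); n = 5000, lq = 0.05, uq = 0.95)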
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
869
import ..UncertainValues.AbstractUncertainValue
import ..UncertainValues.AbstractUncertainScalarKDE

"""
Fallback constraints. These are necessary when sampling constraints are not compatible.
For example, the `TruncateStd` constraint is not implemented for `UncertainScalarKDE`,
so we need a fallback. The default is to fall back to `NoConstraint()`.
"""
function fallback(s::SamplingConstraint, v::AbstractUncertainValue)
    NoConstraint()
end

"""
    fallback(uv::AbstractUncertainScalarKDE, constraint::TruncateStd)

Fallback constraint for a `TruncateStd` constraint applied to an
`UncertainScalarKDE` instance.
"""
function fallback(uv::AbstractUncertainScalarKDE, constraint::TruncateStd)
    @warn "TruncateStd constraint is incompatible with UncertainScalarKDE. Falling back to TruncateQuantiles(0.33, 0.67)."
    TruncateQuantiles(0.33, 0.67)
end

export fallback
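A sketch of the fallback in action, assuming that the vector constructor `UncertainValue(::Vector)` yields a kernel density estimate (as in the package documentation):

uv_kde = UncertainValue(rand(Normal(0, 1), 1000))
fallback(uv_kde, TruncateStd(2))  # -> TruncateQuantiles(0.33, 0.67), with a warning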
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1597
""" OrderedSamplingAlgorithm An abstract type for ordered sampling algorithms. """ abstract type OrderedSamplingAlgorithm end """ StartToEnd An ordered sampling algorithm indicating that values should be treated consecutively from start to finish of the dataset. """ struct StartToEnd <: OrderedSamplingAlgorithm end """ EndToStart An ordered sampling algorithm indicating that the values should be treated consecutively from the end to the start of the dataset. """ struct EndToStart <: OrderedSamplingAlgorithm end """ RandPtOutwards An ordered sampling algorithm indicating that the values should be divided into two groups, separating the values at some midpoint of the dataset. The two groups of values are then treated separately. """ struct RandPtOutwards <: OrderedSamplingAlgorithm midpoint_idx::Int end """ ChuncksForwards An ordered sampling algorithm indicating that the values should be divided into multiple (`n_chunks`) groups. The groups of values are then treated separately, treating values from the start to the end of each group. """ struct ChunksForwards <: OrderedSamplingAlgorithm n_chunks::Int end """ ChuncksBackwards An ordered sampling algorithm indicating that the values should be divided into multiple (`n_chunks`) groups. The groups of values are then treated separately, treating values from the end to the start of each group. """ struct ChunksBackwards <: OrderedSamplingAlgorithm n_chunks::Int end export OrderedSamplingAlgorithm, StartToEnd, EndToStart, RandPtOutwards, ChunksForwards, ChunksBackwards
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2736
using IntervalArithmetic

include("utils.jl")

import ..AbstractUncertainIndexValueDataset

export sequence_exists, sequence_exists!

"""
    sequence_exists(x, c::SequentialSamplingConstraint)

Does a point-by-point sequence through the uncertain dataset `x` exist that satisfies the
criteria `c`? If `x` is an `UncertainIndexValueDataset`, then check for a sequence through
the indices only. Before the check is performed, the distributions in `x` are truncated
to the quantiles provided by `c` to ensure they have finite supports.

## Example

```julia
# Create a set of time indices.
# We construct this in such a way that we *know* an increasing sequence exists.
N = 100
t = [UncertainValue(Normal, i, 2) for i in 1:N];

sequence_exists(t, StrictlyIncreasing(StartToEnd()))
```
"""
function sequence_exists end

# If data has uncertainties both in indices and values, check only for indices.
sequence_exists(x::AbstractUncertainIndexValueDataset, c) = sequence_exists(x.indices, c)

function sequence_exists(x, c::SequentialSamplingConstraint)
    lqs, uqs = get_quantiles(x, c)
    return sequence_exists(lqs, uqs, c), lqs, uqs
end

function sequence_exists!(lqs, uqs, x::AbstractUncertainValueDataset, c)
    get_quantiles!(lqs, uqs, x, c)
    return sequence_exists(lqs, uqs, c)
end

sequence_exists(x::AbstractUncertainIndexValueDataset, c::StrictlyIncreasing{StartToEnd}) =
    sequence_exists(x.indices, c)

###########################
# Concrete implementations
###########################
"""
    sequence_exists(lqs, uqs, c::StrictlyIncreasing{StartToEnd})
    sequence_exists(lqs, uqs, c::StrictlyDecreasing{StartToEnd})

Does a strictly increasing (resp. strictly decreasing) sequence through the dataset exist,
given the element-wise lower and upper truncation quantiles `lqs` and `uqs`? Truncating
each distribution to a finite quantile range first is necessary because some distributions
may have infinite support.
"""
function sequence_exists(lqs, uqs, c::StrictlyIncreasing{StartToEnd})
    L = length(lqs)
    if any(lqs .> uqs) # ties are allowed, because we have `CertainValue`s
        error("Not all `lqs[i]` are lower than `uqs[i]`. Quantile calculations are not meaningful.")
    end

    for i = 1:L-1
        if lqs[i] >= minimum(uqs[i+1:end])
            return false
        end
    end
    return true
end

function sequence_exists(lqs, uqs, c::StrictlyDecreasing{StartToEnd})
    L = length(lqs)
    if any(lqs .> uqs) # ties are allowed, because we have `CertainValue`s
        error("Not all `lqs[i]` are lower than `uqs[i]`. Quantile calculations are not meaningful.")
    end

    for i = 1:L-1
        if uqs[i] < maximum(lqs[i+1:end])
            return false
        end
    end
    return true
end
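A sketch of the existence check, mirroring the docstring example above:

N = 100
t = [UncertainValue(Normal, i, 0.5) for i in 1:N]
exists, lqs, uqs = sequence_exists(t, StrictlyIncreasing(StartToEnd()))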
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3249
""" get_quantiles(x, c::SequentialSamplingConstraint{<:OrderedSamplingAlgorithm}) → Tuple{Vector{Float64}, Vector{Float64}} get_quantiles!(lqs, uqs, x, c::SequentialSamplingConstraint{<:OrderedSamplingAlgorithm}) → Tuple{Vector{Float64}, Vector{Float64}} Get the element-wise lower and upper quantiles for `x` respecting the quantiles given by the constraint `c`. The in-place `get_quantiles!` writes the quantiles to pre-allocated vectors `lqs` and `uqs`. """ function get_quantiles end """ get_quantiles(x, c::SequentialSamplingConstraint{<:OrderedSamplingAlgorithm}) → Tuple{Vector{Float64}, Vector{Float64}} get_quantiles!(lqs, uqs, x, c::SequentialSamplingConstraint{<:OrderedSamplingAlgorithm}) → Tuple{Vector{Float64}, Vector{Float64}} Get the element-wise lower and upper quantiles for `x` respecting the quantiles given by the constraint `c`. The in-place `get_quantiles!` writes the quantiles to pre-allocated vectors `lqs` and `uqs`. """ function get_quantiles! end const SSC = SequentialSamplingConstraint{<:OrderedSamplingAlgorithm} # Generic case takes care of vectors of uncertain values function get_quantiles(x, c::SSC) lqs = quantile.(x, c.lq, c.n) uqs = quantile.(x, c.uq, c.n) return lqs, uqs end function get_quantiles!(lqs, uqs, x, c::SSC) length(lqs) == length(uqs) == length(x) || error("Lengths of `lqs`, `uqs` and `x` do not match.") lqs .= quantile.(x, c.lq, c.n) uqs .= quantile.(x, c.uq, c.n) end # Be specific when it comes to concrete types with differently named data fields. function get_quantiles(x::UncertainIndexDataset, c::SSC) lqs = quantile.(x.indices, c.lq, c.n) uqs = quantile.(x.indices, c.uq, c.n) return lqs, uqs end function get_quantiles!(lqs, uqs, x::UncertainIndexDataset, c::SSC) length(lqs) == length(uqs) == length(x) || error("Lengths of `lqs`, `uqs` and `x` do not match.") lqs .= quantile.(x.indices, c.lq, c.n) uqs .= quantile.(x.indices, c.uq, c.n) end function get_quantiles(x::UncertainValueDataset, c::SSC) lqs = quantile.(x.values, c.lq, c.n) uqs = quantile.(x.values, c.uq, c.n) return lqs, uqs end function get_quantiles!(x::UncertainValueDataset, c::SSC) length(lqs) == length(uqs) == length(x) || error("Lengths of `lqs`, `uqs` and `x` do not match.") lqs .= quantile.(x.values, c.lq, c.n) uqs .= quantile.(x.values, c.uq, c.n) end """ truncated_supports(udata::AbstractUncertainValueDataset; quantiles = [0.001, 0.999]) Truncate the furnishing distribution of each uncertain value in the dataset to the provided `quantiles` range. Returns a vector of Interval{Float64}, one for each value. """ function truncated_supports(udata::AbstractUncertainValueDataset; quantiles = [0.001, 0.999]) n_vals = length(udata) # Using the provided quantiles, find the parts of the supports of the furnishing # distributions from which we're going to sample. supports = Vector{Interval{Float64}}(undef, n_vals) for i = 1:n_vals lowerbound = quantile(udata[i], minimum(quantiles)) upperbound = quantile(udata[i], maximum(quantiles)) supports[i] = interval(lowerbound, upperbound) end return supports end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
597
# mean, median, std etc. functions don't work on Distributions.Truncated instances,
# so we'll define them using a resampling approach
import Distributions.Truncated
import StatsBase: mean, median, middle, std, mode, quantile

mean(d::Truncated; n::Int = 10000) = mean(rand(d, n))
median(d::Truncated; n::Int = 10000) = median(rand(d, n))
middle(d::Truncated; n::Int = 10000) = middle(rand(d, n))
std(d::Truncated; n::Int = 10000) = std(rand(d, n))
mode(d::Truncated; n::Int = 10000) = mode(rand(d, n))
quantile(d::Truncated, q; n::Int = 10000) = quantile(rand(d, n), q)
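A sketch of these resampling-based statistics, assuming `Distributions` is loaded:

d = truncated(Normal(0, 1), -1, 1)
mean(d; n = 10_000), std(d; n = 10_000), quantile(d, 0.5; n = 10_000)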
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1317
import ..UncertainValues.TheoreticalDistributionScalarValue import ..UncertainValues.AbstractUncertainOneParameterScalarValue import ..UncertainValues.AbstractUncertainTwoParameterScalarValue import ..UncertainValues.AbstractUncertainThreeParameterScalarValue import ..UncertainValues.ConstrainedUncertainScalarValueOneParameter import ..UncertainValues.ConstrainedUncertainScalarValueTwoParameter import ..UncertainValues.ConstrainedUncertainScalarValueThreeParameter import ..UncertainValues.TruncatedUncertainScalarKDE import ..UncertainValues.AbstractUncertainScalarKDE import ..UncertainValues.TheoreticalFittedUncertainScalar import ..UncertainValues.ConstrainedUncertainScalarTheoreticalFit import ..UncertainValues.FittedDistribution import ..UncertainValues.getquantileindex import Distributions.ValueSupport import Distributions.Univariate import Distributions.Distribution import Distributions.support import Distributions.Truncated import Distributions: mean, std import KernelDensity.UnivariateKDE import StatsBase: quantile, Weights, mean, std import Base.truncate include("stats_on_truncated_distributions.jl") include("truncate_kde.jl") include("truncate_theoreticalscalar.jl") include("truncate_theoretical_fitted.jl") include("truncate_CertainValue.jl") include("truncate_population.jl")
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1421
import ..UncertainValues.CertainValue

Base.truncate(v::CertainValue) = v

function Base.truncate(v::CertainValue, constraint::TruncateMaximum)
    if v.value > constraint.max
        msg = "Truncating $v with $constraint failed\n"
        msg2 = "Need value <= constraint.max, got $(v.value) > $(constraint.max)"
        throw(ArgumentError(msg * msg2))
    else
        return v
    end
end

function Base.truncate(v::CertainValue, constraint::TruncateMinimum)
    if v.value < constraint.min
        msg = "Truncating $v with $constraint failed\n"
        msg2 = "Need value >= constraint.min, got $(v.value) < $(constraint.min)"
        throw(ArgumentError(msg * msg2))
    else
        return v
    end
end

function Base.truncate(v::CertainValue, constraint::TruncateRange)
    if v.value < constraint.min
        msg = "Truncating $v with $constraint failed\n"
        msg2 = "Need value >= constraint.min, got $(v.value) < $(constraint.min)"
        throw(ArgumentError(msg * msg2))
    elseif v.value > constraint.max
        msg = "Truncating $v with $constraint failed\n"
        msg2 = "Need value <= constraint.max, got $(v.value) > $(constraint.max)"
        throw(ArgumentError(msg * msg2))
    else
        return v
    end
end

truncate(v::CertainValue, s::TruncateLowerQuantile) = v
truncate(v::CertainValue, s::TruncateUpperQuantile) = v
truncate(v::CertainValue, s::TruncateQuantiles) = v
truncate(v::CertainValue, s::TruncateStd) = v
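A sketch, assuming the scalar constructor `UncertainValue(2.0)` yields a `CertainValue`:

v = UncertainValue(2.0)
truncate(v, TruncateMinimum(0.0))  # returns v unchanged
truncate(v, TruncateMaximum(1.0))  # throws ArgumentError: 2.0 exceeds the maximum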
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
7957
import Base.truncate ################################################################ # Verify that support is not empty after applying constraints ################################################################ """ verify_nonempty_support(uv::AbstractUncertainScalarKDE, constraint::SamplingConstraint) Check if the support of the distribution furnishing the uncertain value is empty after applying the sampling constraint. """ verify_nonempty_support(uv::AbstractUncertainScalarKDE, constraint::SamplingConstraint) function verify_nonempty_support(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum) if constraint.max < minimum(uv.range) minrange = minimum(uv.range) truncmax = constraint.max e = "constraint.max = $truncmax < minimum(uv.range) = $minrange" throw(ArgumentError(e)) end end function verify_nonempty_support(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum) if constraint.min > maximum(uv.range) truncmin = constraint.min maxrange = maximum(uv.range) e = "maximum(uv.range) = $maxrange < constraint.min = $truncmin" throw(ArgumentError(e)) end end function verify_nonempty_support(uv::AbstractUncertainScalarKDE, constraint::TruncateRange) truncmin = constraint.min truncmax = constraint.max minrange = minimum(uv.range) maxrange = maximum(uv.range) if maxrange < truncmin && minrange > truncmax e1 = "maximum(uv.range) = $maxrange < TruncateMinimum.min = $truncmin and " e2 = "TruncateMaximum.max = $truncmax < minimum(uv.range) = $minrange" throw(ArgumentError(string(e1, e2))) elseif maxrange < truncmin e = "maximum(uv.range) = $maxrange < TruncateMinimum.min = $truncmin" throw(ArgumentError(e)) elseif minrange > truncmax e = "TruncateMaximum.max = $truncmax < minimum(uv.range) = $minrange" throw(ArgumentError(e)) end end export verify_nonempty_support ################################################################ # Truncating uncertain values based on kernel density estimates ################################################################ """ truncate(uv::AbstractUncertainScalarKDE, constraint::SamplingConstraint) Truncate an uncertain value `uv` furnished by a kernel density estimated distribution using the supplied `constraint`. """ truncate(uv::AbstractUncertainScalarKDE, constraint::SamplingConstraint) truncate(uv::AbstractUncertainScalarKDE, constraint::NoConstraint) = uv """ truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateLowerQuantile) Truncate the kernel density estimate to `uv`s distribution using a `TruncateLowerQuantile` sampling constraint. """ function truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateLowerQuantile) idx_lower_quantile = getquantileindex(uv, constraint.lower_quantile) # Subset the values and weights (values of the pdf at those values) idx_min = idx_lower_quantile idx_max = length(uv.pdf) range_subset = uv.range[idx_min:idx_max] pdf_subset = uv.pdf[idx_min:idx_max] # Return truncated KDE and the indices used to subset. We have to normalise the pdf # here, so that we're still dealing with a probability distribution. range_subset, pdf_subset ./ sum(pdf_subset), idx_min, idx_max end """ truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateUpperQuantile) Truncate the kernel density estimate to `uv`s distribution using a `TruncateUpperQuantile` sampling constraint. 
""" function truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateUpperQuantile) idx_upper_quantile = getquantileindex(uv, constraint.upper_quantile) # Subset the values and weights (values of the pdf at those values) idx_min = 1 idx_max = idx_upper_quantile range_subset = uv.range[idx_min:idx_max] pdf_subset = uv.pdf[idx_min:idx_max] # Return truncated KDE and the indices used to subset. We have to normalise the pdf # here, so that we're still dealing with a probability distribution. range_subset, pdf_subset ./ sum(pdf_subset), idx_min, idx_max end """ truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateQuantiles) Truncate the kernel density estimate to `uv`s distribution using a `TruncateQuantiles` sampling constraint. """ function truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateQuantiles) idx_lower_quantile = getquantileindex(uv, constraint.lower_quantile) idx_upper_quantile = getquantileindex(uv, constraint.upper_quantile) # Subset the values and weights (values of the pdf at those values) idx_min = idx_lower_quantile idx_max = idx_upper_quantile range_subset = uv.range[idx_min:idx_max] pdf_subset = uv.pdf[idx_min:idx_max] # Return truncated KDE and the indices used to subset. We have to normalise the pdf # here, so that we're still dealing with a probability distribution. range_subset, pdf_subset ./ sum(pdf_subset), idx_min, idx_max end """ truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum) Truncate the kernel density estimate to `uv`s distribution using a `TruncateMinimum` sampling constraint. """ function truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateMinimum; test_support = true) # Is the support empty after applying the constraint? If so, throw error. test_support ? verify_nonempty_support(uv, constraint) : nothing # Subset the values and weights (values of the pdf at those values) idx_min = findfirst(uv.range .>= constraint.min) idx_max = length(uv.pdf) range_subset = uv.range[idx_min:idx_max] pdf_subset = uv.pdf[idx_min:idx_max] # Return truncated KDE and the indices used to subset. We have to normalise the pdf # here, so that we're still dealing with a probability distribution. range_subset, pdf_subset ./ sum(pdf_subset), idx_min, idx_max end """ truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum) Truncate the kernel density estimate to `uv`s distribution using a `TruncateMaximum` sampling constraint. """ function truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateMaximum; test_support = true) # Is the support empty after applying the constraint? If so, throw error. test_support ? verify_nonempty_support(uv, constraint) : nothing # Subset the values and weights (values of the pdf at those values) idx_min = 1 idx_max = findlast(uv.range .<= constraint.max) range_subset = uv.range[idx_min:idx_max] pdf_subset = uv.pdf[idx_min:idx_max] # Return truncated KDE and the indices used to subset. We have to normalise the pdf # here, so that we're still dealing with a probability distribution. range_subset, pdf_subset ./ sum(pdf_subset), idx_min, idx_max end """ truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateRange) Truncate the kernel density estimate to `uv`s distribution using a `TruncateRange` sampling constraint. """ function truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateRange; test_support = true) # Is the support empty after applying the constraint? If so, throw error. test_support ? 
verify_nonempty_support(uv, constraint) : nothing # Subset the values and weights (values of the pdf at those values) idx_min = findfirst(uv.range .>= constraint.min) idx_max = findlast(uv.range .<= constraint.max) range_subset = uv.range[idx_min:idx_max] pdf_subset = uv.pdf[idx_min:idx_max] # Return truncated KDE and the indices used to subset. We have to normalise the pdf # here, so that we're still dealing with a probability distribution. range_subset, pdf_subset ./ sum(pdf_subset), idx_min, idx_max end truncate(uv::AbstractUncertainScalarKDE, constraint::TruncateStd) = truncate(uv, fallback(uv, constraint)) export truncate
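A sketch of KDE truncation; as the methods above show, the return value is a tuple of the subsetted range, the re-normalised pdf, and the subsetting indices. Assumes the vector constructor yields a kernel density estimate:

uv = UncertainValue(rand(Normal(0, 1), 2000))
range_subset, pdf_subset, idx_min, idx_max = truncate(uv, TruncateQuantiles(0.1, 0.9))
sum(pdf_subset) ≈ 1.0  # the subsetted pdf is re-normalised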
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
13821
import ..UncertainValues: AbstractScalarPopulation, UncertainScalarPopulation, UncertainValue import Distributions UVAL_TYPES = Union{T1, T2} where {T1 <: AbstractUncertainValue, T2 <: Distributions.Distribution} """ truncate(population::AbstractScalarPopulation, constraint::NoConstraint) Get the elements and the associated sampling weights of the `population` members satisfying the sampling `constraint`. - If `constraint` is a `NoConstraint` instance, then all members and weights are returned unmodified. """ function Base.truncate(pop::AbstractScalarPopulation, constraint::NoConstraint, n::Int = 30000) ConstrainedUncertainScalarPopulation(pop.values, pop.probs) end ############################################################ # Populations whose members are strictly real-valued scalars ############################################################ function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateRange, n::Int = 30000) where {T <: Number, PW} inds = findall(constraint.min .<= pop.values .<= constraint.max) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. No values left after truncation.")) return nothing end ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds]) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateMaximum, n::Int = 30000) where {T <: Number, PW} inds = findall(pop.values .<= constraint.max) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. No values left after truncation.")) return nothing end ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds]) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateMinimum, n::Int = 30000) where {T <: Number, PW} inds = findall(constraint.min .<= pop.values) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. No values left after truncation.")) return nothing end ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds]) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateLowerQuantile, n::Int = 30000) where {T <: Number, PW} lower_bound = quantile(pop, constraint.lower_quantile) inds = findall(lower_bound .<= pop.values) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. No values left after truncation.")) return nothing end ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds]) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateUpperQuantile, n::Int = 30000) where {T <: Number, PW} upper_bound = quantile(pop, constraint.upper_quantile) inds = findall(pop.values .<= upper_bound) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. No values left after truncation.")) return nothing end ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds]) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateQuantiles, n::Int = 30000) where {T <: Number, PW} lower_bound = quantile(pop, constraint.lower_quantile) upper_bound = quantile(pop, constraint.upper_quantile) inds = findall(lower_bound .<= pop.values .<= upper_bound) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. 
No values left after truncation."))
        return nothing
    end
    ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds])
end

function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateStd,
        n::Int = 30000) where {T <: Number, PW}

    # Draw a sample of size `n` of the members of `pop` according to their weights.
    s = rand(pop, n)

    # Compute mean and standard deviation
    p_mean = mean(s)
    p_stdev = std(s)
    nσ = constraint.nσ

    inds = findall(p_mean - p_stdev*nσ .<= pop.values .<= p_mean + p_stdev*nσ)
    if length(inds) == 0
        throw(ArgumentError("$pop could not be truncated. No values left after truncation."))
    end
    ConstrainedUncertainScalarPopulation(pop.values[inds], pop.probs[inds])
end

############################################################
# Populations whose members are some sort of uncertain value
############################################################
TRUNCVAL_TYPES = Union{T1, T2} where {
    T1 <: AbstractUncertainValue,
    T2 <: Distributions.Distribution}

export TRUNCVAL_TYPES

function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateMinimum) where {
        T <: AbstractUncertainValue, PW}

    # Truncate the members whose supports end at or above the minimum value imposed by the
    # constraint, and match them with their respective probabilities. Members may be dropped
    # entirely if truncation leaves nothing, so build the vectors incrementally.
    truncated_vals = Vector{TRUNCVAL_TYPES}(undef, 0)
    inds = Vector{Int}(undef, 0)
    for (i, val) in enumerate(pop.values)
        if maximum(val) >= constraint.min
            c = constrain(val, constraint)
            if !(c isa Nothing)
                push!(inds, i)
                push!(truncated_vals, c)
            end
        end
    end

    if length(inds) > 0
        ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds])
    else
        nothing
    end
end

function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateMinimum,
        n::Int) where {T <: AbstractUncertainValue, PW}
    Base.truncate(pop, constraint)
end

function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateMaximum) where {
        T <: AbstractUncertainValue, PW}
    mins = [minimum(uv) for uv in pop]

    # Find all distributions whose supports start *below or at* the maximum value imposed
    # by the constraint.
    inds = findall(mins .<= constraint.max)
    if length(inds) == 0
        #throw(ArgumentError("$pop could not be truncated.
No values left after truncation.")) return nothing end # Constrain those distributions and match them with their respective probabilities truncated_vals = Vector{TRUNCVAL_TYPES}(undef, 0) inds = Vector{Int}(undef, 0) for (i, val) in enumerate(pop.values) if minimum(val) < constraint.max c = constrain(val, constraint) if !(c isa Nothing) push!(inds, i) push!(truncated_vals, c) end end end if length(inds) > 0 return ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds]) else return nothing end end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateMaximum, n::Int) where {T <: AbstractUncertainValue, PW} Base.truncate(pop, constraint) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateRange) where { T <: AbstractUncertainValue, PW} mins = [minimum(uv) for uv in pop] maxs = [maximum(uv) for uv in pop] # Find all distributions whose supports start *above or at* the minimum value imposed by the constraint. satisfies_minconstraint = constraint.min .<= maxs # Find all distributions whose supports start *below or at* the maximum value imposed by the constraint. satisfies_maxconstraint = mins .<= constraint.max # Find all distributions that satisfy both the lower constraint and the upper constraint inds = findall(satisfies_minconstraint .& satisfies_maxconstraint) if length(inds) == 0 #throw(ArgumentError("$pop could not be truncated. No values left after truncation.")) return nothing end # Constrain those distributions and match them with their respective probabilities truncated_vals = Vector{TRUNCVAL_TYPES}(undef, length(inds)) for (i, val) in enumerate(pop[inds]) truncated_vals[i] = constrain(val, constraint) end ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds]) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateRange, n::Int) where {T <: AbstractUncertainValue, PW} Base.truncate(pop, constraint) end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateLowerQuantile, n::Int = 30000) where {T <: AbstractUncertainValue, PW} # We want to truncate each of the population members so that their furnishing distributions # cannot yield numbers smaller than the overall quantile of the population. First # find the overall lower quantile population_lower_quantile = quantile(pop, constraint.lower_quantile, n) # Now, truncate each of the population members below at the overall population lower # quantile. Probabilities are kept the same. truncated_vals = Vector{TRUNCVAL_TYPES}(undef, 0) inds = Vector{Int}(undef, 0) for (i, val) in enumerate(pop.values) if maximum(val) > population_lower_quantile push!(inds, i) push!(truncated_vals, truncate(val, TruncateMinimum(population_lower_quantile))) end end #println("There were $(length(inds)) population members left after truncation") if length(inds) > 0 return ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds]) else return nothing end end function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateUpperQuantile, n::Int = 30000) where {T <: AbstractUncertainValue, PW} # We want to truncate each of the population members so that their furnishing distributions # cannot yield numbers larger than the overall quantile of the population. 
First,
    # find the overall upper quantile.
    population_upper_quantile = quantile(pop, constraint.upper_quantile, n)
    #@show "Overall quantile", population_upper_quantile

    # Now, truncate each of the population members above at the overall population upper
    # quantile. Probabilities are kept the same. We initialise an empty array, because
    # some values of the population may be dropped during the truncation process.
    truncated_vals = Vector{TRUNCVAL_TYPES}(undef, 0)
    inds = Vector{Int}(undef, 0)
    for (i, val) in enumerate(pop.values)
        if minimum(val) < population_upper_quantile
            push!(inds, i)
            push!(truncated_vals, truncate(val, TruncateMaximum(population_upper_quantile)))
        end
    end
    #println("There were $(length(inds)) population members left after truncation")

    if length(inds) > 0
        return ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds])
    else
        return nothing
    end
end

function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateQuantiles,
        n::Int = 30000) where {T <: AbstractUncertainValue, PW}
    # We want to truncate each of the population members so that their furnishing distributions
    # cannot yield numbers larger than the overall upper quantile of the population, nor
    # numbers smaller than the overall lower quantile of the population. Find these.
    population_upper_quantile = quantile(pop, constraint.upper_quantile, n)
    population_lower_quantile = quantile(pop, constraint.lower_quantile, n)

    # Now, truncate each of the population members at the range given by the overall quantiles.
    # Probabilities are kept the same.
    truncated_vals = Vector{TRUNCVAL_TYPES}(undef, 0)
    inds = Vector{Int}(undef, 0)
    for (i, val) in enumerate(pop.values)
        if maximum(val) > population_lower_quantile && minimum(val) < population_upper_quantile
            push!(inds, i)
            push!(truncated_vals, truncate(val, TruncateRange(population_lower_quantile, population_upper_quantile)))
        end
    end

    if length(inds) > 0
        return ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds])
    else
        return nothing
    end
end

function Base.truncate(pop::UncertainScalarPopulation{T, PW}, constraint::TruncateStd{TN},
        n::Int = 30000) where {T <: AbstractUncertainValue, PW, TN <: Number}
    # We want to truncate each of the population members so that their furnishing distributions
    # cannot yield numbers outside mean ± constraint.nσ*population_std.
    s = rand(pop, n)
    population_std = std(s)
    population_mean = mean(s)
    upper_bound = population_mean + (constraint.nσ*population_std)
    lower_bound = population_mean - (constraint.nσ*population_std)

    # Now, truncate each of the population members at the range given by these bounds.
    # Probabilities are kept the same.
    truncated_vals = Vector{TRUNCVAL_TYPES}(undef, 0)
    inds = Vector{Int}(undef, 0)
    for (i, val) in enumerate(pop.values)
        if maximum(val) > lower_bound && minimum(val) < upper_bound
            push!(inds, i)
            push!(truncated_vals, truncate(val, TruncateRange(lower_bound, upper_bound)))
        end
    end

    if length(inds) > 0
        return ConstrainedUncertainScalarPopulation(truncated_vals, pop.probs[inds])
    else
        return nothing
    end
end

export truncate
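A sketch for a numeric population, assuming `UncertainScalarPopulation(values, probs)` is the constructor matching the `values`/`probs` fields used above:

pop = UncertainScalarPopulation([1.0, 2.0, 3.0, 4.0], [0.25, 0.25, 0.25, 0.25])
truncate(pop, TruncateRange(1.5, 3.5))  # keeps members 2.0 and 3.0 with their weights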
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
308
function Base.truncate(uv::TheoreticalFittedUncertainScalar, constraint::TruncateStd;
        n_draws::Int = 10000)
    m = mean(uv.distribution.distribution)
    s = std(uv.distribution.distribution)
    lower_bound = m - s * constraint.nσ
    upper_bound = m + s * constraint.nσ

    truncated(uv.distribution, lower_bound, upper_bound)
end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5345
import Distributions: truncated

function validate_bounds(lower, upper, uv, constraint)
    if lower >= upper
        msg = "lower_bound < upper_bound required, got $lower >= $upper"
        msg2 = "cannot truncate $uv with $constraint\n"
        throw(ArgumentError(msg2 * msg))
    end
end

################################################################
# Truncating uncertain values based on theoretical distributions.
# Operating on the union of both TheoreticalFittedUncertainScalar
# and TheoreticalDistributionScalarValue as the type of the
# uncertain value is ok, because Truncated is defined for
# FittedDistribution, which is the .distribution field for
# fitted scalars.
################################################################

"""
    truncate(uv::TheoreticalDistributionScalarValue, constraint::SamplingConstraint)

Truncate an uncertain value `uv` represented by a theoretical distribution
according to the sampling `constraint`.
"""
Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::SamplingConstraint)

"""
    truncate(uv::TheoreticalDistributionScalarValue, constraint::NoConstraint)

Truncate the theoretical distribution furnishing `uv` using a `NoConstraint`
sampling constraint.
"""
function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::NoConstraint)
    s = support(uv.distribution)
    lower_bound, upper_bound = s.lb, s.ub
    return truncated(uv.distribution, lower_bound, upper_bound)
end

"""
    truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateQuantiles)

Truncate the theoretical distribution furnishing `uv` using a `TruncateQuantiles`
sampling constraint.
"""
function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateQuantiles)
    lower_bound = quantile(uv.distribution, constraint.lower_quantile)
    upper_bound = quantile(uv.distribution, constraint.upper_quantile)

    validate_bounds(lower_bound, upper_bound, uv, constraint)

    truncated(uv.distribution, lower_bound, upper_bound)
end

"""
    truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateLowerQuantile)

Truncate the theoretical distribution furnishing `uv` using a `TruncateLowerQuantile`
sampling constraint.
"""
function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateLowerQuantile)
    lower_bound = quantile(uv.distribution, constraint.lower_quantile)
    upper_bound = maximum(uv)

    validate_bounds(lower_bound, upper_bound, uv, constraint)

    truncated(uv.distribution, lower_bound, upper_bound)
end

"""
    truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateUpperQuantile)

Truncate the theoretical distribution furnishing `uv` using a `TruncateUpperQuantile`
sampling constraint.
"""
function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateUpperQuantile)
    lower_bound = minimum(uv)
    upper_bound = quantile(uv.distribution, constraint.upper_quantile)

    validate_bounds(lower_bound, upper_bound, uv, constraint)

    truncated(uv.distribution, lower_bound, upper_bound)
end

"""
    truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateMinimum)

Truncate the theoretical distribution furnishing `uv` using a `TruncateMinimum`
sampling constraint.
""" function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateMinimum) lower_bound = constraint.min upper_bound = maximum(uv) validate_bounds(lower_bound, upper_bound, uv, constraint) truncated(uv.distribution, lower_bound, upper_bound) end """ truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateMaximum) Truncate the theoretical distribution furnishing `uv` using a `TruncateMaximum` sampling constraint. """ function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateMaximum) lower_bound = minimum(uv) upper_bound = constraint.max validate_bounds(lower_bound, upper_bound, uv, constraint) truncated(uv.distribution, lower_bound, upper_bound) end """ truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateRange) Truncate the theoretical distribution furnishing `uv` using a `TruncateRange` sampling constraint. """ function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateRange) lower_bound = constraint.min upper_bound = constraint.max validate_bounds(lower_bound, upper_bound, uv, constraint) truncated(uv.distribution, lower_bound, upper_bound) end """ truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateStd, n::Int = 10000) Truncate the theoretical distribution furnishing `uv` using a `TruncateStd` sampling constraint. This functions needs to compute the mean and standard deviation of a truncated distribution, so takes an extra optional argument `n_draws` to allow this. """ function Base.truncate(uv::TheoreticalDistributionScalarValue, constraint::TruncateStd) m = mean(uv.distribution) s = std(uv.distribution) lower_bound = m - s upper_bound = m + s validate_bounds(lower_bound, upper_bound, uv, constraint) truncated(uv.distribution, lower_bound, upper_bound) end export truncate
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
256
@reexport module SensitivityTests """ An abstract type indicating a sensitivity test. """ abstract type SensitivityTest end export SensitivityTest end """ SensitivityTests A module defining sensitivity tests. """ SensitivityTests
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2971
import HypothesisTests.LjungBoxTest import HypothesisTests.BoxPierceTest # # # """ # LjungBoxTest(d::UncertainDataset; n::Int = 10000, # lag::Int = 1, dof::Int = 0) -> LjungBoxTest # # Compute the Ljung-Box `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d`. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function LjungBoxTest(d::UncertainDataset; lag::Int = 1, dof::Int = 0) # LjungBoxTest(resample(d, 1)[1], lag, dof) # end # # """ # LjungBoxTest(d::UncertainDataset, n_tests::Int = 100; lag::Int = 1, dof::Int = 0) -> LjungBoxTest # # Compute the Ljung-Box `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d` # by performing the test on `n` independent draws of the dataset. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function LjungBoxTest(d::UncertainDataset, n_tests::Int = 100; lag::Int = 1, dof::Int = 0) # [LjungBoxTest(resample(d)[1], lag, dof) for i = 1:n_tests] # end # # """ # BoxPierceTest(d::UncertainDataset; # lag::Int = 1, dof::Int = 0) -> Vector{BoxPierceTest} # # Compute the Box-Pierce `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d`. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function BoxPierceTest(d::UncertainDataset; lag::Int = 1, dof::Int = 0) # BoxPierceTest(resample(d, 1)[1], lag, dof) # end # # # """ # BoxPierceTest(d::UncertainDataset, n_tests::Int = 100; # lag::Int = 1, dof::Int = 0) -> BoxPierceTest # # Compute the BoxPierceTest `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d` # by performing the test on `n` independent draws of the dataset. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function BoxPierceTest(d::UncertainDataset, n_tests::Int = 100; # lag::Int = 1, dof::Int = 0) # [BoxPierceTest(resample(d, 1)[1], lag, dof) for i = 1:n_tests] # end # # # export # LjungBoxTest, # BoxPierceTest
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1468
using Reexport

@reexport module UncertainStatistics

using StatsBase
using Distributions

# Uncertain values and datasets types
using ..UncertainValues
using ..UncertainDatasets

# Sampling constraints and resampling functions
using ..Resampling

import ..UVAL_COLLECTION_TYPES

# Definitions of statistical methods for UncertainDatasets
include("statsbase/uncertain_values/core_stats_point_estimates.jl")
include("statsbase/uncertain_values/core_stats_pair_estimates.jl")
include("statsbase/uncertain_datasets/core_stats_uncertaindatasets.jl")
include("statsbase/uncertain_datasets/core_stats_uncertaindatasets_single_estimates.jl")
include("statsbase/uncertain_datasets/core_stats_uncertaindatasets_pairwise_estimates.jl")

include("hypothesis_tests/mann_whitney.jl")
include("hypothesis_tests/t_tests.jl")
include("hypothesis_tests/anderson_darling.jl")
include("hypothesis_tests/kolmogorov_smirnov.jl")
include("hypothesis_tests/jarque_bera.jl")
include("hypothesis_tests/timeseries_tests.jl")

#include("StatsBase_meanfunctions.jl")
#include("StatsBase_scalarstatistics_moments.jl")
#include("StatsBase_variation.jl")
#include("StatsBase_zscores.jl")
#include("StatsBase_entropies.jl")
#include("StatsBase_quantiles.jl")

end

"""
    UncertainStatistics

A module defining functions to compute various statistics for uncertain values and
uncertain datasets.
"""
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1837
import HypothesisTests.OneSampleADTest

"""
    OneSampleADTest(uv::AbstractUncertainValue, d::UnivariateDistribution,
        n::Int = 1000) -> OneSampleADTest

Perform a one-sample Anderson–Darling test of the null hypothesis that
a draw of `n` realisations of the uncertain value `uv` comes from the
distribution `d` against the alternative hypothesis that the sample is
not drawn from `d`.
"""
function OneSampleADTest(uv::AbstractUncertainValue, d::UnivariateDistribution, n::Int = 1000)
    x = resample(uv, n)
    OneSampleADTest(x, d)
end

"""
    OneSampleADTestPooled(ud::UncertainDataset, d::UnivariateDistribution,
        n::Int = 1000) -> OneSampleADTest

First, draw `n` realisations of each uncertain value in `ud` and pool them
together. Then perform a one-sample Anderson–Darling test of the null
hypothesis that the pooled values come from the distribution `d` against
the alternative hypothesis that the sample is not drawn from `d`.
"""
function OneSampleADTestPooled(ud::UncertainDataset, d::UnivariateDistribution, n::Int = 1000)
    x = vcat(resample(ud, n)...,)
    OneSampleADTest(x, d)
end

"""
    OneSampleADTestElementWise(ud::UncertainDataset, d::UnivariateDistribution,
        n::Int = 1000) -> Vector{OneSampleADTest}

First, draw `n` realisations of each uncertain value in `ud`, keeping one pool
of values for each uncertain value. Then, perform an element-wise (pool-wise)
one-sample Anderson–Darling test of the null hypothesis that each value pool
comes from the distribution `d` against the alternative hypothesis that the
sample is not drawn from `d`.
"""
function OneSampleADTestElementWise(ud::UncertainDataset, d::UnivariateDistribution, n::Int = 1000)
    [OneSampleADTest(resample(ud[i], n), d) for i = 1:length(ud)]
end

export
    OneSampleADTest,
    OneSampleADTestPooled,
    OneSampleADTestElementWise
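A sketch, assuming `Distributions` is loaded:

uv = UncertainValue(Normal, 0.1, 1.0)
OneSampleADTest(uv, Normal(0.0, 1.0), 1000)  # tests 1000 draws of uv against N(0, 1)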
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2654
# The original license on the Jarque-Bera test from HypothesisTests.jl
# is as follows. We're including it here because we're mostly copying the
# documentation of the functions.
#
# jarque_bera.jl
# Jarque-Bera goodness-of-fit test
#
# Copyright (C) 2017 Benjamin Born
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import HypothesisTests.JarqueBeraTest

"""
    JarqueBeraTest(d::AbstractUncertainValue, n::Int = 1000) -> JarqueBeraTest

Compute the Jarque-Bera statistic to test the null hypothesis that an
uncertain value is normally distributed.
"""
function JarqueBeraTest(d::AbstractUncertainValue, n::Int = 1000)
    x = resample(d, n)
    JarqueBeraTest(x)
end

"""
    JarqueBeraTestPooled(ud::UncertainDataset, n::Int = 1000) -> JarqueBeraTest

First, draw `n` realisations of each uncertain value in `ud` and pool them
together. Then, compute the Jarque-Bera statistic to test the null hypothesis
that the values of the pool are normally distributed.
"""
function JarqueBeraTestPooled(ud::UncertainDataset, n::Int = 1000)
    x = vcat(resample(ud, n)...,)
    JarqueBeraTest(x)
end

"""
    JarqueBeraTestElementWise(ud::UncertainDataset, n::Int = 1000) -> Vector{JarqueBeraTest}

First, draw `n` realisations of each uncertain value in `ud`, keeping one pool
of values for each uncertain value. Then, compute the Jarque-Bera statistic to
test the null hypothesis that each value pool is normally distributed.
"""
function JarqueBeraTestElementWise(ud::UncertainDataset, n::Int = 1000)
    [JarqueBeraTest(resample(ud[i], n)) for i = 1:length(ud)]
end

export
    JarqueBeraTest,
    JarqueBeraTestPooled,
    JarqueBeraTestElementWise
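A minimal sketch:

uv = UncertainValue(Normal, 0.0, 1.0)
JarqueBeraTest(uv, 1000)  # normality test on 1000 draws of uv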
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
6257
# The original license on the Kolmogorov–Smirnov tests from HypothesisTests.jl
# is as follows. We're including it here because we're mostly copying the
# documentation of the functions.
#
# kolmogorov_smirnov.jl
# Kolmogorov–Smirnov
#
# Copyright (C) 2014 Christoph Sawade
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import HypothesisTests.ExactOneSampleKSTest
import HypothesisTests.ApproximateTwoSampleKSTest

"""
    ExactOneSampleKSTest(uv::AbstractUncertainValue,
        d::UnivariateDistribution, n::Int = 1000) -> ExactOneSampleKSTest

Perform a one-sample exact Kolmogorov–Smirnov test of the null hypothesis that
a draw of `n` realisations of the uncertain value `uv` comes from the
distribution `d` against the alternative hypothesis that the sample is not
drawn from `d`.
"""
function ExactOneSampleKSTest(uv::AbstractUncertainValue,
        d::UnivariateDistribution, n::Int = 1000)
    x = resample(uv, n)
    ExactOneSampleKSTest(x, d)
end

"""
    ExactOneSampleKSTestPooled(ud::UncertainDataset,
        d::UnivariateDistribution, n::Int = 1000) -> ExactOneSampleKSTest

First, draw `n` realisations of each uncertain value in `ud` and pool them
together. Then perform a one-sample exact Kolmogorov–Smirnov test of the null
hypothesis that the pooled values come from the distribution `d` against the
alternative hypothesis that the sample is not drawn from `d`.
"""
function ExactOneSampleKSTestPooled(ud::UncertainDataset,
        d::UnivariateDistribution, n::Int = 1000)
    x = vcat(resample(ud, n)...,)
    ExactOneSampleKSTest(x, d)
end

"""
    ExactOneSampleKSTestElementWise(ud::UncertainDataset,
        d::UnivariateDistribution, n::Int = 1000) -> Vector{ExactOneSampleKSTest}

First, draw `n` realisations of each uncertain value in `ud`, keeping one pool
of values for each uncertain value. Then, perform an element-wise (pool-wise)
one-sample exact Kolmogorov–Smirnov test of the null hypothesis that each value
pool comes from the distribution `d` against the alternative hypothesis that
the sample is not drawn from `d`.
""" function ExactOneSampleKSTestElementWise(ud::UncertainDataset, d::UnivariateDistribution, n::Int = 1000) [ExactOneSampleKSTest(resample(ud[i], n), d) for i = 1:length(ud)] end """ ApproximateTwoSampleKSTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000) -> ApproximateTwoSampleKSTest Perform an asymptotic two-sample Kolmogorov–Smirnov-test of the null hypothesis that the distribution furnishing the uncertain value `d1` represent the same distribution as the distribution furnishing the uncertain value `d2` against the alternative hypothesis that the furnishing distributions are different. """ function ApproximateTwoSampleKSTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000) x = resample(d1, n) y = resample(d2, n) ApproximateTwoSampleKSTest(x, y) end """ ApproximateTwoSampleKSTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) -> ApproximateTwoSampleKSTest First, draw `n` realisations of each uncertain value in `d1`, then separately draw `n` realisations of each uncertain value in `d2`. Then, pool all realisations for `d1` together and all realisations of `d2` together. On the pooled realisations, perform an asymptotic two-sample Kolmogorov–Smirnov-test of the null hypothesis that the distribution furnishing the `d1` value pool represents the same distribution as the distribution furnishing the `d2` value pool, against the alternative hypothesis that the furnishing distributions are different. """ function ApproximateTwoSampleKSTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) x = vcat(resample(d1, n)...,) y = vcat(resample(d2, n)...,) ApproximateTwoSampleKSTest(x, y) end """ ApproximateTwoSampleKSTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) -> Vector{ApproximateTwoSampleKSTest} Assuming `d1` and `d2` contain the same number of uncertain observations, draw `n` realisations of each uncertain value in `d1`, then separately and separately draw `n` realisations of each uncertain value in `d2`. Then, perform an asymptotic two-sample Kolmogorov–Smirnov-test of the null hypothesis that the uncertain values in `d1` and `d2` come from the same distribution against the alternative hypothesis that the (element-wise) values in `d1` and `d2` come from different distributions. The test is performed pairwise, i.e. ApproximateTwoSampleKSTest(d1[i], d2[i]) with `n` draws for the ``i``-ith pair of uncertain values. """ function ApproximateTwoSampleKSTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) N = length(d1) [ApproximateTwoSampleKSTest(resample(d1[i], n), resample(d2[i], n)) for i = 1:N] end export ExactOneSampleKSTest, ExactOneSampleKSTestPooled, ExactOneSampleKSTestElementWise, ApproximateTwoSampleKSTest, ApproximateTwoSampleKSTestPooled, ApproximateTwoSampleKSTestElementWise
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5549
# The original license on the Mann-Whitney tests from HypothesisTests.jl # is as follows (the filename is wrong, though). We're including it here # because we're mostly copying the documentation of the functions. # # Wilcoxon.jl # Wilcoxon rank sum (Mann-Whitney U) tests # # Copyright (C) 2012 Simon Kornblith # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import HypothesisTests.MannWhitneyUTest """ MannWhitneyUTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000) -> MannWhitneyUTest Let `s1` and `s2` be samples of `n` realisations from the distributions furnishing the uncertain values `d1` and `d2`. Perform a Mann-Whitney U test of the null hypothesis that the probability that an observation drawn from the same population as `s1` is greater than an observation drawn from the same population as `s2` is equal to the probability that an observation drawn from the same population as `s2` is greater than an observation drawn from the same population as `s1` against the alternative hypothesis that these probabilities are not equal. The Mann-Whitney U test is sometimes known as the Wilcoxon rank-sum test. When there are no tied ranks and ≤50 samples, or tied ranks and ≤10 samples, `MannWhitneyUTest` performs an exact Mann-Whitney U test. In all other cases, `MannWhitneyUTest` performs an approximate Mann-Whitney U test. """ function MannWhitneyUTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000) x = resample(d1, n) y = resample(d2, n) MannWhitneyUTest(x, y) end """ MannWhitneyUTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) -> MannWhitneyUTest Let ``s_{1_{i}}`` be a sample of `n` realisations of the distribution furnishing the uncertain value `d1[i]`, where ``i \\in [1, 2, \\ldots, N]`` and ``N`` is the number of uncertain values in `d1`. Next, gather the samples for all ``s_{1_{i}}`` in a pooled sample ``S_1``. Do the same for the second uncertain dataset `d2`, yielding the pooled sample ``S_2``. Perform a Mann-Whitney U test of the null hypothesis that the probability that an observation drawn from the same population as ``S_1`` is greater than an observation drawn from the same population as ``S_2`` is equal to the probability that an observation drawn from the same population as ``S_2`` is greater than an observation drawn from the same population as ``S_1`` against the alternative hypothesis that these probabilities are not equal. The Mann-Whitney U test is sometimes known as the Wilcoxon rank-sum test. 
When there are no tied ranks and ≤50 samples, or tied ranks and ≤10 samples, `MannWhitneyUTest` performs an exact Mann-Whitney U test. In all other cases, `MannWhitneyUTest` performs an approximate Mann-Whitney U test. """ function MannWhitneyUTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) x = vcat(resample(d1, n)...,) y = vcat(resample(d2, n)...,) MannWhitneyUTest(x, y) end """ MannWhitneyUTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) -> Vector{MannWhitneyUTest} Assume `d1` and `d2` consist of the same number of uncertain values. Let ``s_{1_{i}}`` be a sample of `n` realisations of the distribution furnishing the uncertain value `d1[i]`, where ``i \\in [1, 2, \\ldots, N]`` and ``N`` is the number of uncertain values in `d1`. Let ``s_{2_{i}}`` be the corresponding sample for `d2[i]`. This function performs an element-wise Mann-Whitney U test of the null hypothesis that the probability that an observation drawn from the same population as ``s_{1_{i}}`` is greater than an observation drawn from the same population as ``s_{2_{i}}`` is equal to the probability that an observation drawn from the same population as ``s_{2_{i}}`` is greater than an observation drawn from the same population as ``s_{1_{i}}`` against the alternative hypothesis that these probabilities are not equal. The Mann-Whitney U test is sometimes known as the Wilcoxon rank-sum test. When there are no tied ranks and ≤50 samples, or tied ranks and ≤10 samples, `MannWhitneyUTest` performs an exact Mann-Whitney U test. In all other cases, `MannWhitneyUTest` performs an approximate Mann-Whitney U test. """ function MannWhitneyUTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000) N = length(d1) [MannWhitneyUTest(resample(d1[i], n), resample(d2[i], n)) for i = 1:N] end export MannWhitneyUTest, MannWhitneyUTestPooled, MannWhitneyUTestElementWise
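# ---- Illustrative usage (editor's sketch; not part of the original file) ----
# Assumes the `UncertainValue`/`UncertainDataset` constructors from
# UncertainData.jl; dataset sizes and distribution parameters are arbitrary.
using UncertainData, Distributions

d1 = UncertainDataset([UncertainValue(Normal, i, 0.5) for i in 1:10])
d2 = UncertainDataset([UncertainValue(Normal, i + 0.2, 0.5) for i in 1:10])

MannWhitneyUTestPooled(d1, d2, 1000)       # one test on the pooled draws
MannWhitneyUTestElementWise(d1, d2, 1000)  # a vector of 10 pairwise tests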
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
10969
# The original license on the t-tests from HypothesisTests.jl # is as follows. We're including it here because we're mostly copying the # documentation of the functions. # # t.jl # Various forms of t-tests # # Copyright (C) 2012 Simon Kornblith # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import HypothesisTests.OneSampleTTest import HypothesisTests.EqualVarianceTTest import HypothesisTests.UnequalVarianceTTest """ EqualVarianceTTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) -> EqualVarianceTTest Consider two samples `s1` and `s2`, each consisting of `n` random draws from the distributions furnishing `d1` and `d2`, respectively. This function performs a two-sample t-test of the null hypothesis that `s1` and `s2` come from distributions with equal means and variances against the alternative hypothesis that the distributions have different means but equal variances. """ function EqualVarianceTTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) x = resample(d1, n) y = resample(d2, n) EqualVarianceTTest(x, y, μ0) end """ EqualVarianceTTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> EqualVarianceTTest Consider two samples `s1[i]` and `s2[i]`, each consisting of `n` random draws from the distributions furnishing the uncertain values `d1[i]` and `d2[i]`, respectively. Gather all `s1[i]` in a pooled sample `S1`, and all `s2[i]` in a pooled sample `S2`. Perform a two-sample t-test of the null hypothesis that `S1` and `S2` come from distributions with equal means and variances against the alternative hypothesis that the distributions have different means but equal variances. """ function EqualVarianceTTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) x = vcat(resample(d1, n)...,) y = vcat(resample(d2, n)...,) EqualVarianceTTest(x, y, μ0) end """ EqualVarianceTTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> Vector{EqualVarianceTTest} Consider two samples `s1[i]` and `s2[i]`, each consisting of `n` random draws from the distributions furnishing the uncertain values `d1[i]` and `d2[i]`, respectively. This function performs an elementwise `EqualVarianceTTest` on the pairs `(s1[i], s2[i])`. 
Specifically, this performs a pairwise two-sample t-test of the null hypothesis that `s1[i]` and `s2[i]` come from distributions with equal means and variances against the alternative hypothesis that the distributions have different means but equal variances. """ function EqualVarianceTTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) N = length(d1) [EqualVarianceTTest(resample(d1[i], n), resample(d2[i], n), μ0) for i = 1:N] end """ UnequalVarianceTTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) -> UnequalVarianceTTest Consider two samples `s1` and `s2`, each consisting of `n` random draws from the distributions furnishing `d1` and `d2`, respectively. Perform an unequal variance two-sample t-test of the null hypothesis that `s1` and `s2` come from distributions with equal means against the alternative hypothesis that the distributions have different means. """ function UnequalVarianceTTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) x = resample(d1, n) y = resample(d2, n) UnequalVarianceTTest(x, y, μ0) end """ UnequalVarianceTTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> UnequalVarianceTTest Consider two samples `s1[i]` and `s2[i]`, each consisting of `n` random draws from the distributions furnishing the uncertain values `d1[i]` and `d2[i]`, respectively. Gather all `s1[i]` in a pooled sample `S1`, and all `s2[i]` in a pooled sample `S2`. This function performs an unequal variance two-sample t-test of the null hypothesis that `S1` and `S2` come from distributions with equal means against the alternative hypothesis that the distributions have different means. """ function UnequalVarianceTTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) x = vcat(resample(d1, n)...,) y = vcat(resample(d2, n)...,) UnequalVarianceTTest(x, y, μ0) end """ UnequalVarianceTTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> Vector{UnequalVarianceTTest} Consider two samples `s1[i]` and `s2[i]`, each consisting of `n` random draws from the distributions furnishing the uncertain values `d1[i]` and `d2[i]`, respectively. This function performs an elementwise `UnequalVarianceTTest` on the pairs `(s1[i], s2[i])`. Specifically, this performs a pairwise unequal variance two-sample t-test of the null hypothesis that `s1[i]` and `s2[i]` come from distributions with equal means against the alternative hypothesis that the distributions have different means. This test is sometimes known as Welch's t-test. It differs from the equal variance t-test in that it computes the number of degrees of freedom of the test using the Welch-Satterthwaite equation: ```math ν_{χ'} ≈ \\frac{\\left(\\sum_{i=1}^n k_i s_i^2\\right)^2}{\\sum_{i=1}^n \\frac{(k_i s_i^2)^2}{ν_i}} ``` """ function UnequalVarianceTTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) N = length(d1) [UnequalVarianceTTest(resample(d1[i], n), resample(d2[i], n), μ0) for i = 1:N] end """ OneSampleTTest(d::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) -> OneSampleTTest Perform a one sample t-test of the null hypothesis that the uncertain value has a distribution with mean `μ0` against the alternative hypothesis that its distribution does not have mean `μ0`. `n` indicates the number of draws during resampling. 
""" function OneSampleTTest(d::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) x = resample(d, n) OneSampleTTest(x, μ0) end """ OneSampleTTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000; μ0::Real=0) -> OneSampleTTest Perform a paired sample t-test of the null hypothesis that the differences between pairs of uncertain values in `d1` and `d2` come from a distribution with mean `μ0` against the alternative hypothesis that the distribution does not have mean `μ0`. """ function OneSampleTTest(d1::AbstractUncertainValue, d2::AbstractUncertainValue, n::Int = 1000; μ0::Real = 0) x = resample(d1, n) y = resample(d2, n) OneSampleTTest(x, y, μ0) end """ OneSampleTTestPooled(d::UncertainDataset, n::Int = 1000; μ0::Real=0) -> OneSampleTTest First, sample `n` draws of each uncertain value in `d1`, then pooling the draws together. Then, perform a one sample t-test of the null hypothesis that the uncertain values have a pooled distribution with mean `μ0` against the alternative hypothesis that its pooled distribution does not have mean `μ0`. `n` indicates the number of draws during resampling. """ function OneSampleTTestPooled(d::UncertainDataset, n::Int = 1000; μ0::Real = 0) x = vcat([resample(uncertainval, n) for uncertainval in d]...,) OneSampleTTest(x, μ0) end """ OneSampleTTestElementWise(d::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> Vector{OneSampleTTest} First, sample `n` draws of each uncertain value in `d`, then pooling the draws together. Then, perform a one sample t-test of the null hypothesis that the uncertain values have a pooled distribution with mean `μ0` against the alternative hypothesis that its pooled distribution does not have mean `μ0`. `n` indicates the number of draws during resampling. """ function OneSampleTTestElementWise(d::UncertainDataset, n::Int = 1000; μ0::Real = 0) [OneSampleTTest(resample(uncertainval, n), μ0) for uncertainval in d] end """ OneSampleTTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> OneSampleTTest First, sample `n` draws of each uncertain value in each dataset, pooling the draws from the elements of `d1` and the draws from the elements of `d2` separately. Then, perform a paired sample t-test of the null hypothesis that the differences between pairs of uncertain values in `d1` and `d2` come from a distribution with mean `μ0` against the alternative hypothesis that the distribution does not have mean `μ0`. """ function OneSampleTTestPooled(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) x = vcat(resample(d1, n)...,) y = vcat(resample(d2, n)...,) OneSampleTTest(x, y, μ0) end """ OneSampleTTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) -> Vector{OneSampleTTest} Perform a one sample t-test of the null hypothesis that the uncertain value has a distribution with mean `μ0` against the alternative hypothesis that its distribution does not have mean `μ0` for uncertain value in `d`. `n` indicates the number of draws during resampling. """ function OneSampleTTestElementWise(d1::UncertainDataset, d2::UncertainDataset, n::Int = 1000; μ0::Real = 0) N = length(d1) [OneSampleTTest(resample(d1[i], n), resample(d2[i], n), μ0) for i in 1:N] end export EqualVarianceTTest, EqualVarianceTTestPooled, EqualVarianceTTestElementWise, UnequalVarianceTTest, UnequalVarianceTTestPooled, UnequalVarianceTTestElementWise, OneSampleTTest, OneSampleTTestPooled, OneSampleTTestElementWise
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2971
import HypothesisTests.LjungBoxTest import HypothesisTests.BoxPierceTest # # # """ # LjungBoxTest(d::UncertainDataset; n::Int = 10000, # lag::Int = 1, dof::Int = 0) -> LjungBoxTest # # Compute the Ljung-Box `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d`. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function LjungBoxTest(d::UncertainDataset; lag::Int = 1, dof::Int = 0) # LjungBoxTest(resample(d, 1)[1], lag, dof) # end # # """ # LjungBoxTest(d::UncertainDataset, n_tests::Int = 100; lag::Int = 1, dof::Int = 0) -> LjungBoxTest # # Compute the Ljung-Box `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d` # by performing the test on `n` independent draws of the dataset. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function LjungBoxTest(d::UncertainDataset, n_tests::Int = 100; lag::Int = 1, dof::Int = 0) # [LjungBoxTest(resample(d)[1], lag, dof) for i = 1:n_tests] # end # # """ # BoxPierceTest(d::UncertainDataset; # lag::Int = 1, dof::Int = 0) -> Vector{BoxPierceTest} # # Compute the Box-Pierce `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d`. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function BoxPierceTest(d::UncertainDataset; lag::Int = 1, dof::Int = 0) # BoxPierceTest(resample(d, 1)[1], lag, dof) # end # # # """ # BoxPierceTest(d::UncertainDataset, n_tests::Int = 100; # lag::Int = 1, dof::Int = 0) -> BoxPierceTest # # Compute the BoxPierceTest `Q` statistic to test the null hypothesis of # independence in a data series represented by the uncertain dataset `d` # by performing the test on `n` independent draws of the dataset. # # `lag` specifies the number of lags used in the construction of `Q`. When # testing the residuals of an estimated model, `dof` has to be set to the number # of estimated parameters. E.g., when testing the residuals of an ARIMA(p,0,q) # model, set `dof=p+q`. # """ # function BoxPierceTest(d::UncertainDataset, n_tests::Int = 100; # lag::Int = 1, dof::Int = 0) # [BoxPierceTest(resample(d, 1)[1], lag, dof) for i = 1:n_tests] # end # # # export # LjungBoxTest, # BoxPierceTest
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1548
########################################################################################## # Uncertain values represented by theoretical distributions. # Base stats can be estimated directly from the distributions, so no need for resampling. # These method definitions are just for compatibility with other uncertain values, which # may not have analytically determined statistics associated with them. ########################################################################################## StatsBase.mode(uv::TheoreticalDistributionScalarValue, n::Int) = mode(uv) Statistics.mean(uv::TheoreticalDistributionScalarValue, n::Int) = mean(uv) Statistics.median(uv::TheoreticalDistributionScalarValue, n::Int) = median(uv) Statistics.middle(uv::TheoreticalDistributionScalarValue, n::Int) = middle(uv) Statistics.quantile(uv::TheoreticalDistributionScalarValue, q, n::Int) = quantile(uv, q) Statistics.std(uv::TheoreticalDistributionScalarValue, n::Int) = std(uv) Statistics.var(uv::TheoreticalDistributionScalarValue, n::Int) = var(uv) StatsBase.mode(uv::UncertainScalarTheoreticalFit, n::Int) = mode(uv) Statistics.mean(uv::UncertainScalarTheoreticalFit, n::Int) = mean(uv) Statistics.median(uv::UncertainScalarTheoreticalFit, n::Int) = median(uv) Statistics.middle(uv::UncertainScalarTheoreticalFit, n::Int) = middle(uv) Statistics.quantile(uv::UncertainScalarTheoreticalFit, q, n::Int) = quantile(uv, q) Statistics.std(uv::UncertainScalarTheoreticalFit, n::Int) = std(uv) Statistics.var(uv::UncertainScalarTheoreticalFit, n::Int) = var(uv)
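# ---- Illustrative note (editor's sketch; not part of the original file) ----
# For theoretical values the trailing `n` argument is ignored, so both calls
# below return the exact distribution statistic rather than a sample estimate.
# Assumes the `UncertainValue` constructor from UncertainData.jl.
using UncertainData, Distributions

uv = UncertainValue(Normal, 1.0, 0.5)
mean(uv)          # exact mean of the furnishing N(1.0, 0.5) distribution
mean(uv, 10_000)  # identical result: the n-draw method just forwards to mean(uv)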
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3289
import ..UncertainValues: AbstractUncertainScalarKDE, TheoreticalDistributionScalarValue, UncertainScalarTheoreticalFit import Statistics # The general strategy is to resample, then compute the statistic on the draws. """ mean(uv::AbstractUncertainValue, n::Int) Compute the mean of an uncertain value over an `n`-draw sample of it. """ Statistics.mean(uv::AbstractUncertainValue, n::Int) = mean(resample(uv, n)) """ mode(uv::AbstractUncertainValue, n::Int) Compute the mode of an uncertain value over an `n`-draw sample of it. """ StatsBase.mode(uv::AbstractUncertainValue, n::Int) = mode(resample(uv, n)) """ median(uv::AbstractUncertainValue, n::Int) Compute the median of an uncertain value over an `n`-draw sample of it. """ Statistics.median(uv::AbstractUncertainValue, n::Int) = median(resample(uv, n)) """ middle(uv::AbstractUncertainValue, n::Int) Compute the middle of an uncertain value over an `n`-draw sample of it. """ Statistics.middle(uv::AbstractUncertainValue, n::Int) = middle(resample(uv, n)) """ quantile(uv::AbstractUncertainValue, q, n::Int) Compute the quantile(s) `q` of an uncertain value over an `n`-draw sample of it. """ Statistics.quantile(uv::AbstractUncertainValue, q, n::Int) = quantile(resample(uv, n), q) """ std(uv::AbstractUncertainValue, n::Int) Compute the standard deviation of an uncertain value over an `n`-draw sample of it. """ Statistics.std(uv::AbstractUncertainValue, n::Int) = std(resample(uv, n)) """ var(uv::AbstractUncertainValue, n::Int) Compute the variance of an uncertain value over an `n`-draw sample of it. """ Statistics.var(uv::AbstractUncertainValue, n::Int) = var(resample(uv, n)) # But for theoretical distributions, we may directly access properties of the furnishing # distributions ########################################################################################## # Uncertain values represented by theoretical distributions. # Base stats can be estimated directly from the distributions, so no need for resampling. # These method definitions are just for compatibility with other uncertain values, which # may not have analytically determined statistics associated with them. ########################################################################################## StatsBase.mode(uv::TheoreticalDistributionScalarValue, n::Int) = mode(uv) Statistics.mean(uv::TheoreticalDistributionScalarValue, n::Int) = mean(uv) Statistics.median(uv::TheoreticalDistributionScalarValue, n::Int) = median(uv) Statistics.middle(uv::TheoreticalDistributionScalarValue, n::Int) = middle(uv) Statistics.quantile(uv::TheoreticalDistributionScalarValue, q, n::Int) = quantile(uv, q) Statistics.std(uv::TheoreticalDistributionScalarValue, n::Int) = std(uv) Statistics.var(uv::TheoreticalDistributionScalarValue, n::Int) = var(uv) StatsBase.mode(uv::UncertainScalarTheoreticalFit, n::Int) = mode(uv) Statistics.mean(uv::UncertainScalarTheoreticalFit, n::Int) = mean(uv) Statistics.median(uv::UncertainScalarTheoreticalFit, n::Int) = median(uv) Statistics.middle(uv::UncertainScalarTheoreticalFit, n::Int) = middle(uv) Statistics.quantile(uv::UncertainScalarTheoreticalFit, q, n::Int) = quantile(uv, q) Statistics.std(uv::UncertainScalarTheoreticalFit, n::Int) = std(uv) Statistics.var(uv::UncertainScalarTheoreticalFit, n::Int) = var(uv)
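# ---- Illustrative usage (editor's sketch; not part of the original file) ----
# For sample-based (e.g. KDE) uncertain values there is no closed-form
# statistic, so the methods above estimate it from `n` draws. This assumes
# the vector-based `UncertainValue` constructor from UncertainData.jl, which
# fits a kernel density estimate to the sample.
using UncertainData, Distributions

s = rand(Normal(2.0, 0.5), 500)    # some empirical sample
uv = UncertainValue(s)             # KDE-based uncertain value
mean(uv, 10_000), std(uv, 10_000)  # Monte Carlo estimates from 10_000 draws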
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5955
import ..Resampling.resample import ..UncertainValues.AbstractUncertainScalarKDE import Statistics ######################################### # Statistics on `AbstractUncertainValueDataset`s ######################################### """ mean(d::AbstractUncertainValueDataset, n::Int) Computes the element-wise mean of a dataset of uncertain values. Takes the mean of an `n`-draw sample for each element. """ Statistics.mean(d::AbstractUncertainValueDataset, n::Int; kwargs...) = [mean(resample(d[i], n); kwargs...) for i = 1:length(d)] """ median(d::AbstractUncertainValueDataset, n::Int) Computes the element-wise median of a dataset of uncertain values. Takes the median of an `n`-draw sample for each element. """ Statistics.median(d::AbstractUncertainValueDataset, n::Int; kwargs...) = [median(resample(d[i], n); kwargs...) for i = 1:length(d)] """ middle(d::AbstractUncertainValueDataset, n::Int) Compute the middle of `n` realisations of an `AbstractUncertainValueDataset`. """ Statistics.middle(d::AbstractUncertainValueDataset, n::Int; kwargs...) = [middle(resample(d[i], n); kwargs...) for i = 1:length(d)] """ quantile(d::AbstractUncertainValueDataset, p, n::Int; kwargs...) Compute element-wise quantile(s) `p` of a dataset consisting of uncertain values. Takes the quantiles of an `n`-draw sample for each element. """ Statistics.quantile(d::AbstractUncertainValueDataset, p, n::Int; kwargs...) = [quantile(resample(d[i], n), p; kwargs...) for i = 1:length(d)] """ std(d::AbstractUncertainValueDataset, n::Int; kwargs...) Computes the element-wise standard deviation of a dataset of uncertain values. Takes the standard deviation of an `n`-draw sample for each element. """ Statistics.std(d::AbstractUncertainValueDataset, n::Int; kwargs...) = [std(resample(d[i], n); kwargs...) for i = 1:length(d)] """ var(d::AbstractUncertainValueDataset, n::Int; kwargs...) Computes the element-wise sample variance of a dataset of uncertain values. Takes the sample variance of an `n`-draw sample for each element. """ Statistics.var(d::AbstractUncertainValueDataset, n::Int; kwargs...) = [var(resample(d[i], n); kwargs...) for i = 1:length(d)] """ cor(d1::AbstractUncertainValueDataset, d2::AbstractUncertainValueDataset, n::Int; kwargs...) Obtain a distribution for the Pearson correlation between two uncertain datasets `d1` and `d2`. This is done by resampling both datasets multiple times and computing the correlation between those draws. This yields a distribution of correlation estimates. The procedure is as follows. 1. First, draw a realisation of `d1` according to the distributions furnishing its uncertain values. 2. Then, draw a realisation of `d2` according to its furnishing distributions. 3. Compute the correlation between those two draws/realisations, both of which are vectors of length `L = length(d1) = length(d2)`. 4. Repeat the procedure `n` times, drawing `n` separate pairs of realisations of `d1` and `d2`. This yields `n` estimates of the correlation between `d1` and `d2`, which is returned as a vector. """ Statistics.cor(d1::DT, d2::DT, n::Int; kwargs...) where {DT <: AbstractUncertainValueDataset} = [cor(resample(d1), resample(d2); kwargs...) for i = 1:n] """ cor(d1::AbstractUncertainValueDataset, d2::AbstractUncertainValueDataset; kwargs...) Obtain a single estimate of the Pearson correlation between two uncertain datasets `d1` and `d2`. This is done by resampling both datasets independently, that is: first, draw a realisation of `d1` according to the distributions furnishing its uncertain values. 
Then, draw a realisation of `d2` according to its furnishing distributions. Those two draws/realisations are both vectors of length `L = length(d1) = length(d2)`. Finally, compute the correlation between those draws. This yields a single estimate of the correlation between `d1` and `d2`. """ Statistics.cor(d1::DT, d2::DT; kwargs...) where {DT <: AbstractUncertainValueDataset} = cor(resample(d1), resample(d2); kwargs...) """ cov(d1::AbstractUncertainValueDataset, d2::AbstractUncertainValueDataset, n::Int; kwargs...) Obtain a distribution for the covariance between two uncertain datasets `d1` and `d2`. This is done by resampling both datasets multiple times and computing the covariance between those draws. This yields a distribution of covariance estimates. The procedure is as follows. 1. First, draw a realisation of `d1` according to the distributions furnishing its uncertain values. 2. Then, draw a realisation of `d2` according to its furnishing distributions. 3. Compute the covariance between those two draws/realisations, both of which are vectors of length `L = length(d1) = length(d2)`. 4. Repeat the procedure `n` times, drawing `n` separate pairs of realisations of `d1` and `d2`. This yields `n` estimates of the covariance between `d1` and `d2`, which is returned as a vector. """ Statistics.cov(d1::DT, d2::DT, n::Int; kwargs...) where {DT <: AbstractUncertainValueDataset} = [cov(resample(d1), resample(d2); kwargs...) for i = 1:n] """ cov(d1::AbstractUncertainValueDataset, d2::AbstractUncertainValueDataset; kwargs...) Obtain a single estimate for the covariance between two uncertain datasets `d1` and `d2`. This is done by resampling both datasets independently, that is: first, draw a realisation of `d1` according to the distributions furnishing its uncertain values. Then, draw a realisation of `d2` according to its furnishing distributions. Those two draws/realisations are both vectors of length `L = length(d1) = length(d2)`. Finally, compute the covariance between those draws. This yields a single estimate of the covariance between `d1` and `d2`. """ Statistics.cov(d1::DT, d2::DT; kwargs...) where {DT <: AbstractUncertainValueDataset} = cov(resample(d1), resample(d2); kwargs...) export cor, cov, var, std, quantile, mean, median
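# ---- Illustrative usage (editor's sketch; not part of the original file) ----
# Assumes the `UncertainValue`/`UncertainDataset` constructors from
# UncertainData.jl; dataset contents and the draw count of 500 are arbitrary.
using UncertainData, Distributions, Statistics

d1 = UncertainDataset([UncertainValue(Normal, i, 0.3) for i in 1:20])
d2 = UncertainDataset([UncertainValue(Normal, 2i, 0.3) for i in 1:20])

cor(d1, d2)                     # a single estimate from one pair of draws
cors = cor(d1, d2, 500)         # a 500-member distribution of estimates
quantile(cors, [0.025, 0.975])  # e.g. a 95% interval for the correlation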
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
19895
""" countne(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Estimate a `n`-member distribution on the number of indices at which the elements of two collections of uncertain values are not equal. This is done by repeating the following procedure `n` times: 1. Draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Draw a length-`L` realisation of `y` in the same manner. 3. Count the number of indices at which the elements of the two length-`L` draws are not equal. This yields `n` counts of non-equal values between `n` pairs of independent realisations of `x` and `y`. The `n`-member distribution of nonequal-value counts is returned as a vector. """ StatsBase.countne(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.countne, x, y, n) """ counteq(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Estimate a `n`-member distribution on the number of indices at which the elements of two collections of uncertain values are equal. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Count the number of indices at which the elements of the two length-`L` draws are equal. This yields `n` counts of non-equal values between `n` pairs of independent realisations of `x` and `y`. The `n`-member distribution of equal-value counts is returned as a vector. """ StatsBase.counteq(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.counteq, x, y, n) """ corkendall(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Estimate a `n`-member distribution on Kendalls's rank correlation coefficient between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute Kendall's rank correlation coefficient between the two length-`L` draws. This yields `n` computations of Kendall's rank correlation coefficient between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of correlation estimates is returned as a vector. """ StatsBase.corkendall(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.corspearman, x, y, n) """ corspearman(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Estimate a `n`-member distribution on Spearman's rank correlation coefficient between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. 
sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute Spearman's rank correlation coefficient between the two length-`L` draws. This yields `n` estimates of Spearman's rank correlation coefficient between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of correlation estimates is returned as a vector. """ StatsBase.corspearman(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.corspearman, x, y, n) """ cor(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Estimate an `n`-member distribution on the Pearson correlation coefficient between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the Pearson correlation coefficient between the two length-`L` draws. This yields `n` estimates of the Pearson correlation coefficient between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of correlation estimates is returned as a vector. """ StatsBase.cor(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.cor, x, y, n) """ cov(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int; corrected::Bool = true) Obtain a distribution on the covariance between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the covariance between the two length-`L` draws. This yields `n` estimates of the covariance between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of covariance estimates is returned as a vector. If `corrected` is `true` (the default), then for each pair of draws the sum is scaled with `L - 1`, whereas the sum is scaled with `L` if `corrected` is `false`, where `L = length(x)` is the number of observations in each draw. """ StatsBase.cov(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int; corrected::Bool = true) = resample(StatsBase.cov, x, y, n; corrected = corrected) """ crosscor(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, [lags], n::Int; demean = true) Obtain a distribution over the cross correlation between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the cross correlation between the two length-`L` draws. 
This yields `n` estimates of the cross correlation between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of cross correlation estimates is returned as a vector. `demean` specifies whether, at each iteration, the respective means of the draws should be subtracted from them before computing their cross correlation. When left unspecified, the `lags` used are `-min(n-1, 10*log10(n))` to `min(n, 10*log10(n))`. The output is normalized by `sqrt(var(x_draw)*var(y_draw))`. See `crosscov` for the unnormalized form. """ function StatsBase.crosscor(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int; demean = true) crosscor_estimates = Vector{Vector{Float64}}(undef, n) for i = 1:n draw_x = float.(resample(x)) draw_y = float.(resample(y)) crosscor_estimates[i] = StatsBase.crosscor(draw_x, draw_y, demean = demean) end return crosscor_estimates end function StatsBase.crosscor(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, lags, n::Int; demean = true) crosscor_estimates = Vector{Vector{Float64}}(undef, n) for i = 1:n draw_x = float.(resample(x)) draw_y = float.(resample(y)) crosscor_estimates[i] = StatsBase.crosscor(draw_x, draw_y, lags, demean = demean) end return crosscor_estimates end """ crosscov(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, [lags], n::Int; demean = true) Obtain a distribution over the cross covariance function (CCF) between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the CCF between the two length-`L` draws. This yields `n` estimates of the CCF between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of CCF estimates is returned as a vector. `demean` specifies whether, at each iteration, the respective means of the draws should be subtracted from them before computing their CCF. When left unspecified, the `lags` used are `-min(n-1, 10*log10(n))` to `min(n, 10*log10(n))`. The output is not normalized. See `crosscor` for a function with normalization. """ function StatsBase.crosscov(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int; demean = true) crosscov_estimates = Vector{Vector{Float64}}(undef, n) for i = 1:n draw_x = float.(resample(x)) draw_y = float.(resample(y)) crosscov_estimates[i] = StatsBase.crosscov(draw_x, draw_y, demean = demean) end return crosscov_estimates end function StatsBase.crosscov(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, lags, n::Int; demean = true) crosscov_estimates = Vector{Vector{Float64}}(undef, n) for i = 1:n draw_x = float.(resample(x)) draw_y = float.(resample(y)) crosscov_estimates[i] = StatsBase.crosscov(draw_x, draw_y, lags, demean = demean) end return crosscov_estimates end """ gkldiv(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Obtain a distribution over the generalized Kullback-Leibler divergence between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. 
sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the generalized Kullback-Leibler divergence between the two length-`L` draws. This yields `n` estimates of the generalized Kullback-Leibler divergence between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of generalized Kullback-Leibler divergence estimates is returned as a vector. """ StatsBase.gkldiv(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.gkldiv, x, y, n) """ kldivergence(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, [b], n::Int) Obtain a distribution over the Kullback-Leibler divergence between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the Kullback-Leibler divergence between the two length-`L` draws. This yields `n` estimates of the Kullback-Leibler divergence between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of Kullback-Leibler divergence estimates is returned as a vector. Optionally a real number `b` can be specified such that the divergence is scaled by `1/log(b)`. """ StatsBase.kldivergence(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.kldivergence, x, y, n) StatsBase.kldivergence(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, b, n::Int) = resample(StatsBase.kldivergence, x, y, n, b) """ maxad(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Obtain a distribution over the maximum absolute deviation between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the maximum absolute deviation between the two length-`L` draws. This yields `n` estimates of the maximum absolute deviation between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of maximum absolute deviation estimates is returned as a vector. """ StatsBase.maxad(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.maxad, x, y, n) """ meanad(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Obtain a distribution over the mean absolute deviation between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. 
Compute the mean absolute deviation between the two length-`L` draws. This yields `n` estimates of the mean absolute deviation between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of mean absolute deviation estimates is returned as a vector. """ StatsBase.meanad(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.meanad, x, y, n) """ msd(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Obtain a distribution over the mean squared deviation between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the mean squared deviation between the two length-`L` draws. This yields `n` estimates of the mean squared deviation between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of mean squared deviation estimates is returned as a vector. """ StatsBase.msd(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.msd, x, y, n) """ psnr(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, maxv, n::Int) Obtain a distribution over the peak signal-to-noise ratio (PSNR) between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the PSNR between the two length-`L` draws. This yields `n` estimates of the peak signal-to-noise ratio between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of PSNR estimates is returned as a vector. The PSNR is computed as `10 * log10(maxv^2 / msd(x_draw, y_draw))`, where `maxv` is the maximum possible value `x` or `y` can take """ StatsBase.psnr(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, maxv, n::Int) = resample(StatsBase.psnr, x, y, n, maxv) """ rmsd(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int, normalize = false) Obtain a distribution over the root mean squared deviation between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the root mean squared deviation between the two length-`L` draws. This yields `n` estimates of the root mean squared deviation between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of root mean squared deviation estimates is returned as a vector. The root mean squared deviation is computed as `sqrt(msd(x_draw, y_draw))` at each iteration. 
Optionally, `x_draw` and `y_draw` may be normalised. """ StatsBase.rmsd(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int; normalize = false) = resample(StatsBase.rmsd, x, y, n; normalize = normalize) """ sqL2dist(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) Obtain a distribution over the squared L2 distance between two collections of uncertain values. This is done by repeating the following procedure `n` times: 1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation. 2. Second, draw a length-`L` realisation of `y` in the same manner. 3. Compute the squared L2 distance between the two length-`L` draws. This yields `n` estimates of the squared L2 distance between `n` independent pairs of realisations of `x` and `y`. The `n`-member distribution of squared L2 distance estimates is returned as a vector. The squared L2 distance is computed as ``\\sum_{i=1}^n |x_i - y_i|^2``. """ StatsBase.sqL2dist(x::UVAL_COLLECTION_TYPES, y::UVAL_COLLECTION_TYPES, n::Int) = resample(StatsBase.sqL2dist, x, y, n)
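# ---- Illustrative usage (editor's sketch; not part of the original file) ----
# The same pattern applies to every wrapper above: pass two collections of
# uncertain values plus the number of resampling draws `n`, and get back an
# `n`-member distribution of estimates. This assumes plain vectors of
# uncertain values are among the `UVAL_COLLECTION_TYPES` accepted by the
# package, and uses the `UncertainValue` constructor from UncertainData.jl.
using UncertainData, Distributions, StatsBase

x = [UncertainValue(Normal, i, 0.2) for i in 1:15]
y = [UncertainValue(Normal, i + rand(), 0.2) for i in 1:15]

rhos = corspearman(x, y, 1000)  # 1000 Spearman correlation estimates
mean(rhos), std(rhos)           # summarise the estimate distribution
dists = sqL2dist(x, y, 1000)    # 1000 squared-L2 distance estimates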
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
24436
import StatsBase

"""
    median(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the median of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the median is computed for each of those length-`L` realisations, yielding a distribution of median estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the median for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the median of `x`, which are returned as a vector.
"""
function StatsBase.median(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.median, x, n)
end

"""
    mean(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the mean of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the mean is computed for each of those length-`L` realisations, yielding a distribution of mean estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the mean for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the mean of `x`, which are returned as a vector.
"""
function StatsBase.mean(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.mean, x, n)
end

"""
    mode(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the mode of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the mode is computed for each of those length-`L` realisations, yielding a distribution of mode estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the mode for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the mode of `x`, which are returned as a vector.
"""
function StatsBase.mode(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.mode, x, n)
end

"""
    quantile(x::UVAL_COLLECTION_TYPES, q, n::Int)

Obtain a distribution for the quantile(s) `q` of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the quantile is computed for each of those length-`L` realisations, yielding a distribution of quantile estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the quantile for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the quantile of `x`, which are returned as a vector.
"""
function StatsBase.quantile(x::UVAL_COLLECTION_TYPES, q, n::Int)
    resample(StatsBase.quantile, x, n, q)
end

"""
    iqr(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the interquartile range (IQR), i.e. the 75th percentile minus the 25th percentile, of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the IQR is computed for each of those length-`L` realisations, yielding a distribution of IQR estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the IQR for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the IQR of `x`, which are returned as a vector.
"""
function StatsBase.iqr(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.iqr, x, n)
end

"""
    middle(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the middle of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the middle is computed for each of those length-`L` realisations, yielding a distribution of middle estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the middle for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the middle of `x`, which are returned as a vector.
"""
function StatsBase.middle(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.middle, x, n)
end

"""
    std(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the standard deviation of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the standard deviation is computed for each of those length-`L` realisations, yielding a distribution of standard deviation estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the standard deviation for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the standard deviation of `x`, which are returned as a vector.
"""
function StatsBase.std(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.std, x, n)
end

"""
    var(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the variance of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the variance is computed for each of those length-`L` realisations, yielding a distribution of variance estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the variance for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the variance of `x`, which are returned as a vector.
"""
function StatsBase.var(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.var, x, n)
end

"""
    genmean(x::UVAL_COLLECTION_TYPES, p, n::Int)

Obtain a distribution for the generalized/power mean with exponent `p` of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the generalized mean is computed for each of those length-`L` realisations, yielding a distribution of generalized mean estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the generalized mean for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the generalized mean of `x`, which are returned as a vector.
"""
function StatsBase.genmean(x::UVAL_COLLECTION_TYPES, p, n::Int)
    resample(StatsBase.genmean, x, n, p)
end

"""
    genvar(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the generalized sample variance of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the generalized sample variance is computed for each of those length-`L` realisations, yielding a distribution of generalized sample variance estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the generalized sample variance for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the generalized sample variance of `x`, which are returned as a vector.
"""
function StatsBase.genvar(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.genvar, x, n)
end

"""
    harmmean(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the harmonic mean of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the harmonic mean is computed for each of those length-`L` realisations, yielding a distribution of harmonic mean estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the harmonic mean for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the harmonic mean of `x`, which are returned as a vector.
"""
function StatsBase.harmmean(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.harmmean, x, n)
end

"""
    geomean(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the geometric mean of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the geometric mean is computed for each of those length-`L` realisations, yielding a distribution of geometric mean estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the geometric mean for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the geometric mean of `x`, which are returned as a vector.
"""
function StatsBase.geomean(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.geomean, x, n)
end

"""
    kurtosis(x::UVAL_COLLECTION_TYPES, n::Int, f = StatsBase.mean)

Obtain a distribution for the kurtosis of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the kurtosis is computed for each of those length-`L` realisations, yielding a distribution of kurtosis estimates.

Optionally, a center function `f` can be specified. This function is used to compute the center of each draw, i.e. for the i-th draw, call `StatsBase.kurtosis(draw_i, f(draw_i))`.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the kurtosis for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the kurtosis of `x`, which are returned as a vector.
"""
function StatsBase.kurtosis(x::UVAL_COLLECTION_TYPES, n::Int, f = StatsBase.mean)
    kurtosis_estimates = zeros(Float64, n)
    for i = 1:n
        draw = resample(x)
        kurtosis_estimates[i] = StatsBase.kurtosis(draw, f(draw))
    end
    return kurtosis_estimates
end

"""
    moment(x::UVAL_COLLECTION_TYPES, k, n::Int)

Obtain a distribution for the `k`-th order central moment of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the `k`-th order central moment is computed for each of those length-`L` realisations, yielding a distribution of `k`-th order central moment estimates.

The procedure is as follows.

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the `k`-th order central moment for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the `k`-th order central moment of `x`, which are returned as a vector.
"""
function StatsBase.moment(x::UVAL_COLLECTION_TYPES, k, n::Int)
    resample(StatsBase.moment, x, n, k)
end

"""
    percentile(x::UVAL_COLLECTION_TYPES, p, n::Int)

Obtain a distribution for the percentile(s) `p` of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the percentile is computed for each of those length-`L` realisations, yielding a distribution of percentile estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the percentile for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the percentile of `x`, which are returned as a vector.
"""
function StatsBase.percentile(x::UVAL_COLLECTION_TYPES, p, n::Int)
    resample(StatsBase.percentile, x, n, p)
end

"""
    renyientropy(x::UVAL_COLLECTION_TYPES, α, n::Int)

Obtain a distribution for the Rényi (generalized) entropy of order `α` of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the generalized entropy is computed for each of those length-`L` realisations, yielding a distribution of generalized entropy estimates.

The procedure is as follows.

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the Rényi (generalized) entropy of order `α` for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the Rényi (generalized) entropy of order `α` of `x`, which are returned as a vector.
"""
function StatsBase.renyientropy(x::UVAL_COLLECTION_TYPES, α, n::Int)
    resample(StatsBase.renyientropy, x, n, α)
end

"""
    rle(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the run-length encoding of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the run-length encoding is computed for each of those length-`L` realisations, yielding a distribution of run-length encoding estimates. Returns a vector of tuples of run-length encodings.

The procedure is as follows.

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the run-length encoding for the realisation. This gives a tuple, where the first element of the tuple is a vector of values of the input and the second is the number of consecutive occurrences of each element.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the run-length encoding of `x`, which are returned as a vector of the run-length encoding tuples.
"""
function StatsBase.rle(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.rle, x, n)
end

"""
    sem(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the standard error of the mean of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the standard error of the mean is computed for each of those length-`L` realisations, yielding a distribution of standard error of the mean estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the standard error of the mean for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the standard error of the mean of `x`, which are returned as a vector.
"""
function StatsBase.sem(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.sem, x, n)
end

"""
    skewness(x::UVAL_COLLECTION_TYPES, n::Int, f = StatsBase.mean)

Obtain a distribution for the skewness of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the skewness is computed for each of those length-`L` realisations, yielding a distribution of skewness estimates.

Optionally, a center function `f` can be specified. This function is used to compute the center of each draw, i.e. for the i-th draw, call `StatsBase.skewness(draw_i, f(draw_i))`.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the skewness for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the skewness of `x`, which are returned as a vector.
"""
function StatsBase.skewness(x::UVAL_COLLECTION_TYPES, n::Int, f = StatsBase.mean)
    skewness_estimates = zeros(Float64, n)
    for i = 1:n
        draw = resample(x)
        skewness_estimates[i] = StatsBase.skewness(draw, f(draw))
    end
    return skewness_estimates
end

"""
    span(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the span of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the span is computed for each of those length-`L` realisations, yielding a distribution of span estimates. Returns a length-`n` vector of `span`s, where the i-th span is the range `minimum(draw_x_i):maximum(draw_x_i)`.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the span for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the span of `x`, which are returned as a vector.
"""
function StatsBase.span(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.span, x, n)
end

"""
    totalvar(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the total variance of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the total variance is computed for each of those length-`L` realisations, yielding a distribution of total variance estimates.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the total variance for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the total variance of `x`, which are returned as a vector.
"""
function StatsBase.totalvar(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.totalvar, x, n)
end

"""
    summarystats(x::UVAL_COLLECTION_TYPES, n::Int)

Obtain a distribution for the summary statistics of a collection of uncertain values.

This is done by first drawing `n` length-`L` realisations of `x`, where `L = length(x)`. Then, the summary statistics are computed for each of those length-`L` realisations, yielding a distribution of summary statistics estimates. Returns a length-`n` vector of `SummaryStats` objects containing the mean, minimum, 25th percentile, median, 75th percentile, and maximum for each draw of `x`.

Detailed steps:

1. First, draw a length-`L` realisation of `x` by drawing one random number from each uncertain value furnishing the dataset. The draws are independent, so that no element-wise dependencies (e.g. sequential correlations) that are not already present in the data are introduced in the realisation.
2. Compute the summary statistics for the realisation.
3. Repeat the procedure `n` times, drawing `n` independent realisations of `x`. This yields `n` estimates of the summary statistics of `x`, which are returned as a vector.
"""
function StatsBase.summarystats(x::UVAL_COLLECTION_TYPES, n::Int)
    resample(StatsBase.summarystats, x, n)
end
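# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# A minimal example of the resampling-based collection statistics defined above.
# It relies only on constructors documented elsewhere in this package; the
# numbers are arbitrary.
#
# using UncertainData, Distributions, StatsBase
# uvals = [UncertainValue(Normal, i, 0.5) for i in 1:10]
# mean_dist = mean(uvals, 1000)            # 1000 estimates of the mean
# std_dist = std(uvals, 1000)              # 1000 estimates of the standard deviation
# q90_dist = quantile(uvals, 0.9, 1000)    # 1000 estimates of the 0.9 quantile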
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
8535
""" countne(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Count the number of indices at which the elements of two independent length-`n` draws of `x` and for `y` are not equal. """ StatsBase.countne(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.countne, x, y, n) """ counteq(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Count the number of indices at which the elements of two independent length-`n` draws of `x` and for `y` are equal. """ StatsBase.counteq(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.counteq, x, y, n) """ corkendall(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute Kendalls's rank correlation coefficient between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing Kendalls's rank correlation coefficient between those length-`n` draws. """ StatsBase.corkendall(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.corspearman, x, y, n) """ corspearman(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute Spearman's rank correlation coefficient between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the Spearman's rank correlation coefficient between those length-`n` draws. """ StatsBase.corspearman(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.corspearman, x, y, n) """ cor(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute the Pearson correlation between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the Pearson correlation between those length-`n` draws. """ StatsBase.cor(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.cor, x, y, n) """ cov(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; corrected::Bool = true) Compute the covariance between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y` , then computing the covariance between those length-`n` draws. """ StatsBase.cov(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; corrected::Bool = true) = resample(StatsBase.cov, x, y, n; corrected = corrected) """ crosscor(x::AbstractUncertainValue, y::AbstractUncertainValue, [lags], n::Int; demean = true) Compute the cross correlation between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, `x_draw` and `y_draw`, then computing the cross correlation between those length-`n` draws. `demean` specifies whether the respective means of the `x_draw` and `y_draw` should be subtracted from them before computing their cross correlation. When left unspecified, the `lags` used are `-min(n-1, 10*log10(n))` to `min(n, 10*log10(n))`. The output is normalized by `sqrt(var(x_draw)*var(y_draw))`. See `crosscov` for the unnormalized form. 
""" StatsBase.crosscor(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; demean = true) = resample(StatsBase.crosscor, x, y, n; demean = demean) StatsBase.crosscor(x::AbstractUncertainValue, y::AbstractUncertainValue, lags, n::Int; demean = true) = resample(StatsBase.crosscor, x, y, n, lags; demean = demean) """ crosscov(x::AbstractUncertainValue, y::AbstractUncertainValue, [lags], n::Int; demean = true) Compute the cross covariance function (CCF) between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, `x_draw` and `y_draw`, then computing the cross correlation between those length-`n` draws. `demean` specifies whether the respective means of the `x_draw` and `y_draw` should be subtracted from them before computing their CCF. When left unspecified, the `lags` used are `-min(n-1, 10*log10(n))` to `min(n, 10*log10(n))`. The output is not normalized. See `crosscor` for a function with normalization. """ StatsBase.crosscov(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; demean = true) = resample(StatsBase.crosscov, x, y, n; demean = demean) StatsBase.crosscov(x::AbstractUncertainValue, y::AbstractUncertainValue, lags, n::Int; demean = true) = resample(StatsBase.crosscov, x, y, n, lags; demean = demean) """ gkldiv(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute the generalized Kullback-Leibler divergence between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the generalized Kullback-Leibler divergence between those length-`n` draws. """ StatsBase.gkldiv(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.gkldiv, x, y, n) """ kldivergence(x::AbstractUncertainValue, y::AbstractUncertainValue, [b], n::Int) Compute the Kullback-Leibler divergence between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the Kullback-Leibler divergence between those length-`n` draws. Optionally a real number `b` can be specified such that the divergence is scaled by `1/log(b)`. """ StatsBase.kldivergence(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.kldivergence, x, y, n) StatsBase.kldivergence(x::AbstractUncertainValue, y::AbstractUncertainValue, b, n::Int) = resample(StatsBase.kldivergence, x, y, n, b) """ maxad(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute the maximum absolute deviation between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the maximum absolute deviation between those length-`n` draws. """ StatsBase.maxad(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.maxad, x, y, n) """ meanad(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute the mean absolute deviation between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the mean absolute deviation between those length-`n` draws. """ StatsBase.meanad(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.meanad, x, y, n) """ msd(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute the mean squared deviation between two uncertain values by independently drawing `n` samples from `x` and `n` samples from `y`, then computing the mean squared deviation between those length-`n` draws. 
""" StatsBase.msd(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.msd, x, y, n) """ psnr(x::AbstractUncertainValue, y::AbstractUncertainValue, maxv, n::Int) Compute the peak signal-to-noise ratio between two uncertain values by independently drawing `n` samples from `x` and from `y`, yielding `x_draw` and `y_draw`, then computing the peak signal-to-noise ratio between those length-`n` draws. The PSNR is computed as `10 * log10(maxv^2 / msd(x_draw, y_draw))`, where `maxv` is the maximum possible value `x` or `y` can take """ StatsBase.psnr(x::AbstractUncertainValue, y::AbstractUncertainValue, maxv, n::Int) = resample(StatsBase.psnr, x, y, n, maxv) """ rmsd(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int, normalize = false) Compute the root mean squared deviation between two uncertain values by independently drawing `n` samples from `x` and from `y`, yielding `x_draw` and `y_draw`, then computing the the root mean squared deviation between those length-`n` draws. The root mean squared deviation is computed as `sqrt(msd(x_draw, y_draw))`. Optionally, `x_draw` and `y_draw` may be normalised. """ StatsBase.rmsd(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int; normalize = false) = resample(StatsBase.rmsd, x, y, n; normalize = normalize) """ sqL2dist(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) Compute the squared L2 distance between two uncertain values by independently drawing `n` samples from `x` and from `y`, then computing the squared L2 distance between those length-`n` draws: ``\\sum_{i=1}^n |x_i - y_i|^2``. """ StatsBase.sqL2dist(x::AbstractUncertainValue, y::AbstractUncertainValue, n::Int) = resample(StatsBase.sqL2dist, x, y, n)
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5875
import StatsBase

"""
    mean(uv::AbstractUncertainValue, n::Int)

Compute the mean of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.mean(x::AbstractUncertainValue, n::Int) = resample(StatsBase.mean, x, n)

"""
    mode(uv::AbstractUncertainValue, n::Int)

Compute the mode of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.mode(x::AbstractUncertainValue, n::Int) = resample(StatsBase.mode, x, n)

"""
    quantile(uv::AbstractUncertainValue, q, n::Int)

Compute the quantile(s) `q` of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.quantile(x::AbstractUncertainValue, q, n::Int) = resample(StatsBase.quantile, x, n, q)

"""
    iqr(uv::AbstractUncertainValue, n::Int)

Compute the interquartile range (IQR), i.e. the 75th percentile minus the 25th percentile, over an `n`-draw sample of an uncertain value.
"""
StatsBase.iqr(x::AbstractUncertainValue, n::Int) = resample(StatsBase.iqr, x, n)

"""
    median(uv::AbstractUncertainValue, n::Int)

Compute the median of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.median(x::AbstractUncertainValue, n::Int) = resample(StatsBase.median, x, n)

"""
    middle(uv::AbstractUncertainValue, n::Int)

Compute the middle of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.middle(x::AbstractUncertainValue, n::Int) = resample(StatsBase.middle, x, n)

"""
    std(uv::AbstractUncertainValue, n::Int)

Compute the standard deviation of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.std(x::AbstractUncertainValue, n::Int) = resample(StatsBase.std, x, n)

"""
    var(uv::AbstractUncertainValue, n::Int)

Compute the variance of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.var(x::AbstractUncertainValue, n::Int) = resample(StatsBase.var, x, n)

"""
    genmean(uv::AbstractUncertainValue, p, n::Int)

Compute the generalized/power mean with exponent `p` of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.genmean(x::AbstractUncertainValue, p, n::Int) = resample(StatsBase.genmean, x, n, p)

"""
    genvar(uv::AbstractUncertainValue, n::Int)

Compute the generalized sample variance of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.genvar(x::AbstractUncertainValue, n::Int) = resample(StatsBase.genvar, x, n)

"""
    harmmean(uv::AbstractUncertainValue, n::Int)

Compute the harmonic mean of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.harmmean(x::AbstractUncertainValue, n::Int) = resample(StatsBase.harmmean, x, n)

"""
    geomean(uv::AbstractUncertainValue, n::Int)

Compute the geometric mean of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.geomean(x::AbstractUncertainValue, n::Int) = resample(StatsBase.geomean, x, n)

"""
    kurtosis(uv::AbstractUncertainValue, n::Int, m = mean(uv, n))

Compute the excess kurtosis of an uncertain value over an `n`-draw sample of it, optionally specifying a center `m`.
"""
StatsBase.kurtosis(x::AbstractUncertainValue, n::Int, m = mean(x, n)) = resample(StatsBase.kurtosis, x, n, m)

"""
    moment(x::AbstractUncertainValue, k, n::Int, m = mean(x, n))

Compute the `k`-th order central moment of an uncertain value over an `n`-draw sample of it, optionally specifying a center `m`.
"""
StatsBase.moment(x::AbstractUncertainValue, k, n::Int, m = mean(x, n)) = resample(StatsBase.moment, x, n, k, m)

"""
    percentile(x::AbstractUncertainValue, p, n::Int)

Compute the percentile(s) `p` of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.percentile(x::AbstractUncertainValue, p, n::Int) = resample(StatsBase.percentile, x, n, p)

"""
    renyientropy(uv::AbstractUncertainValue, α, n::Int)

Compute the Rényi (generalized) entropy of order `α` of an uncertain value over an `n`-draw sample of it.
"""
StatsBase.renyientropy(x::AbstractUncertainValue, α, n::Int) = resample(StatsBase.renyientropy, x, n, α)

"""
    rle(x::AbstractUncertainValue, n::Int)

Compute the run-length encoding of an uncertain value over an `n`-draw sample of it as a tuple. The first element of the tuple is a vector of values of the input and the second is the number of consecutive occurrences of each element.
"""
StatsBase.rle(x::AbstractUncertainValue, n::Int) = resample(StatsBase.rle, x, n)

"""
    sem(x::AbstractUncertainValue, n::Int)

Compute the standard error of the mean of an uncertain value over an `n`-draw sample of it, i.e. `sqrt(var(x_draw, corrected = true) / length(x_draw))`.
"""
StatsBase.sem(x::AbstractUncertainValue, n::Int) = resample(StatsBase.sem, x, n)

"""
    skewness(x::AbstractUncertainValue, n::Int; m = mean(x, n))

Compute the standardized skewness of an uncertain value over an `n`-draw sample of it, optionally specifying a center `m`.
"""
StatsBase.skewness(x::AbstractUncertainValue, n::Int; m = mean(x, n)) = resample(StatsBase.skewness, x, n, m)

"""
    span(x::AbstractUncertainValue, n::Int)

Compute the span of an `n`-draw sample of an uncertain value, i.e. the range `minimum(draw):maximum(draw)`. The minimum and maximum of the draw are computed in one pass using `extrema`.
"""
StatsBase.span(x::AbstractUncertainValue, n::Int) = resample(StatsBase.span, x, n)

"""
    summarystats(uv::AbstractUncertainValue, n::Int)

Compute summary statistics of an uncertain value over an `n`-draw sample of it. Returns a `SummaryStats` object containing the mean, minimum, 25th percentile, median, 75th percentile, and maximum.
"""
StatsBase.summarystats(x::AbstractUncertainValue, n::Int) = resample(StatsBase.summarystats, x, n)

"""
    totalvar(uv::AbstractUncertainValue, n::Int)

Compute the total sample variance of an uncertain value over an `n`-draw sample of it. For a single uncertain value, this is equivalent to the sample variance.
"""
StatsBase.totalvar(x::AbstractUncertainValue, n::Int) = resample(StatsBase.totalvar, x, n)
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
274
abstract type AbstractUncertainDataset end

function summarise(ud::AbstractUncertainDataset)
    _type = typeof(ud)
    summary = "$_type"
    return summary
end

Base.show(io::IO, ud::AbstractUncertainDataset) = println(io, summarise(ud))

export AbstractUncertainDataset
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1229
abstract type AbstractUncertainIndexDataset <: AbstractUncertainValueDataset end

##########################
# Indexing and iteration
##########################

Base.getindex(uvd::AbstractUncertainIndexDataset, i) = uvd.indices[i]
Base.length(uvd::AbstractUncertainIndexDataset) = length(uvd.indices)
Base.size(uvd::AbstractUncertainIndexDataset) = length(uvd)
Base.firstindex(uvd::AbstractUncertainIndexDataset) = 1
Base.lastindex(uvd::AbstractUncertainIndexDataset) = length(uvd.indices)
Base.eachindex(ud::AbstractUncertainIndexDataset) = Base.OneTo(length(ud.indices))
Base.iterate(ud::AbstractUncertainIndexDataset, state = 1) = iterate(ud.indices, state)

Base.minimum(udata::AbstractUncertainIndexDataset) = minimum([minimum(uval) for uval in udata])
Base.maximum(udata::AbstractUncertainIndexDataset) = maximum([maximum(uval) for uval in udata])

###################
# Pretty printing
###################

function summarise(uvd::AbstractUncertainIndexDataset)
    _type = typeof(uvd)
    n_values = length(uvd.indices)
    summary = "$_type with $n_values values"
    return summary
end

Base.show(io::IO, uvd::AbstractUncertainIndexDataset) = println(io, summarise(uvd))

export AbstractUncertainIndexDataset
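# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Because indexing and iteration forward to the `indices` field, index datasets
# behave like vectors. `UncertainIndexDataset` is the concrete type defined
# elsewhere in this package.
#
# using UncertainData, Distributions
# idxs = UncertainIndexDataset([UncertainValue(Uniform, t - 0.1, t + 0.1) for t in 1:5])
# idxs[1]                        # first uncertain index
# length(idxs)                   # 5
# minimum(idxs), maximum(idxs)   # overall lower/upper bounds across all indices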
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1661
""" AbstractUncertainIndexValueDataset A dataset of uncertain value (`UncertainValue` instances), with indices (time, depth, etc..) that are also uncertain values (`UncertainValue` instances). Concrete types must as a minimum implement the following fields: - **`indices::UncertainDataset`**: The (uncertain) indices of the dataset. - **`values::UncertainDataset`**: The (uncertain) values of the dataset. """ abstract type AbstractUncertainIndexValueDataset <: AbstractUncertainDataset end function summarise(uid::AbstractUncertainIndexValueDataset) _type = typeof(uid) n_values = length(uid.values) n_indices = length(uid.indices) summary = "$_type containing $n_values uncertain values coupled with $n_indices uncertain indices" return summary end Base.show(io::IO, uid::AbstractUncertainIndexValueDataset) = println(io, summarise(uid)) Base.getindex(uvd::AbstractUncertainIndexValueDataset, i) = uvd.indices[i], uvd.values[i] Base.length(uvd::AbstractUncertainIndexValueDataset) = length(uvd.values) Base.size(uvd::AbstractUncertainIndexValueDataset) = length(uvd.values) Base.firstindex(uvd::AbstractUncertainIndexValueDataset) = 1 Base.lastindex(uvd::AbstractUncertainIndexValueDataset) = length(uvd.values) import ..UncertainValues: minimum, maximum function Base.minimum(udata::AbstractUncertainIndexValueDataset) [minimum(uval) for uval in udata.indices], [minimum(uval) for uval in udata.values] end function Base.maximum(udata::AbstractUncertainIndexValueDataset) [maximum(uval) for uval in udata.indices], [maximum(uval) for uval in udata.values] end export AbstractUncertainIndexValueDataset
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
1791
import ..UncertainValues: minimum, maximum

"""
    AbstractUncertainValueDataset

A dataset of uncertain values, with fixed indices. Concrete implementations must as a minimum implement the following fields:

- **`values::UncertainDataset`**: The (uncertain) values of the dataset.
"""
abstract type AbstractUncertainValueDataset <: AbstractUncertainDataset end

##########################
# Indexing and iteration
##########################

Base.getindex(uvd::AbstractUncertainValueDataset, i) = uvd.values[i]
Base.length(uvd::AbstractUncertainValueDataset) = length(uvd.values)
Base.size(uvd::AbstractUncertainValueDataset) = length(uvd)
Base.firstindex(uvd::AbstractUncertainValueDataset) = 1
Base.lastindex(uvd::AbstractUncertainValueDataset) = length(uvd.values)
Base.eachindex(ud::AbstractUncertainValueDataset) = Base.OneTo(length(ud.values))
Base.iterate(ud::AbstractUncertainValueDataset, state = 1) = iterate(ud.values, state)

Base.minimum(udata::AbstractUncertainValueDataset) = minimum([minimum(uval) for uval in udata])
Base.maximum(udata::AbstractUncertainValueDataset) = maximum([maximum(uval) for uval in udata])

###################
# Pretty printing
###################

function summarise(ud::AbstractUncertainValueDataset)
    _type = typeof(ud)
    n_values = length(ud.values)
    summary = "$_type with $n_values values"
    return summary
end

Base.show(io::IO, ud::AbstractUncertainValueDataset) = print(io, summarise(ud))

###########################
# Various useful functions
###########################

"""
    distributions(ud::AbstractUncertainValueDataset)

Returns the distributions for all the uncertain values of the dataset.
"""
distributions(ud::AbstractUncertainValueDataset) = [ud[i].distribution for i = 1:length(ud)]

export AbstractUncertainValueDataset, distributions
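# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# `distributions` collects the `distribution` field of each member, so it
# assumes every member of the dataset is distribution-backed.
#
# using UncertainData, Distributions
# d = UncertainDataset([UncertainValue(Normal, 0, 1), UncertainValue(Gamma, 2, 3)])
# distributions(d)   # the two underlying distributions, in order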
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
914
""" UncertainDataset Generic dataset containing uncertain values. ## Fields - **`values::AbstractVector{<:AbstractUncertainValue}`**: The uncertain values. """ struct UncertainDataset <: AbstractUncertainValueDataset values::AbstractVector{<:AbstractUncertainValue} end """ ConstrainedUncertainDataset Generic constrained dataset containing uncertain values. ## Fields - **`values::AbstractVector{<:AbstractUncertainValue}`**: The uncertain values. """ struct ConstrainedUncertainDataset <: AbstractUncertainValueDataset values::AbstractVector{<:AbstractUncertainValue} end UncertainDataset(uv::T) where {T <:AbstractUncertainValue} = UncertainDataset([uv]) ConstrainedUncertainDataset(uv::T) where {T<:AbstractUncertainValue} = ConstrainedUncertainDataset([uv]) ########################## # Sorting ######################### export UncertainDataset, ConstrainedUncertainDataset
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2087
using Reexport

@reexport module UncertainDatasets
    using ..UncertainValues
    using Distributions
    using IntervalArithmetic
    using RecipesBase
    using StatsBase
    using StaticArrays
    using Statistics

    # The abstract type for all types of dataset holding uncertain values
    include("AbstractUncertainDataset.jl")

    # An abstract type for uncertain datasets containing uncertain values yielding scalar
    # values when resampled.
    include("AbstractUncertainValueDataset.jl")
    include("AbstractUncertainIndexDataset.jl")

    # One composite type for indices, another one for values. This distinction allows more
    # flexibility when applying sampling constraints (some constraints may be meaningful
    # only for indices, for example).
    include("UncertainValueDataset.jl")
    include("UncertainIndexDataset.jl")

    # A generic type with all the functionality of `AbstractUncertainValueDataset`, if you
    # can't be bothered with specifying a more particular dataset type.
    include("UncertainDataset.jl")

    # An abstract type for datasets containing both indices and data values.
    include("AbstractUncertainIndexValueDataset.jl")

    # A composite type with two fields: `indices` and `values`. Both fields may be
    # any subtype of AbstractUncertainValueDataset.
    include("UncertainIndexValueDataset.jl")

    # Conversion and promotion
    include("conversions.jl")

    """
        UVAL_COLLECTION_TYPES = Union{UD, UV} where {
            UD <: AbstractUncertainValueDataset,
            UV <: AbstractVector{T} where {T <: AbstractUncertainValue}}

    A type union used to represent collections of uncertain values.
    """
    const UVAL_COLLECTION_TYPES = Union{UD, UV} where {
        UD <: AbstractUncertainValueDataset,
        UV <: AbstractVector{T} where {T <: AbstractUncertainValue}}

    export UVAL_COLLECTION_TYPES
end # module

"""
    UncertainDatasets

A module defining uncertain datasets, which are collections of uncertain values defined in the `UncertainValues` module.
"""
UncertainDatasets
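# --- Illustrative dispatch sketch (added for clarity; not part of the original file). ---
# `UVAL_COLLECTION_TYPES` lets a single method accept both uncertain datasets
# and plain vectors of uncertain values. `first_element` is a hypothetical
# helper, not part of the package.
#
# using UncertainData, Distributions
# first_element(x::UVAL_COLLECTION_TYPES) = x[1]
# first_element(UncertainDataset([UncertainValue(Normal, 0, 1)]))   # dispatches
# first_element([UncertainValue(Normal, 0, 1)])                     # also dispatches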
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
804
""" UncertainIndexDataset Generic dataset containing uncertain indices. ## Fields - **`indices::AbstractVector{AbstractUncertainValue}`**: The uncertain values. """ struct UncertainIndexDataset <: AbstractUncertainIndexDataset indices::AbstractVector{<:AbstractUncertainValue} end """ ConstrainedUncertainIndexDataset Constrained dataset containing uncertain indices. ## Fields - **`indices::AbstractVector{AbstractUncertainValue}`**: The uncertain indices. """ struct ConstrainedUncertainIndexDataset <: AbstractUncertainIndexDataset indices::AbstractVector{<:AbstractUncertainValue} end function UncertainIndexDataset(x::AbstractArray{T, 1}) where T UncertainIndexDataset(CertainValue.(x)) end export UncertainIndexDataset, ConstrainedUncertainIndexDataset, distributions
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5704
""" UncertainIndexValueDataset{ IDXTYP<:AbstractUncertainIndexDataset, VALSTYP<:AbstractUncertainValueDataset} A generic dataset type consisting of a set of uncertain `indices` (e.g. time, depth, order, etc...) and a set of uncertain `values`. The i-th index is assumed to correspond to the i-th value. For example, if `data` is an instance of a `UncertainIndexValueDataset`, then - `data.indices[2]` is the index for the value `data.values[2]` - `data.values[7]` is the value for the index `data.indices[7]`. - `data[3]` is an index-value tuple `(data.indices[3], data.values[3])`. ## Fields - **`indices::T where {T <: AbstractUncertainIndexDataset}`**: The uncertain indices, represented by some type of uncertain index dataset. - **`values::T where {T <: AbstractUncertainValueDataset}`**: The uncertain values, represented by some type of uncertain index dataset. ## Example ```julia # Simulate some data values measured a specific times. times = 1:100 values = sin.(0.0:0.1:100.0) # Assume the data were measured by a device with normally distributed # measurement uncertainties with fluctuating standard deviations σ_range = (0.1, 0.7) uncertain_values = [UncertainValue(Normal, val, rand(Uniform(σ_range...))) for val in values] # Assume the clock used to record the times is uncertain, but with uniformly # distributed noise that doesn't change through time. uncertain_times = [UncertainValue(Uniform, t-0.1, t+0.1) for t in times] # Pair the time-value data. If vectors are provided to the constructor, # the first will be interpreted as the indices and the second as the values. data = UncertainIndexValueDataset(uncertain_times, uncertain_values) # A safer option is to first convert to UncertainIndexDataset and # UncertainValueDataset, so you don't accidentally mix the indices # and the values. uidxs = UncertainIndexDataset(uncertain_times) uvals = UncertainValueDataset(uncertain_values) data = UncertainIndexValueDataset(uidxs, uvals) ``` """ struct UncertainIndexValueDataset{IDXTYP <: AbstractUncertainIndexDataset, VALSTYP <: AbstractUncertainValueDataset} <: AbstractUncertainIndexValueDataset """ The indices of the uncertain index-value dataset. """ indices::IDXTYP """ The values of the uncertain index-value dataset. 
""" values::VALSTYP function UncertainIndexValueDataset( indices::UncertainIndexDataset, values::UncertainValueDataset) IDXTYP = UncertainIndexDataset VALSTYP = UncertainValueDataset new{IDXTYP, VALSTYP}( UncertainIndexDataset(indices), UncertainValueDataset(values) ) end function UncertainIndexValueDataset( indices::ConstrainedUncertainIndexDataset, values::UncertainValueDataset) IDXTYP = ConstrainedUncertainIndexDataset VALSTYP = UncertainValueDataset new{IDXTYP, VALSTYP}(indices, values) end function UncertainIndexValueDataset( indices::UncertainIndexDataset, values::ConstrainedUncertainValueDataset) IDXTYP = UncertainIndexDataset VALSTYP = ConstrainedUncertainValueDataset new{IDXTYP, VALSTYP}(indices, values) end function UncertainIndexValueDataset( indices::ConstrainedUncertainIndexDataset, values::ConstrainedUncertainValueDataset) IDXTYP = ConstrainedUncertainIndexDataset VALSTYP = ConstrainedUncertainValueDataset new{IDXTYP, VALSTYP}(indices, values) end function UncertainIndexValueDataset( indices::Vector{<:AbstractUncertainValue}, values::Vector{<:AbstractUncertainValue}) IDXTYP = UncertainIndexDataset VALSTYP = UncertainValueDataset new{IDXTYP, VALSTYP}( UncertainIndexDataset(indices), UncertainValueDataset(values) ) end function UncertainIndexValueDataset( indices::DT, values::Vector{<:AbstractUncertainValue}) where {DT <: AbstractUncertainIndexDataset} IDXTYP = DT VALSTYP = UncertainValueDataset new{IDXTYP, VALSTYP}(indices, UncertainValueDataset(values)) end function UncertainIndexValueDataset( indices::Vector{<:AbstractUncertainValue}, values::DT) where {DT <: AbstractUncertainValueDataset} IDXTYP = UncertainIndexDataset VALSTYP = DT new{IDXTYP, VALSTYP}(UncertainIndexDataset(indices), values) end end function UncertainIndexValueDataset(x::AbstractArray{T, 1}, y::AbstractArray{T, 1}) where T idxs = UncertainIndexDataset(x) vals = UncertainValueDataset(y) UncertainIndexValueDataset(idxs, vals) end Base.length(u::UncertainIndexValueDataset) = length(u.values) Base.size(u::UncertainIndexValueDataset) = length(u.values) Base.getindex(u::UncertainIndexValueDataset, i) = (u.indices[i], u.values[i]) Base.getindex(u::UncertainIndexValueDataset, i::AbstractVector) = [(u.indices[i], u.values[i]) for i in 1:length(u)] Base.getindex(u::UncertainIndexValueDataset, i::Colon) = [(u.indices[i], u.values[i]) for i in 1:length(u)] Base.firstindex(u::UncertainIndexValueDataset) = 1 Base.lastindex(u::UncertainIndexValueDataset) = length(u.values) Base.eachindex(u::UncertainIndexValueDataset) = Base.OneTo(length(u)) Base.iterate(u::UncertainIndexValueDataset, state = 1) = iterate((u.indices, u.values), state) index(u::UncertainIndexValueDataset, i) = u.indices[i] value(u::UncertainIndexValueDataset, i) = u.values[i] export UncertainIndexValueDataset, index, value
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
840
""" UncertainValueDataset A dataset of uncertain values. ## Fields - **`values::AbstractVector{<:AbstractUncertainValue}`**: The uncertain values. Each value is represented by an `AbstractUncertainValue`. """ struct UncertainValueDataset <: AbstractUncertainValueDataset values::AbstractVector{<:AbstractUncertainValue} end """ ConstrainedUncertainValueDataset Generic constrained dataset containing uncertain values. ## Fields - **`values::AbstractVector{<:AbstractUncertainValue}`**: The uncertain values. """ struct ConstrainedUncertainValueDataset <: AbstractUncertainValueDataset values::AbstractVector{<:AbstractUncertainValue} end function UncertainValueDataset(x::AbstractArray{T, 1}) where T UncertainValueDataset(CertainValue.(x)) end export UncertainValueDataset, ConstrainedUncertainValueDataset
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2365
promote_rule(::Type{UncertainDataset}, ::Type{UncertainValueDataset}) = UncertainValueDataset
promote_rule(::Type{UncertainValueDataset}, ::Type{UncertainDataset}) = UncertainValueDataset
promote_rule(::Type{UncertainDataset}, ::Type{UncertainIndexDataset}) = UncertainValueDataset
promote_rule(::Type{UncertainIndexDataset}, ::Type{UncertainDataset}) = UncertainValueDataset
promote_rule(::Type{UncertainValueDataset}, ::Type{UncertainIndexDataset}) = UncertainValueDataset
promote_rule(::Type{UncertainIndexDataset}, ::Type{UncertainValueDataset}) = UncertainValueDataset

convert(::Type{UncertainValueDataset}, udata::T) where {T <: AbstractUncertainValueDataset} = UncertainValueDataset(udata.values)
convert(::Type{UncertainValueDataset}, udata::UncertainIndexDataset) = UncertainValueDataset(udata.indices)
convert(::Type{UncertainIndexDataset}, udata::T) where {T <: AbstractUncertainValueDataset} = UncertainIndexDataset(udata.values)
convert(::Type{UncertainIndexDataset}, udata::UncertainIndexDataset) = UncertainIndexDataset(udata.indices)
convert(::Type{UncertainDataset}, udata::T) where {T <: AbstractUncertainValueDataset} = UncertainDataset(udata.values)
convert(::Type{UncertainDataset}, udata::UncertainIndexDataset) = UncertainDataset(udata.indices)

# Converting vectors of any input to uncertain datasets
function convert(::Type{UncertainValueDataset}, uvec::AbstractVector)
    uvals = [UncertainValue(x) for x in uvec]
    UncertainValueDataset(uvals)
end

function convert(::Type{UncertainValueDataset}, uvec::AbstractVector{T}) where {T <: AbstractUncertainValue}
    UncertainValueDataset(uvec)
end

UncertainValueDataset(udata::UncertainDataset) = convert(UncertainValueDataset, udata)
UncertainValueDataset(udata::UncertainIndexDataset) = convert(UncertainValueDataset, udata)
UncertainValueDataset(udata::UncertainValueDataset) = udata

UncertainIndexDataset(udata::UncertainDataset) = convert(UncertainIndexDataset, udata)
UncertainIndexDataset(udata::UncertainIndexDataset) = udata
UncertainIndexDataset(udata::UncertainValueDataset) = convert(UncertainIndexDataset, udata)

UncertainDataset(udata::UncertainDataset) = udata
UncertainDataset(udata::UncertainIndexDataset) = convert(UncertainDataset, udata)
UncertainDataset(udata::UncertainValueDataset) = convert(UncertainDataset, udata)
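# --- Illustrative conversion sketch (added for clarity; not part of the original file). ---
# Explicit conversion between dataset types goes through the constructors
# defined above.
#
# using UncertainData, Distributions
# uvals = [UncertainValue(Normal, 0, 1), UncertainValue(Normal, 1, 1)]
# ud = UncertainDataset(uvals)
# uvd = UncertainValueDataset(ud)   # same values, reinterpreted as a value dataset
# uid = UncertainIndexDataset(uvd)  # same values, reinterpreted as indices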
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
188
struct UncertainAgeValueDataset <: AbstractUncertainIndexValueDataset
    indices::Vector{AbstractUncertainValue}
    values::Vector{AbstractUncertainValue}
end

export UncertainAgeValueDataset
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
192
struct UncertainDepthValueDataset <: AbstractUncertainIndexValueDataset
    indices::Vector{AbstractUncertainValue}
    values::Vector{AbstractUncertainValue}
end

export UncertainDepthValueDataset
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2529
""" CertainValue A simple wrapper type for values with no uncertainty (i.e. represented by a scalar). ## Examples The two following ways of constructing values without uncertainty are equivalent. ```julia u1, u2 = CertainValue(2.2), CertainValue(6) w1, w2 = UncertainValue(2.2), UncertainValue(6) ``` """ struct CertainValue{T} <: AbstractUncertainValue value::T end Broadcast.broadcastable(x::CertainValue) = Ref(x.value) function summarise(uval::CertainValue) _type = typeof(uval) val = uval.value "$_type($val)" end Base.show(io::IO, uval::CertainValue) = print(io, summarise(uval)) eltype(v::CertainValue{T}) where {T} = T Base.size(x::CertainValue) = () Base.size(x::CertainValue,d) = convert(Int,d)<1 ? throw(BoundsError()) : 1 Base.axes(x::CertainValue) = () Base.axes(x::CertainValue,d) = convert(Int,d)<1 ? throw(BoundsError()) : Base.OneTo(1) Base.ndims(x::CertainValue) = 0 Base.ndims(::Type{<:CertainValue}) = 0 Base.length(x::CertainValue) = 1 Base.firstindex(x::CertainValue) = 1 Base.lastindex(x::CertainValue) = 1 Base.IteratorSize(::Type{<:CertainValue}) = Base.HasShape{0}() Base.keys(::CertainValue) = Base.OneTo(1) Base.getindex(x::CertainValue) = x function Base.getindex(x::CertainValue, i::Integer) Base.@_inline_meta @boundscheck i == 1 || throw(BoundsError()) x end function Base.getindex(x::CertainValue, I::Integer...) Base.@_inline_meta @boundscheck all([i == 1 for i in I]) || throw(BoundsError()) x end Base.first(x::CertainValue) = x Base.last(x::CertainValue) = x Base.copy(x::CertainValue) = x Base.minimum(v::CertainValue) = v.value Base.maximum(v::CertainValue) = v.value Base.isnan(x::CertainValue) = Base.isnan(x.value) Base.abs2(x::CertainValue) = Base.abs2(x.value) StatsBase.mean(v::CertainValue) = v.value StatsBase.median(v::CertainValue) = v.value StatsBase.middle(v::CertainValue) = v.value StatsBase.quantile(v::CertainValue, q) = v.value StatsBase.quantile(v::CertainValue, q, n::Int) = v.value StatsBase.std(v::CertainValue{T}) where {T} = zero(T) Base.rand(v::CertainValue) = v.value Base.rand(v::CertainValue{T}, n::Int) where T = repeat([v.value], n) Base.float(v::CertainValue) = float(v.value) function Base.:<(x::CertainValue{T1}, y::CertainValue{T2}) where { T1 <: Real, T2 <: Real} x.value < y.value end function IntervalArithmetic.interval(x::CertainValue{T1}, y::CertainValue{T2}) where { T1 <: Real, T2 <: Real} interval(x.value, y.value) end export CertainValue, UncertainValue
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
5295
import IntervalArithmetic: interval
import Distributions
import StatsBase

const POTENTIAL_UVAL_TYPES = Union{T1, T2} where {T1 <: Number, T2 <: AbstractUncertainValue}

"""
    UncertainScalarPopulation(values, probs)
    UncertainScalarPopulation(values, probs::Vector{Number})
    UncertainScalarPopulation(values, probs::StatsBase.AbstractWeights)

An `UncertainScalarPopulation`, which consists of some population members (`values`) and some weights (`probs`) that indicate the relative importance of the population members (for example during resampling).

## Fields

- **`values`**: The members of the population. Can be either numerical values, any type of uncertain value defined in this package (including populations), and `Measurement` instances from Measurements.jl.
- **`probs`**: The probabilities of sampling each member of the population.

## Constructors

- If `values` contains only scalar numeric values, then the `values` field will be of type `Vector{Number}`.
- If `values` contains one or more uncertain values, then the `values` field will be of type `Vector{AbstractUncertainValue}`

## Example

```julia
# Uncertain population consisting of CertainValues (scalars get promoted to
# CertainValue), theoretical distributions and KDE distributions
pop1 = UncertainScalarPopulation(
    [3.0, UncertainValue(Normal, 0, 1), UncertainValue(Gamma, 2, 3),
    UncertainValue(Uniform, rand(1000))],
    [0.5, 0.5, 0.5, 0.5])

# Uncertain population consisting of scalar values
pop2 = UncertainScalarPopulation([1, 2, 3], rand(3))
pop3 = UncertainScalarPopulation([1, 2, 3], Weights(rand(3)))

# Uncertain population consisting of uncertain populations
pop4 = UncertainScalarPopulation([pop1, pop2], [0.1, 0.5])

# Uncertain population consisting of uncertain populations, a scalar and
# a normal distribution. Assign random weights.
vals = [pop1, pop2, 2, UncertainValue(Normal, 0.3, 0.014)]
pop5 = UncertainScalarPopulation(vals, Weights(rand(4)))
```
"""
struct UncertainScalarPopulation{T, PW <: StatsBase.AbstractWeights} <: AbstractScalarPopulation{T, PW}
    values::Vector{T}
    probs::PW
end

"""
    UncertainScalarPopulation(values::Vector, probabilities::Vector{Float64})

Construct a population from a vector of values and a vector of probabilities associated to those values.
"""
function UncertainScalarPopulation(values::Vector{T1}, probabilities::Vector{T2}) where {T1 <: Number, T2 <: Number}
    if length(values) != length(probabilities)
        throw(ArgumentError("Lengths of values and probability vectors do not match."))
    end
    UncertainScalarPopulation(values, StatsBase.weights(probabilities))
end

function UncertainScalarPopulation(values::VT, probabilities) where VT <: Vector{ELTYPE} where {ELTYPE <: POTENTIAL_UVAL_TYPES}
    if length(values) != length(probabilities)
        throw(ArgumentError("Lengths of values and probability vectors do not match."))
    end
    UncertainScalarPopulation(UncertainValue.(values), StatsBase.weights(probabilities))
end

"""
    ConstrainedUncertainScalarPopulation(values, probs)
    ConstrainedUncertainScalarPopulation(values, probs::Vector{Number})
    ConstrainedUncertainScalarPopulation(values, probs::StatsBase.AbstractWeights)

A `ConstrainedUncertainScalarPopulation`, which consists of some population members (`values`) and some weights (`probs`) that indicate the relative importance of the population members (for example during resampling). The uncertain values for this type are meant to consist of constrained uncertain values (generated by calling `constrain(uval, sampling_constraint)` on them).

This is just a convenience type to indicate that the population has been constrained. It behaves identically to `UncertainScalarPopulation`.

There are different constructors for different types of `values`:

- If `values` contains only scalar numeric values, then the `values` field will be of type `Vector{Number}`.
- If `values` contains one or more uncertain values, then the `values` field will be of type `Vector{AbstractUncertainValue}`
"""
struct ConstrainedUncertainScalarPopulation{T, PW <: StatsBase.AbstractWeights} <: AbstractScalarPopulation{T, PW}
    values::Vector{T}
    probs::PW
end

"""
    ConstrainedUncertainScalarPopulation(values::Vector, probabilities::Vector{Float64})

Construct a constrained population from a vector of values and a vector of probabilities associated to those values.
"""
function ConstrainedUncertainScalarPopulation(values::Vector{T1}, probabilities::Vector{T2}) where {T1 <: Number, T2 <: Number}
    if length(values) != length(probabilities)
        throw(ArgumentError("Lengths of values and probability vectors do not match."))
    end
    ConstrainedUncertainScalarPopulation(float.(values), StatsBase.weights(probabilities))
end

function ConstrainedUncertainScalarPopulation(values::VT, probabilities) where VT <: Vector{ELTYPE} where {ELTYPE <: POTENTIAL_UVAL_TYPES}
    if length(values) != length(probabilities)
        throw(ArgumentError("Lengths of values and probability vectors do not match."))
    end
    ConstrainedUncertainScalarPopulation(UncertainValue.(values), StatsBase.weights(probabilities))
end

export UncertainScalarPopulation, ConstrainedUncertainScalarPopulation
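# --- Illustrative resampling sketch (added for clarity; the resampling behavior
# described here is an assumption based on the package's resampling module, and
# mirrors the constructor examples in the docstring above). ---
#
# using UncertainData, Distributions, StatsBase
# pop = UncertainScalarPopulation([3.0, UncertainValue(Normal, 0, 1)], [0.2, 0.8])
# resample(pop, 1000)   # ≈ 20% of draws are exactly 3.0; the rest come from Normal(0, 1)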
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
3911
import Base.rand import StatsBase.quantile import StatsBase.median import StatsBase: sample, sample! import Distributions.support import Distributions.ecdf import Base: minimum, maximum, max, min """ UncertainScalarKDE(d::KernelDensity.UnivariateKDE, values::AbstractVector{T}, range, pdf) An empirical value represented by a distribution estimated from actual data. ## Fields - **`distribution`**: The `UnivariateKDE` estimate for the distribution of `values`. - **`values`**: The values from which `distribution` is estimated. - **`range`**: The values for which the pdf is estimated. - **`pdf`**: The values of the pdf at each point in `range`. """ struct UncertainScalarKDE{T} <: AbstractUncertainScalarKDE{T} distribution::KernelDensity.UnivariateKDE values::AbstractVector{T} range pdf::StatsBase.Weights end """ TruncatedUncertainScalarKDE A truncated [`UncertainScalarKDE`](@ref). """ struct TruncatedUncertainScalarKDE{T} <: AbstractUncertainScalarKDE{T} distribution::KernelDensity.UnivariateKDE values::AbstractVector{T} range pdf::StatsBase.Weights end function summarise(uv::AbstractUncertainScalarKDE{T}) where {T} range = uv.range dist = typeof(uv.distribution) _type = typeof(uv) "$_type($dist, range = $range)" end Base.show(io::IO, uv::AbstractUncertainScalarKDE{T}) where {T} = print(io, summarise(uv)) """ rand(uv::AbstractUncertainScalarKDE) Sample a random number from an uncertain value represented by a kernel density estimate. """ function rand(uv::AbstractUncertainScalarKDE) # Box width δ = step(uv.range) # Sample a box sampled_val = sample(uv.range, uv.pdf) # Sample uniformly from within the box rand(Uniform(sampled_val, sampled_val + δ)) end """ rand(uv::AbstractUncertainScalarKDE, n::Int) Sample `n` random numbers from an uncertain value represented by a kernel density estimate. """ function rand(uv::AbstractUncertainScalarKDE, n::Int) # Box width δ = step(uv.range) # Sample n boxes according to estimated pdf sampled_vals = Vector{Float64}(undef, n) sample!(uv.range, uv.pdf, sampled_vals) # Sample uniformly from within each box [rand(Uniform(sampled_vals[i], sampled_vals[i] + δ)) for i = 1:n] end """ ecdf(uv::AbstractUncertainScalarKDE) Empirical cumulative distribution function for an uncertain value approximated by kernel density estimation. """ ecdf(uv::AbstractUncertainScalarKDE) = cumsum(uv.pdf) """ mode(uv::AbstractUncertainScalarKDE) The mode (most likely value) of an uncertain value represented by a kernel density estimate. """ mode(uv::AbstractUncertainScalarKDE) = uv.range[findmax(uv.distribution.density)[2]] """ quantile(uv::AbstractUncertainScalarKDE, q) Return the `q`-th quantile of the distribution furnishing the uncertain value. """ function quantile(uv::AbstractUncertainScalarKDE{T}, q) where T uv.range[findfirst(ecdf(uv) .> q)] end median(uv::AbstractUncertainScalarKDE{T}) where T = quantile(uv, 0.5) """ support(uv::AbstractUncertainScalarKDE) Return the support of an uncertain value furnished by a kernel density estimate. """ support(uv::AbstractUncertainScalarKDE{T}) where T = interval(minimum(uv.range), maximum(uv.range)) """ getquantileindex(uv::AbstractUncertainScalarKDE, q::Float64) Return the index of the range/density value corresponding to the `q`-th quantile of an uncertain value furnished by a kernel density estimate.
""" function getquantileindex(uv::AbstractUncertainScalarKDE{T}, q::Float64) where T findfirst(ecdf(uv) .> q) end minimum(uv::AbstractUncertainScalarKDE) = minimum(uv.range) maximum(uv::AbstractUncertainScalarKDE) = maximum(uv.range) min(uv::AbstractUncertainScalarKDE) = minimum(uv.range) max(uv::AbstractUncertainScalarKDE) = maximum(uv.range) export AbstractUncertainScalarKDE, UncertainScalarKDE ecdf, support, getquantileindex, UnivariateKDE, minimum, maximum
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
9413
""" struct ConstrainedUncertainScalarValueOneParameter{S, T1 <: Number} distribution::Distribution{Univariate, S} a::T1 end A constrained uncertain value represented by a one-parameter distribution, where the original distribution has been truncated. ## Fields: - **`distribution`**: The truncated version of the original distribution. - **`a`**: The original value of the parameter of the original distribution. """ struct ConstrainedUncertainScalarValueOneParameter{S, T1 <: Number} <: AbstractUncertainOneParameterScalarValue{S, T1} distribution::Distribution{Univariate, S} a::T1 end """ struct ConstrainedUncertainScalarValueTwoParameter{S, T1 <: Number, T2 <: Number} distribution::Distribution{Univariate, S} a::T1 b::T2 end A constrained uncertain value represented by a two-parameter distribution, where the original distribution has been truncated. ## Fields: - **`distribution`**: The truncated version of the original distribution. - **`a`**: The original value of the first parameter of the original distribution. - **`b`**: The original value of the second parameter of the original distribution. """ struct ConstrainedUncertainScalarValueTwoParameter{S, T1 <: Number, T2 <: Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} a::T1 b::T2 end """ struct ConstrainedUncertainScalarValueThreeParameter{S, T1 <: Number, T2 <: Number, T3 <: Number} distribution::Distribution{Univariate, S} a::T1 b::T2 c::T3 end A constrained uncertain value represented by a two-parameter distribution, where the original distribution has been truncated. ## Fields: - **`distribution`**: The truncated version of the original distribution. - **`a`**: The original value of the first parameter of the original distribution. - **`b`**: The original value of the second parameter of the original distribution. - **`c`**: The original value of the third parameter of the original distribution. """ struct ConstrainedUncertainScalarValueThreeParameter{S, T1 <: Number, T2 <: Number, T3 <: Number} <: AbstractUncertainThreeParameterScalarValue{S, T1, T2, T3} distribution::Distribution{Univariate, S} a::T1 b::T2 c::T3 end import Distributions.Normal import Distributions.Uniform import Distributions.Beta import Distributions.BetaPrime import Distributions.BetaBinomial import Distributions.Gamma import Distributions.Frechet """ Uncertain value represented by a generic three-parameter distribution. """ struct UncertainScalarTheoreticalThreeParameter{S<:ValueSupport, T1<:Number, T2<:Number, T3<:Number} <: AbstractUncertainThreeParameterScalarValue{S, T1, T2, T3} distribution::Distribution{Univariate, S} a::T1 b::T2 c::T3 end """ Uncertain value represented by a generic two-parameter distribution. """ struct UncertainScalarTheoreticalTwoParameter{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} a::T1 b::T2 end """ Uncertain value represented by a generic one-parameter distribution. """ struct UncertainScalarGenericOneParameter{S<:ValueSupport, T1<:Number} <: AbstractUncertainOneParameterScalarValue{S, T1} distribution::Distribution{Univariate, S} a::T1 end """ Uncertain value represented by a normal distribution. """ struct UncertainScalarNormallyDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} μ::T1 σ::T2 end """ Uncertain value represented by a uniform distribution. 
""" struct UncertainScalarUniformlyDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} lower::T1 upper::T2 end """ Uncertain value represented by a beta distribution. """ struct UncertainScalarBetaDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} α::T1 β::T2 end """ Uncertain value represented by a beta prime distribution. """ struct UncertainScalarBetaPrimeDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} α::T1 β::T2 end """ Uncertain value represented by a beta binomial distribution. """ struct UncertainScalarBetaBinomialDistributed{S<:ValueSupport, T1<:Number, T2<:Number, T3<:Number} <: AbstractUncertainThreeParameterScalarValue{S, T1, T2, T3} distribution::Distribution{Univariate, S} n::T1 α::T2 β::T3 end """ Uncertain value represented by a gamma distribution. """ struct UncertainScalarGammaDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} α::T1 θ::T2 end """ Uncertain value represented by a Fréchet distribution. """ struct UncertainScalarFrechetDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} α::T1 θ::T2 end """ Uncertain value represented by a binomial distribution. """ struct UncertainScalarBinomialDistributed{S<:ValueSupport, T1<:Number, T2<:Number} <: AbstractUncertainTwoParameterScalarValue{S, T1, T2} distribution::Distribution{Univariate, S} n::T1 p::T2 end ################### # Pretty printing ################### function summarise(o::AbstractUncertainTwoParameterScalarValue) a = @sprintf "%.3f" o.a b = @sprintf "%.3f" o.b dist = o.distribution _type = typeof(o) "$_type($a, $b, $dist)" end Base.show(io::IO, q::AbstractUncertainTwoParameterScalarValue) = print(io, summarise(q)) function summarise(o::AbstractUncertainThreeParameterScalarValue) a = @sprintf "%.3f" o.a b = @sprintf "%.3f" o.b c = @sprintf "%.3f" o.c dist = o.distribution _type = typeof(o) "$_type($a, $b, $c, $dist)" end Base.show(io::IO, q::AbstractUncertainThreeParameterScalarValue) = print(io, summarise(q)) function summarise(o::UncertainScalarNormallyDistributed) μ = @sprintf "%.3f" o.μ σ = @sprintf "%.3f" o.σ dist = o.distribution _type = typeof(o) "$_type(μ = $μ, σ = $σ)" end Base.show(io::IO, q::UncertainScalarNormallyDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarUniformlyDistributed) lower = @sprintf "%.3f" o.lower upper = @sprintf "%.3f" o.upper dist = o.distribution _type = typeof(o) "$_type(lower = $lower, upper = $upper)" end Base.show(io::IO, q::UncertainScalarUniformlyDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarBetaDistributed) α = @sprintf "%.3f" o.α β = @sprintf "%.3f" o.β dist = o.distribution _type = typeof(o) "$_type(α = $α, β = $β)" end Base.show(io::IO, q::UncertainScalarBetaDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarBetaPrimeDistributed) α = @sprintf "%.3f" o.α β = @sprintf "%.3f" o.β dist = o.distribution _type = typeof(o) "$_type(α = $α, β = $β)" end Base.show(io::IO, q::UncertainScalarBetaPrimeDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarBetaBinomialDistributed) n = @sprintf 
"%.3f" o.n α = @sprintf "%.3f" o.α β = @sprintf "%.3f" o.β dist = o.distribution _type = typeof(o) "$_type(n = $n, α = $α, β = $β)" end Base.show(io::IO, q::UncertainScalarBetaBinomialDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarGammaDistributed) α = @sprintf "%.3f" o.α θ = @sprintf "%.3f" o.θ dist = o.distribution _type = typeof(o) "$_type(α = $α, θ = $θ)" end Base.show(io::IO, q::UncertainScalarGammaDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarFrechetDistributed) α = @sprintf "%.3f" o.α θ = @sprintf "%.3f" o.θ dist = o.distribution _type = typeof(o) "$_type(α = $α, θ = $θ)" end Base.show(io::IO, q::UncertainScalarFrechetDistributed) = print(io, summarise(q)) function summarise(o::UncertainScalarBinomialDistributed) n = @sprintf "%.3f" o.n p = @sprintf "%.3f" o.p dist = o.distribution _type = typeof(o) "$_type(n = $n, p = $p)" end Base.show(io::IO, q::UncertainScalarBinomialDistributed) = print(io, summarise(q)) export TheoreticalDistributionScalarValue, AbstractUncertainOneParameterScalarValue, AbstractUncertainTwoParameterScalarValue, AbstractUncertainThreeParameterScalarValue, ConstrainedUncertainScalarValueOneParameter, ConstrainedUncertainScalarValueTwoParameter, ConstrainedUncertainScalarValueThreeParameter, UncertainScalarGenericOneParameter, UncertainScalarTheoreticalTwoParameter, UncertainScalarTheoreticalThreeParameter, UncertainScalarNormallyDistributed, UncertainScalarUniformlyDistributed, UncertainScalarBetaDistributed, UncertainScalarBetaPrimeDistributed, UncertainScalarBetaBinomialDistributed, UncertainScalarBinomialDistributed, UncertainScalarGammaDistributed, UncertainScalarFrechetDistributed, Normal, Uniform, Beta, BetaPrime, BetaBinomial, Gamma, Frechet, Binomial
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2381
import Distributions import Statistics abstract type TheoreticalFittedUncertainScalar <: TheoreticalDistributionScalarValue end Broadcast.broadcastable(uv::TheoreticalFittedUncertainScalar) = Ref(uv.distribution) """ UncertainScalarTheoreticalFit An empirical value represented by a distribution estimated from actual data. ## Fields - **`distribution`**: The distribution describing the value. - **`values`**: The values from which `distribution` is estimated. """ struct UncertainScalarTheoreticalFit{D <: Distribution, T} <: TheoreticalFittedUncertainScalar distribution::FittedDistribution{D} # S may be Continuous or Discrete values::AbstractVector{T} end """ ConstrainedUncertainScalarTheoreticalFit A constrained (truncated) version of an empirical value whose distribution was estimated from actual data. ## Fields - **`distribution`**: The truncated version of the distribution describing the value. - **`values`**: The values from which the original distribution was estimated. """ struct ConstrainedUncertainScalarTheoreticalFit{D <: Distribution, T} <: TheoreticalFittedUncertainScalar distribution::FittedDistribution{D} # S may be Continuous or Discrete values::AbstractVector{T} end """ truncated(fd::FittedDistribution, lower, upper) Truncate a fitted distribution. """ Distributions.truncated(fd::FittedDistribution, lower, upper) = Distributions.truncated(fd.distribution, lower, upper) Base.rand(fd::UncertainScalarTheoreticalFit) = rand(fd.distribution.distribution) Base.rand(fd::UncertainScalarTheoreticalFit, n::Int) = rand(fd.distribution.distribution, n) # For fitted distributions, we need to access the distribution wrapped inside the FittedDistribution Distributions.pdf(fd::UncertainScalarTheoreticalFit, x) = pdf(fd.distribution.distribution, x) StatsBase.mode(uv::UncertainScalarTheoreticalFit) = mode(uv.distribution.distribution) Statistics.mean(uv::UncertainScalarTheoreticalFit) = mean(uv.distribution.distribution) Statistics.median(uv::UncertainScalarTheoreticalFit) = median(uv.distribution.distribution) Statistics.quantile(uv::UncertainScalarTheoreticalFit, q) = quantile(uv.distribution.distribution, q) Statistics.std(uv::UncertainScalarTheoreticalFit) = std(uv.distribution.distribution) Statistics.var(uv::UncertainScalarTheoreticalFit) = var(uv.distribution.distribution) export TheoreticalFittedUncertainScalar, UncertainScalarTheoreticalFit, ConstrainedUncertainScalarTheoreticalFit
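A minimal sketch of the fit-then-query workflow these methods enable, assuming the `UncertainValue(d::Type{D}, data)` constructor defined in the next file and hypothetical data:

```julia
using UncertainData, Distributions, Statistics

data = rand(Normal(1.0, 0.5), 5_000)  # hypothetical sample
uv = UncertainValue(Normal, data)     # UncertainScalarTheoreticalFit

mean(uv), std(uv)   # forwarded to the fitted Normal
quantile(uv, 0.95)  # likewise forwarded
rand(uv, 10)        # draws from the fitted distribution
```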
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
16000
import KernelDensity.UnivariateKDE import Distributions.Distribution import StatsBase: AbstractWeights, Weights import Distributions """ UncertainValue(x::T) where T <: Real Create a `CertainValue` instance from a scalar with no uncertainty. """ UncertainValue(x::T) where T <: Real = CertainValue(x) # Identity constructor UncertainValue(uval::AbstractUncertainValue) = uval # From Measurements.jl UncertainValue(m::Measurement{T}) where T = UncertainValue(Normal, m.val, m.err) """ UncertainValue(values::Vector{<:Number}, probs::Vector{<:Number}) From a numeric vector, construct an `UncertainPopulation` whose members are scalar values. """ function UncertainValue(values::Vector{<:Number}, probs::Vector{<:Number}) UncertainScalarPopulation(float.(values), probs) end """ UncertainValue(values::Vector{<:Number}, probs::AbstractWeights) From a numeric vector, construct an `UncertainPopulation` whose members are scalar values. """ function UncertainValue(values::Vector{<:Number}, probs::W) where {W <: AbstractWeights} UncertainScalarPopulation(float.(values), probs) end """ UncertainValue(values::Vector, probs::Union{Vector, AbstractWeights}) Construct a population whose members are given by `values` and whose sampling probabilities are given by `probs`. The elements of `values` can be either numeric or uncertain values of any type. """ function UncertainValue(values::VT, probs) where VT <: Vector{ELTYPE} where {ELTYPE<:POTENTIAL_UVAL_TYPES} UncertainScalarPopulation(UncertainValue.(values), probs) end function UncertainValue(values::VT, probs::Vector{Number}) where VT <: Vector{ELTYPE} where {ELTYPE<:POTENTIAL_UVAL_TYPES} UncertainScalarPopulation(UncertainValue.(values), probs) end """ UncertainValue(data::Vector{T}; kernel::Type{D} = Normal, npoints::Int = 2048) where {D <: Distributions.Distribution, T} Construct an uncertain value by fitting a kernel density estimate to `data`. Fast Fourier transforms are used in the kernel density estimation, so the number of points should be a power of 2 (default = 2048). """ function UncertainValue(data::Vector{T}; kernel::Type{D} = Normal, bandwidth = KernelDensity.default_bandwidth(data), npoints::Int = 2048) where {D <: Distributions.Distribution, T} # Kernel density estimation KDE = kde(data, npoints = npoints, kernel = kernel, bandwidth = bandwidth) # Get the x values for which the density is estimated. xrange = KDE.x # Normalise estimated density density = KDE.density ./ sum(KDE.density) # Create an uncertain value UncertainScalarKDE(KDE, data, xrange, Weights(density)) end """ UncertainValue(kerneldensity::Type{K}, data::Vector{T}; kernel::Type{D} = Normal, npoints::Int = 2048) where {K <: UnivariateKDE, D <: Distribution, T} Construct an uncertain value by fitting a kernel density estimate to `data`. Fast Fourier transforms are used in the kernel density estimation, so the number of points should be a power of 2 (default = 2048). """ function UncertainValue(kerneldensity::Type{K}, data::Vector{T}; kernel::Type{D} = Normal, bandwidth = KernelDensity.default_bandwidth(data)/4, npoints::Int = 2048) where {K <: UnivariateKDE, D <: Distribution, T} # Kernel density estimation KDE = kde(data, npoints = npoints, kernel = kernel, bandwidth = bandwidth) # Get the x values for which the density is estimated. xrange = KDE.x # Normalise estimated density density = KDE.density ./ sum(KDE.density) # Create an uncertain value UncertainScalarKDE(KDE, data, xrange, Weights(density)) end # For vectors of zero-dimensional arrays.
UncertainValue(x::Vector{Array{<:Real, 0}}) = UncertainValue([el[] for el in x]) """ UncertainValue(d::Type{D}, empiricaldata::AbstractVector{T}) where {D <: Distribution, T} # Constructor for empirical distributions. Fit a distribution of type `d` to the data and use that as the representation of the empirical distribution. Calls `Distributions.fit` behind the scenes. ## Arguments - **`d`**: A valid univariate distribution from `Distributions.jl`. - **`empiricaldata`**: The data to which `d` is fitted. """ function UncertainValue(d::Type{D}, empiricaldata::Vector{T}) where {D<:Distribution, T} distribution = FittedDistribution(Distributions.fit(d, empiricaldata)) UncertainScalarTheoreticalFit(distribution, empiricaldata) end """ UncertainValue(distribution::Type{D}, a::T1, b::T2; kwargs...) where {T1<:Number, T2 <: Number, D<:Distribution} # Constructor for two-parameter distributions `UncertainValue`s are currently implemented for the following two-parameter distributions: `Uniform`, `Normal`, `Binomial`, `Beta`, `BetaPrime`, `Gamma`, and `Frechet`. ### Arguments - **`a`, `b`**: Generic parameters whose meaning varies depending on what `distribution` is provided. See the list below. - **`distribution`**: A valid univariate distribution from `Distributions.jl`. Precisely what `a` and `b` are depends on which distribution is provided. - `UncertainValue(Normal, μ, σ)` returns an `UncertainScalarNormallyDistributed` instance. - `UncertainValue(Uniform, lower, upper)` returns an `UncertainScalarUniformlyDistributed` instance. - `UncertainValue(Beta, α, β)` returns an `UncertainScalarBetaDistributed` instance. - `UncertainValue(BetaPrime, α, β)` returns an `UncertainScalarBetaPrimeDistributed` instance. - `UncertainValue(Gamma, α, θ)` returns an `UncertainScalarGammaDistributed` instance. - `UncertainValue(Frechet, α, θ)` returns an `UncertainScalarFrechetDistributed` instance. - `UncertainValue(Binomial, n, p)` returns an `UncertainScalarBinomialDistributed` instance. ### Keyword arguments - **`nσ`**: If `distribution <: Distributions.Normal`, then how many standard deviations away from `μ` do `lower` and `upper` (i.e. both, because they are the same distance away from `μ`) represent? - **`tolerance`**: A threshold determining how symmetric the uncertainties must be in order to allow the construction of a Normal distribution (the asymmetry `upper - lower` must be within `tolerance`). - **`trunc_lower`**: Lower truncation bound for distributions with infinite support. Defaults to `-Inf`. - **`trunc_upper`**: Upper truncation bound for distributions with infinite support. Defaults to `Inf`. ## Examples ### Normal distribution Normal distributions are formed by using the constructor `UncertainValue(Normal, μ, σ; kwargs...)`. This gives a normal distribution with mean μ and standard deviation σ/nσ (nσ may be given as a keyword argument). ```julia # A normal distribution with mean = 2.3 and standard deviation 0.3. UncertainValue(Normal, 2.3, 0.3) # A normal distribution with mean 2.3 and standard deviation 0.3/2. UncertainValue(Normal, 2.3, 0.3, nσ = 2) # A normal distribution with mean 2.3 and standard deviation 0.3, truncated to the interval `[1, 3]`. UncertainValue(Normal, 2.3, 0.3, trunc_lower = 1.0, trunc_upper = 3.0) ``` ### Uniform distribution Uniform distributions are formed using the `UncertainValue(Uniform, lower, upper)` constructor.
```julia # A uniform distribution on `[-2, 3]` UncertainValue(Uniform, -2, 3) ``` """ function UncertainValue(distribution::Type{D}, a::T1, b::T2; kwargs...) where {T1<:Number, T2 <: Number, D<:Distribution} if distribution == Uniform dist = assigndist_uniform(a, b) UncertainScalarUniformlyDistributed(dist, a, b) elseif distribution == Binomial dist = assigndist_binomial(a, b) UncertainScalarBinomialDistributed(dist, a, b) elseif distribution == Normal dist = assigndist_normal(a, b; kwargs...) UncertainScalarNormallyDistributed(dist, a, b) elseif distribution == Beta dist = assigndist_beta(a, b; kwargs...) UncertainScalarBetaDistributed(dist, a, b) elseif distribution == BetaPrime dist = assigndist_betaprime(a, b; kwargs...) UncertainScalarBetaPrimeDistributed(dist, a, b) elseif distribution == Gamma if !((a > 0) & (b > 0)) error("α and θ must both be > 0") end dist = assigndist_gamma(a, b; kwargs...) UncertainScalarGammaDistributed(dist, a, b) elseif distribution == Frechet dist = assigndist_frechet(a, b; kwargs...) UncertainScalarFrechetDistributed(dist, a, b) else throw(DomainError("Two-parameter $distribution is not implemented.")) end end """ UncertainValue(distribution::Type{D}, a::T1, b::T2, c::T3; kwargs...) where {T1<:Number, T2<:Number, T3<:Number, D<:Distribution} ## Constructor for three-parameter distributions The only three-parameter distribution currently implemented is `BetaBinomial`. ### Arguments - **`a`, `b`, `c`**: Generic parameters whose meaning varies depending on what `distribution` is provided. See the list below. - **`distribution`**: A valid univariate distribution from `Distributions.jl`. Precisely what `a`, `b` and `c` are depends on which distribution is provided. - `UncertainValue(BetaBinomial, n, α, β)` returns an `UncertainScalarBetaBinomialDistributed` instance. ### Keyword arguments - **`nσ`**: If `distribution <: Distributions.Normal`, then how many standard deviations away from `μ` do `lower` and `upper` (i.e. both, because they are the same distance away from `μ`) represent? - **`tolerance`**: A threshold determining how symmetric the uncertainties must be in order to allow the construction of a Normal distribution (the asymmetry `upper - lower` must be within `tolerance`). - **`trunc_lower`**: Lower truncation bound for distributions with infinite support. Defaults to `-Inf`. - **`trunc_upper`**: Upper truncation bound for distributions with infinite support. Defaults to `Inf`. ## Examples ### BetaBinomial distribution BetaBinomial distributions are formed by using the constructor `UncertainValue(BetaBinomial, n, α, β)`. This gives a beta binomial distribution with `n` trials and shape parameters `α` and `β`. ```julia # A beta binomial distribution with n = 100 trials and parameters α = 2.3 and # β = 5 UncertainValue(BetaBinomial, 100, 2.3, 5) ``` """ function UncertainValue(distribution::Type{D}, a::T1, b::T2, c::T3; kwargs...) where {T1<:Number, T2<:Number, T3<:Number, D<:Distribution} if distribution == BetaBinomial dist = assigndist_betabinomial(a, b, c; kwargs...) UncertainScalarBetaBinomialDistributed(dist, a, b, c) else throw(DomainError("Three-parameter $distribution is not implemented.")) end end """ untruncated_dist(t::Distributions.Truncated) Get the untruncated distribution from a potentially nested truncated distribution.
""" function untruncated_dist(t::Distributions.Truncated) t_untrunc = t while typeof(t_untrunc) <: Distributions.Truncated t_untrunc = t_untrunc.untruncated end return t_untrunc end """ untruncated_disttype(t::Distributions.Truncated) Get the type of the untruncated distribution for a potentially nested truncated distribution. """ function untruncated_disttype(t::Distributions.Truncated) t_untrunc = t while typeof(t_untrunc) <: Distributions.Truncated t_untrunc = t_untrunc.untruncated end return typeof(t_untrunc) end """ UncertainValue(t::Distributions.Truncated) Construct an uncertain value from an instance of a distribution. If a specific uncertain value type has not been implemented, the number of parameters is determined from the distribution and an instance of one of the following types is returned: - `ConstrainedUncertainScalarValueOneParameter` - `ConstrainedUncertainScalarValueTwoParameter` - `ConstrainedUncertainScalarValueThreeParameter` ## Examples ```julia # Normal distribution truncated to the interval [0.5, 0.7] t = truncated(Normal(0, 1), 0.5, 0.7) UncertainValue(t) # Gamma distribution truncated to the interval [0.5, 3.5] t = Truncate(Gamma(4, 5.1), 0.5, 3.5) UncertainValue(t) # Binomial distribution truncated to the interval [2, 7] t = Truncate(Binomial(10, 0.4), 2, 7) UncertainValue(t) ``` """ function UncertainValue(t::Distributions.Truncated) dist_type = untruncated_disttype(t) original_dist = untruncated_dist(t) params = fieldnames(dist_type) param_values = [getfield(original_dist, p) for p in params] n_params = length(params) if n_params == 1 return ConstrainedUncertainScalarValueOneParameter(t, param_values...) elseif n_params == 2 return ConstrainedUncertainScalarValueTwoParameter(t, param_values...) elseif n_params == 3 return ConstrainedUncertainScalarValueThreeParameter(t, param_values...) end end """ UncertainValue(d::Distributions.Distribution) Construct an uncertain value from an instance of a distribution. If a specific uncertain value type has not been implemented, the number of parameters is determined from the distribution and an instance of one of the following types is returned: - `UncertainScalarTheoreticalOneParameter` - `UncertainScalarTheoreticalTwoParameter` - `UncertainScalarTheoreticalThreeParameter` ## Examples ```julia UncertainValue(Normal(0, 1)) UncertainValue(Gamma(4, 5.1)) UncertainValue(Binomial, 8, 0.2) ``` """ function UncertainValue(d::Distributions.Distribution) params = fieldnames(typeof(d)) n_params = length(params) param_values = [getfield(d, p) for p in params] if d isa Uniform UncertainScalarUniformlyDistributed(d, param_values...) elseif d isa Binomial UncertainScalarBinomialDistributed(d, param_values...) elseif d isa Normal UncertainScalarNormallyDistributed(d, param_values...) elseif d isa Beta UncertainScalarBetaDistributed(d, param_values...) elseif d isa BetaPrime UncertainScalarBetaPrimeDistributed(d, param_values...) elseif d isa Gamma UncertainScalarGammaDistributed(d, param_values...) elseif d isa Frechet UncertainScalarFrechetDistributed(d, param_values...) # if no specific type is implemented for this distribution, just create # a generic one else if n_params == 1 return UncertainScalarTheoreticalOneParameter(d, param_values...) elseif n_params == 2 return UncertainScalarTheoreticalTwoParameter(d, param_values...) elseif n_params == 3 return UncertainScalarTheoreticalThreeParameter(d, param_values...) else msg = "uncertain value type for $n_params-parameter $d not implemented." 
throw(DomainError(msg)) end end end ############################## # Macro constructors ############################## # macro uncertainvalue(μ, lower, upper, d, # nσ = 2, trunc_lower = -Inf, trunc_upper = Inf, tolerance = 1e-3) # return :(UncertainValue($μ, $lower, $upper, $d, # nσ = $nσ, # trunc_lower = $trunc_lower, # trunc_upper = $trunc_upper, # tolerance = $tolerance)) # end # #macro uncertainvalue(empiricaldata, dist) # return :(UncertainValue($empiricaldata, $dist)) #end # # """ # A macro for the construction of uncertain values. Calls # [`UncertainValue`](@ref) with the provided arguments. # # - **`@uncertainvalue(μ, lower, upper, dist, kwargs...)`**: # Fit a distribution of type `dist` to `μ` and `lower`/`upper` uncertainty # bounds. # - **`@uncertainvalue(empiricaldata, dist)`**: # Fit a distribution of type `dist` to a vector of empirical data. # """ #:(@uncertainvalue) # # # """ # @evalue(values, d) # # Construct an uncertain value from an empirical distribution. # """ #macro uncertain(values::AbstractVector, d) # :(UncertainScalarTheoreticalFit($values, $d)) #end #uncertain, @uncertain export UncertainValue #uncertainvalue, #@uncertainvalue
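To summarise the dispatch surface defined in this file, here is a brief tour of the constructor (a sketch; the `±` literal requires Measurements.jl, and the data are made up):

```julia
using UncertainData, Distributions, Measurements

UncertainValue(2.2)                             # CertainValue
UncertainValue(2.2 ± 0.3)                       # Measurement -> Normal(2.2, 0.3)
UncertainValue(rand(1000))                      # kernel density estimate
UncertainValue(Normal, rand(Normal(), 1000))    # fit a Normal to data
UncertainValue(Normal, 0, 1)                    # two-parameter theoretical
UncertainValue(BetaBinomial, 100, 2, 5)         # three-parameter theoretical
UncertainValue(Normal(0, 1))                    # from a distribution instance
UncertainValue(truncated(Normal(0, 1), -1, 1))  # from a truncated distribution
UncertainValue([1, 2, 3], [0.2, 0.3, 0.5])      # weighted population
```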
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
2797
using Reexport @reexport module UncertainValues using IntervalArithmetic import IntervalArithmetic: interval using Distributions using RecipesBase using StaticArrays using Measurements import KernelDensity: KernelDensity, UnivariateKDE, default_bandwidth, kde import StatsBase: StatsBase, ProbabilityWeights, pweights, FrequencyWeights, fweights, Weights, weights, AnalyticWeights, aweights ########################################## # Abstract types ########################################## include("abstract_types/AbstractUncertainValue.jl") include("abstract_types/AbstractTheoreticalDistributionScalarValue.jl") include("abstract_types/AbstractPopulation.jl") include("abstract_types/AbstractScalarPopulation.jl") include("abstract_types/AbstractEmpirical.jl") include("abstract_types/AbstractUncertainScalarKDE.jl") ########################################## # Distributions ########################################## include("distributions/assign_dist.jl") ########################################## # Fitted distribution type ########################################## include("distributions/fitted_distribution.jl") ########################################## # Composite uncertain scalar types ########################################## # Theoretical distributions with known parameters include("UncertainScalarsTheoretical.jl") # Theoretical distributions with fitted parameters include("UncertainScalarsTheoreticalFitted.jl") # Kernel density estimated distributions include("UncertainScalarsKDE.jl") # Populations with weighted probabilities include("UncertainScalarPopulation.jl") # Certain values (i.e. values without uncertainty) include("CertainValue.jl") ########################################## # Composite uncertain vector types ########################################## include("UncertainVectorsTheoretical.jl") include("UncertainVectorsTheoreticalFitted.jl") include("UncertainVectorsKDE.jl") # Define common constructor, so a similar syntax may be used to construct # all types of uncertain values. include("UncertainValue.jl") ########################################## # Comparisons ########################################## include("operations/comparisons.jl") # Conversions include("convert.jl") export KernelDensity, UnivariateKDE, default_bandwidth, kde, ProbabilityWeights, pweights, FrequencyWeights, fweights, Weights, weights, AnalyticWeights, aweights end #module """ UncertainValues A module defining uncertain value types. """ UncertainValues
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
347
struct UncertainVectorValue{T1<:Number, T2<:Number, T3<:Number, S<:ValueSupport} <: AbstractUncertainValue distribution::Distribution{Multivariate, S} value::AbstractVector{T1} lower::AbstractVector{T2} upper::AbstractVector{T3} end dimension(usv::UncertainVectorValue) = length(usv.distribution) export UncertainVectorValue
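A small, hypothetical construction of the type above (assuming `Distributions` and `LinearAlgebra` are available; note that `dimension` is internal and not exported):

```julia
using Distributions, LinearAlgebra

d = MvNormal(zeros(2), Matrix(1.0I, 2, 2))  # hypothetical 2-D distribution
v = UncertainVectorValue(d, [0.0, 0.0], [-1.0, -1.0], [1.0, 1.0])
dimension(v)  # 2: the length (dimension) of the underlying multivariate distribution
```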
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
413
""" UncertainEmpiricalVectorValue An empirical value represented by a distribution estimated from actual data. ## Fields - **`distribution`** The distribution describing the value. - **`values`**: The values from which `distribution` is estimated. """ struct UncertainEmpiricalVectorValue{D <: Distribution, T} <: AbstractEmpiricalValue distribution::D values::AbstractVector{AbstractVector{T}} end
UncertainData
https://github.com/kahaaga/UncertainData.jl.git
[ "MIT" ]
0.16.0
df107bbf91afba419309adb9daa486b0457c693c
code
170
import Base: convert convert(::Type{CertainValue}, x::T) where {T <: Number} = CertainValue(x) convert(::Type{T1}, x::T2) where {T1 <: AbstractUncertainValue, T2 <: Number} = CertainValue(x)
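A short sketch of what these conversions enable; the REPL-style lines are illustrative:

```julia
using UncertainData, Distributions

convert(CertainValue, 2.0)          # CertainValue(2.0)
convert(AbstractUncertainValue, 3)  # also a CertainValue, via the second method

# This lets plain numbers slot into containers typed on AbstractUncertainValue:
v = AbstractUncertainValue[UncertainValue(Normal, 0, 1)]
push!(v, 2.0)  # 2.0 is converted to CertainValue(2.0) on insertion
```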
UncertainData
https://github.com/kahaaga/UncertainData.jl.git