Dataset schema:
  licenses:     sequence, length 1 to 3
  version:      string, 677 distinct values
  tree_hash:    string, length 40
  path:         string, 1 distinct value
  type:         string, 2 distinct values
  size:         string, length 2 to 8
  text:         string, length 25 to 67.1M
  package_name: string, length 2 to 41
  repo:         string, length 33 to 86
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
13680
#
# Date created: 2022-06-12
# Author: aradclif
#
#
############################################################################################
# An efficient implementation of the Bernoulli process for sampling from a categorical
# distribution.
#### Bernoulli process
# θ ~ Multin(1; p₁, …, pₖ)
# θ₁ ~ Bin(1, p₁)
# θ₂, …, θₖ₋₁ ~ Bin(1 - ∑ᵢ₌₁ʲ⁻¹θᵢ, pⱼ / ∑ᵢ₌ⱼᵏpᵢ)
# θₖ = 1 - ∑ᵢ₌₁ᵏ⁻¹θᵢ
# Implies
# θⱼ ~ Bin(1 - ∑ᵢ₌₁ʲ⁻¹θᵢ, pⱼ / ∑ᵢ₌ⱼᵏpᵢ)
# In essence, the process is defined by repeated Bernoulli trials of
#     j                    ∨    rest
#     pⱼ / ∑ᵢ₌ⱼᵏpᵢ              1 - pⱼ / ∑ᵢ₌ⱼᵏpᵢ
# The process ends if j occurs at any point.
#### Alternate phrasing
# Consider the remaining probability mass
#     ∑ᵢ₌ⱼᵏpᵢ = 1 - ∑ᵢ₌₁ʲ⁻¹pᵢ = ∑ᵢ₌₁ᵏpᵢ - ∑ᵢ₌₁ʲ⁻¹pᵢ
# ∑ᵢ₌₁ʲ⁻¹pᵢ is the probability mass already removed when one is on the jᵗʰ category.
# Hence, the process can be viewed as
#     j                            ∨    rest
#     pⱼ / (1 - ∑ᵢ₌₁ʲ⁻¹pᵢ)              1 - pⱼ / (1 - ∑ᵢ₌₁ʲ⁻¹pᵢ)
####
# The alternate phrasing of the Bernoulli process enables efficient sampling by
# avoiding the cumulative sums which would otherwise be required. The real
# advantages are only realized in the analogous Binomial process (which samples
# from the Multinomial distribution with n > 1). While the Bernoulli process
# provides a clever general approach to constructing reversible Markov
# transition kernels (involving the categorical distribution),
# there exists a faster sampler for categorical distributions in which all
# pⱼ's are known. In essence, one can utilize the properties of the cumulative
# distribution function, CDF(p, j) = ∑ᵢ₌₁ʲpᵢ, noting that a sample
# u ~ U(0, 1) belongs to the jᵗʰ category, where j is the first
# index which satisfies CDF(p, j) > u. (In other words, one notes that the sample
# u could not have fallen into the (j-1)ᵗʰ category, as ∑ᵢ₌₁ʲ⁻¹pᵢ < u.)
################################################################
# using BenchmarkTools, Statistics

# As a recursion
# function catrand(p::Vector{T}, j::Int, k::Int, d::Float64=1.0) where {T<:Real}
#     if j ≤ k - 1
#         u = rand()
#         pⱼ = p[j]
#         p′ = pⱼ / d
#         return u ≤ p′ ? j : catrand(p, j + 1, k, d - pⱼ)
#     else
#         return j
#     end
# end
# catrand(p::Vector{T}) where {T<:Real} = catrand(p, 1, length(p), 1.0)
# catrandmulti₀(p::Vector{T}, N::Int) where {T<:Real} = [catrand(p) for _ = 1:N]

# p = [0.1, 0.25, 0.05, 0.35, 0.25]
# @benchmark catrand($p, 1, 5, 1.0)
# @benchmark catrand($p)
# @benchmark catrandmulti₀($p, 1000000)
# @timev c = catrandmulti₀(p, 1000000);
# t = [count(==(i), c) for i = 1:5]

# function rmass(p::Vector{T}, j::Int, i::Int=1, d::Float64=1.0) where {T<:Real}
#     if j == 1
#         return d
#     elseif i ≤ j - 1
#         return rmass(p, j, i + 1, d - p[i])
#     else
#         return d
#     end
# end
# function bernoullimass(p::Vector{T}) where {T<:Real}
#     k = length(p)
#     d = one(promote_type(T, Float64))
#     p̃ = Vector{promote_type(T, Float64)}(undef, k)
#     for j ∈ eachindex(p, p̃)
#         pⱼ = p[j]
#         p̃[j] = pⱼ / d
#         d -= pⱼ
#     end
#     return p̃
# end
# rmass(p, 2)
# r = map(j -> rmass(p, j), 1:5)
# @benchmark bernoullimass(p)
# p̃ = bernoullimass(p)
# p̃ == p ./ r

# If one wanted to repeat the computation several times, one could try:
# function catrandmultiₗ(p::Vector{T}, N::Int) where {T<:Real}
#     p̃ = bernoullimass(p)
#     k = length(p)
#     c = Vector{Int}(undef, N)
#     for n ∈ eachindex(c)
#         for j ∈ eachindex(p̃)
#             rand() ≤ p̃[j] && (c[n] = j; break)
#         end
#     end
#     return c
# end
# @benchmark catrandmultiₗ($p, 1000000)
# c2 = catrandmultiₗ(p, 1000000);
# t2 = [count(==(i), c2) for i = 1:5]

#### Essentially, there is very little difference in cost between rcatrand using
# pre-computed p̃ and computing it dynamically via recursion. Correspondingly,
# the looped catrandmulti is faster than the recursive one.
# Particularly in the case of the loop it makes sense -- the resultant
# machine code is just much simpler.
# function rcatrand(p̃::Vector{T}, j::Int, k::Int) where {T<:Real}
#     if j ≤ k - 1
#         u = rand()
#         return u ≤ p̃[j] ? j : rcatrand(p̃, j + 1, k)
#     else
#         return j
#     end
# end
# rcatrand(p̃::Vector{T}) where {T<:Real} = rcatrand(p̃, 1, length(p̃))
# @benchmark rcatrand($p̃)
# @timev cᵣ = [rcatrand(p̃) for _ = 1:1000000];
# tᵣ = [count(==(i), cᵣ) for i = 1:5]
# function catrandmultiᵣ(p::Vector{T}, N::Int) where {T<:Real}
#     p̃ = bernoullimass(p)
#     k = length(p)
#     c = Vector{Int}(undef, N)
#     for n ∈ eachindex(c)
#         c[n] = rcatrand(p̃, 1, k)
#     end
#     return c
# end
# @benchmark catrandmultiᵣ($p, 1000000)

#### Attempt using a loop for the catrand itself, rather than recursion.
# For larger k's, the loop really pulls ahead -- at k = 100 and at k = 1000, it is
# roughly twice as fast.
# In essence, while the recursion is conceptually elegant, a loop provides a simpler
# interface AND 2x speed.
# function catrandₗ(p::Vector{T}) where {T<:Real} # k = length(p) # d = one(promote_type(T, Float64)) # for j ∈ eachindex(p) # pⱼ = p[j] # u = rand() # u ≤ pⱼ / d && return j # d -= pⱼ # end # return k # end # @benchmark catrandₗ($p) # @benchmark catrand($p) # @timev cₗ = [catrandₗ(p) for _ = 1:1000000]; # tₗ = [count(==(i), cₗ) for i = 1:5] # function norm1!(w::Vector{T}) where {T<:Real} # s = zero(T) # @inbounds @simd for i ∈ eachindex(w) # s += w[i] # end # c = inv(s) # @inbounds @simd for i ∈ eachindex(w) # w[i] *= c # end # return w # end # p2 = norm1!(rand(100)); # @benchmark catrandₗ($p2) # @benchmark catrand($p2) # p3 = norm1!(rand(1000)); # @benchmark catrandₗ($p3) # @benchmark catrand($p3) # p4 = norm1!(rand(10000)); # @benchmark catrandₗ($p4) # @benchmark catrand($p4) # # If one wanted to repeat the computation several times, one could try: # function catrandₗ(p::Vector{T}, dims::Vararg{Int, N}) where {T<:Real} where {N} # p̃ = bernoullimass(p) # c = Array{Int, N}(undef, dims) # for n ∈ eachindex(c) # for j ∈ eachindex(p̃) # rand() ≤ p̃[j] && (c[n] = j; break) # end # end # return c # end # @benchmark catrandmultiₗ($p, 10000) # @benchmark catrandₗ($p, 10000) # using Distributions # @benchmark rand(Categorical($p), 10000) # c = rand(Categorical(p), 1000000); # t = [count(==(i), c) for i = 1:5] # #### Attempt using meta-unrolling -- 10x worse! (unless Val(k) is provided directly) # @generated function catrandₘ(p::Vector{T}, ::Val{k}) where {T<:Real} where {k} # quote # d = one(T) # Base.Cartesian.@nexprs $k j -> rand() ≤ p[j] / d ? (return j) : d -= p[j] # return $k # end # end # catrandₘ(p::Vector{T}) where {T<:Real} = catrandₘ(p, Val(length(p))) # @timev cₘ = [catrandₘ(p) for _ = 1:1000000]; # tₘ = [count(==(i), cₘ) for i = 1:5] # @benchmark catrandₘ($p, Val(5)) # @benchmark catrandₘ($p) #### A revised sampler using CDF properties -- improves speed by ≈1.3 at small k, # and at large k, is considerably faster. """ categorical(p::Vector{<:Real}) Draw a sample from the categorical distribution, where the number of categories is equal to the length of `p`. Caller is responsible for ensuring that `∑p = 1`. See also: [`categorical!`](@ref) """ @inline function categorical(p::AbstractVector{T}) where {T<:Real} k = length(p) j = 1 s = p[1] u = rand() @inbounds while s < u && j < k s += p[j += 1] end return j end @inline function rand_invcdf(Σω::AbstractVector{T}) where {T<:Real} k = length(Σω) j = 1 s = Σω[1] u = rand() @inbounds while s < u && j < k j += 1 s = Σω[j] end j end @inline function invcdf(Σω::AbstractVector{T}, u::Real) where {T<:Real} k = length(Σω) j = 1 s = Σω[1] @inbounds while s < u && j < k s = Σω[j += 1] end j end @inline function invcdf(Σω::AbstractVector{T}, u::Real, k::Int) where {T<:Real} j = 1 s = Σω[1] @inbounds while s < u && j < k s = Σω[j += 1] end j end # @inline function categorical(p::AbstractVector{T}, Iₛ::Vector{Int}) where {T<:Real} # k = length(p) # j = 1 # s = p[1] # u = rand() # @inbounds while s < u && j < k # s += p[j += 1] # end # return Iₛ[j] # end """ categorical!(C::Array{<:Integer, N}, p::Vector{<:Real}) where {N} Fill `C` with draws from the k-dimensional categorical distribution defined by the vector of probabilities `p`. The time complexity of this call should be assumed to be greater than the batch method, as `rng` internal calls are sequential. This may be useful when the memory overhead of a batch `rng` call exceeds the time savings. Caller is responsible for ensuring that `∑p = 1`. 
""" @inline function categorical!(C::AbstractArray{S, N}, p::AbstractVector{T}) where {T<:Real} where {S<:Integer, N} k = length(p) Σp = cumsum(p) s₀ = Σp[1] @inbounds for i ∈ eachindex(C) j = 1 s = s₀ u = rand() while s < u && j < k s = Σp[j += 1] end C[i] = j end return C end """ categorical(p::Vector{<:Real}, dims::Int...) Sample an array of categories from the k-dimensional categorical distribution defined by the vector of probabilities `p`. """ @inline categorical(p::AbstractVector{T}, dims::Vararg{Int, N}) where {T<:Real} where {N} = categorical!(Array{Int, N}(undef, dims), p) ############################################################################################ #### 2022-04-11: Batch SIMD sampler """ categorical!(C::Array{<:Integer, N}, U::Array{T, N}, Σp::Vector{T}) where {T<:Real, N} Fill `C` with draws from the k-dimensional categorical distribution defined by the vector of cumulative probabilities `Σp`. This method is optimized (internal `rng` calls are batched) for repeated calls involving arrays `C`, `U` of the same size, potentially with different `Σp`'s. Note: `U` is storage, potentially uninitialized, for the uniform random draws which will ultimately be used to draw samples from the categorical distribution. """ @inline function categorical!(C::AbstractArray{S, N}, U::AbstractArray{T, N}, Σp::AbstractVector{T}) where {T<:AbstractFloat} where {N} where {S<:Integer} k = length(Σp) rand!(U) s₀ = Σp[1] @inbounds for i ∈ eachindex(C, U) u = U[i] j = 1 s = s₀ while s < u && j < k s = Σp[j += 1] end C[i] = j end return C end """ categorical!(C::Array{<:Integer, N}, U::Array{T, N}, Σp::Vector{T}, p::Vector{T}) where {T<:AbstractFloat, N} `Σp` is a vector of any size, potentially uninitialized, which will be `resize!`'d and filled with the cumulative probabilities required for sampling. """ @inline function categorical!(C::AbstractArray{S, N}, U::AbstractArray{T, N}, Σp::AbstractVector{T}, p::AbstractVector{T}) where {T<:AbstractFloat} where {N} where {S<:Integer} k = length(p) resize!(Σp, k) cumsum!(Σp, p) rand!(U) s₀ = Σp[1] @inbounds for i ∈ eachindex(C, U) u = U[i] j = 1 s = s₀ while s < u && j < k s = Σp[j += 1] end C[i] = j end return C end ############################################################################################ #### 2022-05-20: Multinomial # # Demonstrate that the sum of categorical distributions cannot be represented by # # a multinomial distribution. 
# # Test 1
# Is = [[1,2], [1,2,3,4], [1,2,3,4,5,6]];
# A = sample(Int, Is, 6, 10^6);
# ω = A ./ 3;
# mean(ω, dims=2)
# var(ω, dims=2)
# # Property tests
# # We know that the moments and properties will all be correct, as this is the only correct
# # way to simulate the distribution of θ₁ + θ₂ + θ₃
# mean(A, dims=2)
# # Var(θ₁ + θ₂ + θ₃) = 1/4 + 3/16 + 5/36
# var(A, dims=2)
# # Pr(θ₁ = 1 ∩ θ₂ = 1 ∩ θ₃ = 1) = 1/48
# count(==(3), A, dims=2) ./ 10^6
# ws = [fill(1/2, 2), fill(1/4, 4), fill(1/6, 6)];
# function multin(ws::Vector{Vector{T}}) where {T<:Real}
#     ω = zeros(promote_type(T, Float64), maximum(length, ws))
#     for w ∈ ws
#         for i ∈ eachindex(w)
#             ω[i] += w[i]
#         end
#     end
#     J⁻¹ = one(promote_type(T, Float64)) / length(ws)
#     for i ∈ eachindex(ω)
#         ω[i] *= J⁻¹
#     end
#     ω
# end
# # ws′ = [[w; zeros(6 - length(w))] for w ∈ ws]
# # ws′[1] * ws′[2]' .* reshape(ws′[3], 1,1,6)
# # ws[1] * transpose(ws[2]) .* reshape(ws[3], 1,1,6)
# # # Test 2
# # w = rand(6)
# # ws₂ = [normweights(I, w) for I ∈ Is]
# # A₂ = sample(Int, Is, w, 6, 10^6);
# # ω₂ = A₂ ./ 3;
# # mean(ω₂, dims=2)
# # var(ω₂, dims=2)
# # ω₂′ = multin(ws₂)
# # w′ = normalize1(w)
# # [normweights(I, w′) for I ∈ Is]
# using Distributions, BenchmarkTools
# ω′ = multin(ws)
# d = Multinomial(3, ω′);
# # Multinomial is approximately 10x slower than the Bernoulli process, though this is small N.
# # I would expect the Bernoulli-Binomial process (presumably) used in the Multinomial
# # to become faster at larger N
# # @benchmark rand(d, 100000)
# # @benchmark sample(Int, Is, 6, 100000)
# A₃ = rand(d, 10^6);
# ω₃ = A₃ ./ 3;
# mean(ω₃, dims=2)
# var(ω₃, dims=2) # 11/36
# # Property tests
# # We know the expectation will match, but the variance will not be correct,
# # nor will other properties such as conditional probabilities
# mean(A₃, dims=2)
# # Var(θ₁ + θ₂ + θ₃) = 1/4 + 3/16 + 5/36
# var(A₃, dims=2)
# # Pr(θ₁ = 1 ∩ θ₂ = 1 ∩ θ₃ = 1) = 1/48, but clearly not true, as shown below
# count(==(3), A₃, dims=2) ./ 10^6
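############################################################################################
# Usage sketch (illustrative, not part of the implementation): exercise the CDF-based
# sampler `categorical(p, dims...)` defined above and check the empirical frequencies.
# The 1e-2 tolerance is an arbitrary choice for 10⁶ draws, not a guarantee.
# p = [0.1, 0.25, 0.05, 0.35, 0.25]
# c = categorical(p, 10^6)
# t = [count(==(i), c) / 10^6 for i = 1:5]
# isapprox(t, p, atol=1e-2)   # true, up to Monte Carlo error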
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
6601
# # Date created: 2022-06-22 # Author: aradclif # # ############################################################################################ # Performance tests and variants of Marsaglia alias table generation (square histogram) function marsaglia4(p::Vector{T}) where {T<:AbstractFloat} N = length(p) K = Vector{Int}(undef, N) V = Vector{promote_type(T, Float64)}(undef, N) q = similar(p) a = inv(N) # initialize for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:N-1 qᵢ, i = findmin(q) qⱼ, j = findmax(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end function vmarsaglia4(p::Vector{T}) where {T<:AbstractFloat} N = length(p) K = Vector{Int}(undef, N) V = Vector{promote_type(T, Float64)}(undef, N) q = similar(p) a = inv(N) # initialize for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:N-1 qᵢ, i = vfindmin(q) qⱼ, j = vfindmax(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end function vmarsaglia5(p::Vector{T}) where {T<:AbstractFloat} N = length(p) K = Vector{Int}(undef, N) V = Vector{promote_type(T, Float64)}(undef, N) q = similar(p) a = inv(N) # initialize for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:N-1 ((qᵢ, i), (qⱼ, j)) = vfindextrema(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end function vmarsaglia6(p::Vector{T}) where {T<:AbstractFloat} N = length(p) K = Vector{Int}(undef, N) V = Vector{promote_type(T, Float64)}(undef, N) # ix = Vector{Int}(undef, N) q = similar(p) a = inv(N) # initialize @turbo for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:N-1 ((qᵢ, i), (qⱼ, j)) = vfindextrema(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end for i = 1:10 n = (1 << i) p = normalize1!(rand(n)); println("n = $n, marsaglia") @btime marsaglia($p) println("n = $n, marsaglia4") @btime marsaglia4($p) println("n = $n, vmarsaglia4") @btime vmarsaglia4($p) println("n = $n, vmarsaglia5") @btime vmarsaglia5($p) println("n = $n, vmarsaglia6") @btime vmarsaglia6($p) end @benchmark marsaglia($p) @benchmark marsaglia4($p) @benchmark vmarsaglia4($p) @benchmark vmarsaglia5($p) @benchmark vmarsaglia6($p) marsaglia(p) == marsaglia4(p) vmarsaglia4(p) == vmarsaglia5(p) K1, V1 = marsaglia(p) K2, V2 = marsaglia4(p) K2_4, V2_4 = marsaglia4_2(p) K2_5, V2_5 = marsaglia5(p) K3, V3 = vmarsaglia4(p) K4, V4 = vmarsaglia5(p) K5, V5 = vmarsaglia6(p) K1 == K2 == K3 == K4 == K5 V1 == V2 == V3 == V4 == V5 K3 == K4 == K5 V3 == V4 == V5 ii23 = findall(K2 .!= K3) ii34 = findall(K3 .!= K4) ii35 = findall(K3 .!= K5) ii45 = findall(K4 .!= K5) K, V, q = marsaglia4_2(p); K0, V0, q0 = marsaglia(p); K2, V2, q2 = marsaglia4(p); K4, V4, q4 = vmarsaglia4(p); ii = findall(K2 .!= K4) [V0[ii] V[ii] V2[ii] V4[ii]] #### Generation benchmarks using BenchmarkTools, Random K, V = marsaglia(p) @benchmark marsaglia_generate($K, $V) @benchmark marsaglia_generate2($K, $V) @benchmark marsaglia_generate3($K, $V) p = rand(100); normalize1!(p); K, V = marsaglia(p); n_samples = 1024 C = Vector{Int}(undef, n_samples); @benchmark marsaglia_generate!($C, $K, $V) @benchmark vmarsaglia_generate!($C, $K, $V) U = similar(C, Float64); @benchmark marsaglia_generate!($C, $U, $K, $V) @benchmark vmarsaglia_generate!($C, $U, $K, $V) [[count(==(i), C) for i = 1:length(p)] ./ n_samples p] # Equal probability comparison p = fill(1/100, 100); K, V = marsaglia(p); @benchmark vmarsaglia_generate!($C, $U, $K, $V) @benchmark 
vmarsaglia_equiprobable!($C, $U, 100) ur = 1:100 @benchmark rand!($C, $ur) # faster than nearly-divisionless? -- in fact, both are. p = fill(1/10000, 10000); K, V = marsaglia(p); r = 1:10000 @benchmark rand!($C, $r) x = rand(1024); @benchmark rand!($x) 1024 / 2e-6 Σp = cumsum(p); U = rand(length(C)); @benchmark categorical!($C, $U, $Σp) #### Comparison to results from paper itself using Distributions l2cache = 1280 * 10^3 l2cache ÷ 2^4 # 80000 # n_sample = 2^16 # 10^8 A = Vector{Int}(undef, n_sample); U = similar(A, Float64); d = Poisson(100.) p = map(n -> pdf(d, n), 0:200) K, V = vmarsaglia(p); @benchmark vmarsaglia_generate!($A, $U, $K, $V) # 2^16 / (57.096 * 1e-6) t = MarsagliaSquareHistogram(p) @benchmark vmarsaglia_generate!($A, $U, $t) d = Binomial(100, .345) p = map(n -> pdf(d, n), 0:100) K, V = vmarsaglia(p); @benchmark vmarsaglia_generate!($A, $U, $K, $V) for λ ∈ [1, 10, 25, 100, 250, 1000] println("λ = ", λ) d = Poisson(λ) p = map(n -> pdf(d, n), 0:max(1.5λ, 100)) K, V = vmarsaglia(p) @btime vmarsaglia_generate!($A, $U, $K, $V) end for n ∈ [20, 100, 1000, 10000, 100000] println("n = ", n) for ρ ∈ (.1, .4) println("\t p = ", ρ) d = Binomial(n, ρ) p = map(n -> pdf(d, n), 0:n) K, V = vmarsaglia(p) @btime vmarsaglia_generate!($A, $U, $K, $V) end end #### Robin Hood (𝒪(N²)) squaring vs. 𝒪(NlogN) squaring n_cat = 2^20 p = normalize1!(rand(n_cat)); @timev K1, V1 = vmarsaglia(p); @timev K2, V2 = marsaglia2(p); n_sample = 2^16 A1 = Vector{Int}(undef, n_sample); A2 = Vector{Int}(undef, n_sample); U = similar(A, Float64); @benchmark vmarsaglia_generate!($A1, $U, $K1, $V1) @benchmark vmarsaglia_generate!($A2, $U, $K2, $V2) A1 = vmarsaglia_generate(K1, V1, 10^9); A2 = vmarsaglia_generate(K2, V2, 10^9); [countcategory(A1) countcategory(A2)] ./ 10^9 p = fill(1/n_cat, n_cat); #### Numerical stability tests n = 10^3 p = normalize1!(rand(n)); p_b = big.(p); K1, V1 = marsaglia(p); K2, V2 = marsaglia2(p); vK1, vV1 = vmarsaglia(p); bK1, bV1 = marsaglia(p_b); bK2, bV2 = marsaglia2(p_b); K1 == K2 V1 == V2 K1 == vK1 V1 == vV1 bV1 == V1 bV1 == bV2 bV1 == V2 bV2 == V2 count(V1 .== V2) count(bV1 .== V1) count(bV2 .== V2) extrema(V1 .- V2) extrema(bV1 .- V1) extrema(bV2 .- V2) sum(abs, bV1 .- V1) sum(abs, bV2 .- V2) # a case which is unstable p₁ = 0.999 # n = 10^4 for i = 1:10 # n = (1 << i) n = 10^i p = [p₁; fill((1.0 - p₁) / n, n)]; K1, V1 = marsaglia(p); K2, V2 = marsaglia2(p); @test K1 == K2 @test V1 == V2 end
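############################################################################################
# Note: `vmarsaglia_equiprobable!`, referenced by the benchmark above, is not defined in
# this file. A minimal sketch consistent with the equal-probability form j = ⌊nU + 1⌋
# used elsewhere in this package (assumes `using Random, LoopVectorization` for
# rand! and @turbo):
# function vmarsaglia_equiprobable!(C::AbstractArray, U::AbstractArray{Float64}, n::Int)
#     rand!(U)
#     @turbo for i ∈ eachindex(C, U)
#         C[i] = trunc(Int, muladd(U[i], n, 1))   # j = ⌊nU + 1⌋; no alias lookup needed
#     end
#     C
# end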
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
12152
#
# Date created: 2022-06-21
# Author: aradclif
#
#
############################################################################################
# https://www.jstatsoft.org/article/view/v011i03
#### Marsaglia's Square Histogram (Method II in the above article)
# p ∈ ℝᴺ, ∑ᵢpᵢ = 1, a = 1/N
# K ∈ ℕᴺ, Kᵢ = i
# V ∈ ℝᴺ, Vᵢ = i * a
# Generate: j = ⌊N*U+1⌋; if U < V[j], return j, else return K[j]
# Theoretically, just one U ~ Uniform(0,1) is sufficient. In practice, it is also faster, as
#     U=rand(); j=floor(Int, N * U + 1)
# is far fewer instructions than
#     j=rand(1:N); U=rand()
#### Robin Hood
## Motivation
# The frequency with which the `else` branch is required is proportional to the "over-area",
# i.e. the part of the final squared histogram that lies above the division points.
# Or, to quantify it: ∑ᵢ (i * a) - V[i]
# The objective is to minimize the number of times `else return K[j]` occurs
# by creating V such that each V[j] is as close to j/N as possible -- an NP-hard problem.
# Marsaglia's suggestion of the Robin Hood method is a good solution which is 𝒪(NlogN).
## Thoughts
# The reduction in `else` branches leads to faster sampling -- 𝒪(1) regardless --
# as it would certainly lead to a more predictable instruction pipeline, due to minimizing
# the number of times the `else` branch is executed (or used, even if executed).
# Does it justify the 𝒪(NlogN) construction cost for the alias tables? -- given
# that we could pay 𝒪(N) instead to construct an inferior table (with no limit on how terrible).
# - An analysis based on the objective function above would be informative;
#   it can be verified with Monte Carlo: compute 𝒻(𝐱) = ∑ᵢ (i * a) - V[i]
#   using the V produced by each procedure.
# - The number of samples to be drawn is an orthogonal decision variable;
#   one surmises that an increasing number of samples favors the better table.
## Algorithm
# Repeat these two steps N - 1 times:
# 1. find the smallest probability, pᵢ, and the largest probability, pⱼ
# 2. set K[i] = j; V[i] = (i - 1) * a + pᵢ; replace pⱼ with pⱼ - (a - pᵢ)
## Numerical stability
# Replacing pⱼ is the only point at which stability is a real concern. There are a few
# options for the order of operations and parenthesization. Unsurprisingly, Marsaglia gives
# the most stable form: pⱼ = pⱼ - (a - pᵢ)
# But it is worthwhile to show that this is the most stable form.
# First, consider that it should be the case that pᵢ ≤ a, hence 0 ≤ (a - pᵢ) ≤ 1/n.
# (a - pᵢ) may be a small number, but (a - pᵢ) is always well-defined, since the subtraction
# occurs at eps(a)≡ulp(a).
# It is worth noting that the (a - pᵢ) operation becomes unstable when pᵢ ≤ eps(a)/4, assuming
# the worst case, a=1. However, this has the opposite relationship to n: increasing n will
# result in a subtraction which takes place at smaller values, hence, the (floating)
# points are more densely packed (i.e. the distance to the nearest float is smaller).
# It is reassuring to note that eps(.5) = 2⁻⁵³, hence, even for a vector of length 2,
# (a - pᵢ) is stable for pᵢ > 2⁻⁵⁵.
# The subsequent subtraction, i.e. pⱼ - (a - pᵢ), will occur at eps(pⱼ)≡ulp(pⱼ). Thus,
# the operation will be unstable when (a - pᵢ) ≤ c * ulp(pⱼ), c ≤ 1/4 (for pⱼ = 1, the worst case).
# That is, unstable when: (a - pᵢ) ≤ eps(pⱼ)/4
# If pᵢ ≈ 0, (a - pᵢ) ≈ 1/n ⟹ 1/n ≤ eps(pⱼ)/4 is unstable.
# As pⱼ is at most 1, the worst case will be eps(pⱼ)/4 = 2⁻⁵⁴, i.e. 1/n ≤ 2⁻⁵⁴.
# ∴ in the worst case, instability begins at n ≥ 2⁵⁴ if pᵢ ≈ 0.
# ∴ in general, expect instability if (a - pᵢ) ≤ 2⁻⁵⁴.
# The above assumed Float64 with 53 bits of precision; the general form in terms of precision # replaces 2⁻⁵⁴ → 2⁻ᵖ⁻¹. # These are very permissive bounds; one is likely to run into other issues well before # the algorithm becomes numerically unstable. ## Numerical stability, revisit # Oddly, Marsaglia's implementation in TplusSQ.c uses pⱼ + pᵢ - a, which has slightly # worse numerical stability. (pⱼ + pᵢ) becomes unstable when pᵢ ≤ eps(pⱼ)/2, which # corresponds to 2⁻ᵖ at pⱼ = 1. # It is possible to find cases where both suffer roundoff, which is ≤ 2⁻ᵖ⁺¹ for pⱼ + pᵢ - a # and ≤ 2⁻ᵖ for pⱼ - (a - pᵢ). # Provided that one is working with Float64, it most likely does not matter. # However, if p is provided as Float32, it may be preferable to forcibly promote to Float64 # just to ensure stability; naturally, Float16 is largely unsuitable and needs promotion. # ## # # Comparison of stability # # f1 is unstable at n = 10, qⱼ = .999999999999 # # However, f2 and f3 are unstable at n = 10, qⱼ = 5 # f1(qⱼ, qᵢ, a) = qⱼ - (a - qᵢ) # f2(qⱼ, qᵢ, a) = qⱼ + qᵢ - a # ff2(qⱼ, qᵢ, a) = @fastmath qⱼ + qᵢ - a # f3(qⱼ, qᵢ, a) = (qⱼ + qᵢ) - a # n = 10 # a = 1 / n # qⱼ = .999999999999 # qᵢ = (1 - qⱼ) / n # f1(qⱼ, qᵢ, a) # f2(qⱼ, qᵢ, a) # ff2(qⱼ, qᵢ, a) # f3(qⱼ, qᵢ, a) # f1(big(qⱼ), big(qᵢ), big(a)) # f2(big(qⱼ), big(qᵢ), big(a)) # f3(big(qⱼ), big(qᵢ), big(a)) # # Another example # f1(1.0, eps()/2, .0003) # f2(1.0, eps()/2, .0003) # f2(1.0, big(eps()/2), .0003) # isfeq(qⱼ, qᵢ, a) = f1(qⱼ, qᵢ, a) == f2(qⱼ, qᵢ, a) # isfeq_big(qⱼ, qᵢ, a) = f1(qⱼ, qᵢ, a) == Float64(f2(big(qⱼ), big(qᵢ), big(a))) # count(i -> isfeq(qⱼ, i, a), qᵢ:qᵢ:n*qᵢ) # isfeq.(qⱼ, qᵢ:qᵢ:n*qᵢ, a) # isfeq_big.(qⱼ, qᵢ:qᵢ:n*qᵢ, a) function marsaglia(p::Vector{T}) where {T<:AbstractFloat} n = length(p) K = Vector{Int}(undef, n) V = Vector{promote_type(T, Float64)}(undef, n) q = similar(p, promote_type(T, Float64)) a = inv(n) # initialize @inbounds for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:n-1 qᵢ, i = findmin(q) qⱼ, j = findmax(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end function vmarsaglia(p::Vector{T}) where {T<:AbstractFloat} n = length(p) K = Vector{Int}(undef, n) V = Vector{promote_type(T, Float64)}(undef, n) a = inv(n) q = similar(p, promote_type(T, Float64)) # initialize @turbo for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:n-1 qᵢ, i = vfindmin(q) qⱼ, j = vfindmax(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end function marsaglia2(p::Vector{T}) where {T<:AbstractFloat} n = length(p) K = Vector{Int}(undef, n) V = Vector{promote_type(T, Float64)}(undef, n) q = similar(p, promote_type(T, Float64)) a = inv(n) larges = Vector{Int}(undef, n) smalls = Vector{Int}(undef, n) kl = 0 ks = 0 # initialize @inbounds for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] if p[i] > a larges[kl+=1] = i else smalls[ks+=1] = i end end while kl > 0 && ks > 0 j = larges[kl]; kl -= 1 i = smalls[ks]; ks -= 1 qᵢ = q[i] qⱼ = q[j] K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a if q[j] > a larges[kl+=1] = j else smalls[ks+=1] = j end end K, V end # p = [0.2, 0.3, 0.1, 0.4] # p = [.21, .18, .26, .17, .18] # p = [2/15, 7/15, 6/15] function marsaglia_generate(K::Vector{Int}, V::Vector{T}) where {T<:AbstractFloat} n = length(K) u = rand() j = trunc(Int, muladd(u, n, 1)) u < V[j] ? 
j : K[j] end function marsaglia_generate!(A::AbstractArray, K::Vector{Int}, V::Vector{T}) where {T<:AbstractFloat} length(K) == length(V) || throw(ArgumentError("K and V must be of same size")) n = length(K) @inbounds for i ∈ eachindex(A) # safe to also use @fastmath, @simd u = rand() j = trunc(Int, muladd(u, n, 1)) # muladd is faster than u * n + 1 by ≈5-6% A[i] = u < V[j] ? j : K[j] end A end function marsaglia_generate!(A::AbstractArray, u::AbstractArray{Float64}, K::Vector{Int}, V::Vector{T}) where {T<:AbstractFloat} length(K) == length(V) || throw(ArgumentError("K and V must be of same size")) n = length(K) rand!(u) @inbounds for i ∈ eachindex(A, u) # safe to also use @fastmath, @simd j = trunc(Int, muladd(u[i], n, 1)) # muladd is faster than u * n + 1 by ≈5-6% A[i] = u[i] < V[j] ? j : K[j] end A end function marsaglia_generate(K::Vector{Int}, V::Vector{T}, dims::Vararg{Int, N}) where {T<:AbstractFloat} where {N} marsaglia_generate!(Array{Int}(undef, dims), K, V) end function marsaglia!(K::Vector{Int}, V::Vector{T}, q::Vector{T}, p::Vector{T}) where {T<:AbstractFloat} n = length(p) a = inv(n) @inbounds for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:n-1 qᵢ, i = findmin(q) qⱼ, j = findmax(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end function vmarsaglia!(K::Vector{Int}, V::Vector{T}, q::Vector{T}, p::Vector{T}) where {T<:AbstractFloat} n = length(p) a = inv(n) @inbounds for i ∈ eachindex(K, V, p, q) K[i] = i V[i] = i * a q[i] = p[i] end for _ = 1:n-1 qᵢ, i = vfindmin(q) qⱼ, j = vfindmax(q) K[i] = j V[i] = (i - 1) * a + qᵢ q[j] = qⱼ - (a - qᵢ) q[i] = a end K, V end # faster, but not necessarily the method to use due to LoopVectorization and Base.Threads # alas, it is ≈5x faster function vmarsaglia_generate!(A::AbstractArray, u::AbstractArray{Float64}, K::Vector{Int}, V::Vector{T}) where {T<:AbstractFloat} length(K) == length(V) || throw(ArgumentError("K and V must be of same size")) n = length(K) rand!(u) @turbo for i ∈ eachindex(A, u) j = trunc(Int, muladd(u[i], n, 1)) A[i] = ifelse(u[i] < V[j], j, K[j]) end A end vmarsaglia_generate!(A::AbstractArray, K::Vector{Int}, V::Vector{T}) where {T<:AbstractFloat} = vmarsaglia_generate!(A, similar(A, Float64), K, V) function vmarsaglia_generate(K::Vector{Int}, V::Vector{T}, dims::Vararg{Int, N}) where {T<:AbstractFloat} where {N} vmarsaglia_generate!(Array{Int}(undef, dims), K, V) end ################ # convenience utils @inline _marsaglia_init(T::Type{<:AbstractFloat}, n::Int) = Vector{Int}(undef, n), Vector{T}(undef, n), Vector{T}(undef, n) @inline _marsaglia_init(T::Type{<:AbstractFloat}) = _marsaglia_init(T, 0) @inline _marsaglia_init() = _marsaglia_init(Float64) @inline _genstorage_init(T::Type{<:AbstractFloat}, n::Int) = Vector{Int}(undef, n), Vector{T}(undef, n) ################ # experimental mutable struct MarsagliaSquareHistogram{Ti<:Integer, Tv<:AbstractFloat} K::Vector{Ti} V::Vector{Tv} n::Int end # MarsagliaSquareHistogram{Ti, Tv}(K, V, n) where {Ti<:Integer, Tv<:AbstractFloat} = # MarsagliaSquareHistogram(convert(Vector{Ti}, K), convert(Vector{Tv}, V), n) MarsagliaSquareHistogram(K, V) = MarsagliaSquareHistogram(K, V, length(K)) MarsagliaSquareHistogram((K, V), n) = MarsagliaSquareHistogram(K, V, n) MarsagliaSquareHistogram(p) = MarsagliaSquareHistogram(marsaglia(p), length(p)) vmarsaglia_generate!(C, t::MarsagliaSquareHistogram) = ((; K, V, n) = t; vmarsaglia_generate!(C, K, V)) vmarsaglia_generate!(C, U, t::MarsagliaSquareHistogram) = ((; K, V, n) = t; 
vmarsaglia_generate!(C, U, K, V)) vmarsaglia_generate(t::MarsagliaSquareHistogram, dims::Vararg{Int, N}) where {N} = vmarsaglia_generate!(Array{Int}(undef, dims), t) ################ # Equal probability case admits an optimized form: # U ~ Uniform(0,1) # j = ⌊nU + 1⌋; return j function vfill!(A::AbstractArray, v::Real) @turbo for i ∈ eachindex(A) A[i] = v end A end function vmarsaglia_generate!(A::AbstractArray, u::AbstractArray{Float64}, n::Int) n > 0 || throw(ArgumentError("n must be > 0")) n == 1 && return vfill!(A, 1) rand!(u) @turbo for i ∈ eachindex(A, u) A[i] = trunc(Int, muladd(u[i], n, 1)) end A end vmarsaglia_generate!(A::AbstractArray, n::Int) = vmarsaglia_generate!(A, similar(A, Float64), n)
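############################################################################################
# Illustration (not part of the implementation): the "over-area" objective from the
# Robin Hood notes above, 𝒻(V) = ∑ᵢ (i * a - V[i]), which is proportional to how often
# the `else return K[j]` branch fires during generation. `normalize1!` is assumed from
# elsewhere in the package; the comparison is indicative, not asserted.
# overarea(V) = (a = inv(length(V)); sum(i * a - V[i] for i ∈ eachindex(V)))
# p = normalize1!(rand(1000));
# K1, V1 = marsaglia(p);     # Robin Hood squaring
# K2, V2 = marsaglia2(p);    # worklist-based squaring
# overarea(V1), overarea(V2) # smaller value ⟹ fewer alias (K[j]) lookups when sampling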
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
16433
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # mirror of sampler.jl; separate file for variants on threading # The bare minimum for `sample` interface-- covers all 4 other definitions. tsample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = tsample(S, A, n_sim, n_cat, dims) tsample(::Type{S}, A, n_sim; dims=:) where {S} = tsample(S, A, n_sim, num_cat(A), dims) tsample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = tsample(S, A, n_sim, n_cat, (dims,)) function tsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = fill!(similar(A, S, Dᴮ), zero(S)) tsample!(B, A) end function tsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_cat, n_sim)), zero(S)) tsample!(B, A) end # for recursive spawning function tsample!(B, A) _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) tsample!(B, A, keep, default, firstindex(B, 2):size(B, 2)) end # The expected case: vectors of sparse vectors (as their bare components) function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = tsample(S, A, n_sim, n_cat, :) tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = tsample(S, A, n_sim, n_cat, :) function tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) tsample!(B, A) end function tsample!(B::AbstractMatrix, 
A::Tuple{Vector{Int}, Vector{<:AbstractFloat}}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 Iₛ, ω = A K, V = marsaglia(ω) n = length(K) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[Iₛ[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init() ω = Vector{Float64}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) resize!(ω, n) fill!(ω, inv(n)) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init() ω = Vector{Float64}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) resize!(ω, n) fill!(ω, inv(n)) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector function tsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} B = zeros(S, n_cat, n_sim) tsample!(B, A) end # Trivial parallelism is preferable here, but it's not safe! # These are questionable methods (though, the function barrier approach is safe). 
# @inline function _tsample!(B::AbstractMatrix{S}, A::Vector{Int}, j::Int) where {S<:Real} # c = rand(A) # @inbounds B[c, j] += one(S) # B # end # function tsample0!(B::AbstractMatrix{S}, A::Vector{Int}) where {S<:Real} # _check_reducedims(B, A) # # @inbounds Threads.@threads for j ∈ axes(B, 2) # # c = rand(A) # # B[c, j] += one(S) # # end # @inbounds Threads.@threads for j ∈ axes(B, 2) # _tsample!(B, A, j) # end # B # end function tsample!(B::AbstractMatrix, A::Vector{Int}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::Vector{Int}, 𝒥::UnitRange{Int}) where {S<:Real} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 n = length(A) K, V = marsaglia(fill(inv(n), n)) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[A[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end ################ # General case: dense vectors, the linear index of which indicates the category function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for ω ∈ a n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of dense vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) ω = A[IA] n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a dense vector tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = tsample(S, A, n_sim, n_cat, :) tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = tsample(S, A, n_sim, n_cat, :) function tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) tsample!(B, A) end function tsample!(B::AbstractMatrix, A::Vector{<:AbstractFloat}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::Vector{T}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 ω = A K, V = marsaglia(ω) n = length(K) @inbounds for j ∈ 𝒥 u 
= rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[c, j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end ################ # General case: sparse vectors, the nzval of which indicates the category function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(Tv) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(Tv) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = tsample(S, A, n_sim, n_cat, :) tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = tsample(S, A, n_sim, n_cat, :) function tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) tsample!(B, A) end function tsample!(B::AbstractMatrix, A::SparseVector{<:AbstractFloat}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::SparseVector{T}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 (; n, nzind, nzval) = A Iₛ, ω = nzind, nzval K, V = marsaglia(ω) n = length(K) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[Iₛ[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end
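############################################################################################
# The recursive-spawn pattern shared by every `tsample!` method above, reduced to a
# standalone sketch (illustrative only; `f!` stands for any kernel that fills the
# columns 𝒥 of B):
# function recspawn!(f!::F, B, 𝒥::UnitRange{Int}, cutoff::Int=1024) where {F}
#     if length(𝒥) ≤ cutoff
#         f!(B, 𝒥)                              # sequential base case
#     else
#         h = (first(𝒥) + last(𝒥)) >> 1
#         @sync begin
#             Threads.@spawn recspawn!(f!, B, first(𝒥):h, cutoff)
#             recspawn!(f!, B, (h + 1):last(𝒥), cutoff)
#         end
#     end
#     return B
# end
# This is free of data races provided that distinct ranges 𝒥 touch disjoint columns of B,
# which is why the trivial per-column parallelism commented out above is unnecessary.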
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
16520
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # mirror of sampler.jl; separate file for variants on threading # The bare minimum for `sample` interface-- covers all 4 other definitions. vtsample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = vtsample(S, A, n_sim, n_cat, dims) vtsample(::Type{S}, A, n_sim; dims=:) where {S} = vtsample(S, A, n_sim, num_cat(A), dims) vtsample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = vtsample(S, A, n_sim, n_cat, (dims,)) function vtsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = fill!(similar(A, S, Dᴮ), zero(S)) vtsample!(B, A) end function vtsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_cat, n_sim)), zero(S)) vtsample!(B, A) end # for recursive spawning function vtsample!(B, A) _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) vtsample!(B, A, keep, default, firstindex(B, 2):size(B, 2)) end # The expected case: vectors of sparse vectors (as their bare components) function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector vtsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = vtsample(S, A, n_sim, n_cat, :) vtsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = vtsample(S, A, n_sim, n_cat, :) function vtsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) vtsample!(B, A) end function 
vtsample!(B::AbstractMatrix, A::Tuple{Vector{Int}, Vector{<:AbstractFloat}}) _check_reducedims(B, A) vtsample!(B, A, firstindex(B, 2):size(B, 2)) end function vtsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 Iₛ, ω = A K, V = marsaglia(ω) n = length(K) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[Iₛ[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, start:h) vtsample!(B, A, (h + 1):stop) end return B end end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init() ω = Vector{Float64}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) resize!(ω, n) fill!(ω, inv(n)) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init() ω = Vector{Float64}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) resize!(ω, n) fill!(ω, inv(n)) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector function vtsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} B = zeros(S, n_cat, n_sim) vtsample!(B, A) end # Trivial parallelism is preferable here, but it's not safe! # These are questionable methods (though, the function barrier approach is safe). 
# @inline function _vtsample!(B::AbstractMatrix{S}, A::Vector{Int}, j::Int) where {S<:Real} # c = rand(A) # @inbounds B[c, j] += one(S) # B # end # function vtsample0!(B::AbstractMatrix{S}, A::Vector{Int}) where {S<:Real} # _check_reducedims(B, A) # # @inbounds Threads.@threads for j ∈ axes(B, 2) # # c = rand(A) # # B[c, j] += one(S) # # end # @inbounds Threads.@threads for j ∈ axes(B, 2) # _vtsample!(B, A, j) # end # B # end function vtsample!(B::AbstractMatrix, A::Vector{Int}) _check_reducedims(B, A) vtsample!(B, A, firstindex(B, 2):size(B, 2)) end function vtsample!(B::AbstractMatrix{S}, A::Vector{Int}, 𝒥::UnitRange{Int}) where {S<:Real} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 n = length(A) K, V = marsaglia(fill(inv(n), n)) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[A[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, start:h) vtsample!(B, A, (h + 1):stop) end return B end end ################ # General case: dense vectors, the linear index of which indicates the category function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for ω ∈ a n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of dense vectors function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(T) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) ω = A[IA] n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a dense vector vtsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = vtsample(S, A, n_sim, n_cat, :) vtsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = vtsample(S, A, n_sim, n_cat, :) function vtsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) vtsample!(B, A) end function vtsample!(B::AbstractMatrix, A::Vector{<:AbstractFloat}) _check_reducedims(B, A) vtsample!(B, A, firstindex(B, 2):size(B, 2)) end function vtsample!(B::AbstractMatrix{S}, A::Vector{T}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 ω = A K, V = marsaglia(ω) n = 
length(K) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[c, j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, start:h) vtsample!(B, A, (h + 1):stop) end return B end end ################ # General case: sparse vectors, the nzval of which indicates the category function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(Tv) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function vtsample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C, U = _genstorage_init(Float64, L) K, V, ix, q = _marsaglia_init(Tv) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, keep, default, start:h) vtsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector vtsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = vtsample(S, A, n_sim, n_cat, :) vtsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = vtsample(S, A, n_sim, n_cat, :) function vtsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) vtsample!(B, A) end function vtsample!(B::AbstractMatrix, A::SparseVector{<:AbstractFloat}) _check_reducedims(B, A) vtsample!(B, A, firstindex(B, 2):size(B, 2)) end function vtsample!(B::AbstractMatrix{S}, A::SparseVector{T}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 (; n, nzind, nzval) = A Iₛ, ω = nzind, nzval K, V = marsaglia(ω) n = length(K) @inbounds for j ∈ 𝒥 u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[Iₛ[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn vtsample!(B, A, start:h) vtsample!(B, A, (h + 1):stop) end return B end end
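############################################################################################
# Usage sketch (illustrative): 10⁵ draws from a "sparse" categorical with categories Iₛ
# and probabilities ω, accumulated into an Int matrix of size n_cat × n_sim. Assumes the
# rest of the package (e.g. `_check_reducedims`, `marsaglia`) is loaded.
# Iₛ, ω = [1, 5, 9], [0.3, 0.4, 0.3]
# B = vtsample(Int, (Iₛ, ω), 10^5, 9)
# vec(sum(B, dims=2)) ./ 10^5   # ≈ 0.3, 0.4, 0.3 on rows 1, 5, 9; zero elsewhere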
#
# Date created: 2022-07-11
# Author: aradclif
#
#
############################################################################################
# Some benchmarks of normalization performance
# (normalize1!, vnormalize1!, algorithm3! and friends are defined elsewhere in the package)

using BenchmarkTools, LoopVectorization   # for @btime/@benchmark and @turbo

#### normalize1
for i = 1:15
    for j = -1:1
        n = (1 << i) + j
        p = rand(n)
        println("normalize1!, n = ", n)
        @btime normalize1!($p)
        println("vnormalize1!, n = ", n)
        @btime vnormalize1!($p)
    end
end

w = [.1, .2, .3, .4, 0.0]
w = zeros(5)
p = similar(w)
algorithm3!(p, w, 0.0)

w = rand(2^6); w[rand(1:2^6, 10)] .= 0;
p = similar(w);
u = 0.5
@benchmark algorithm3!($p, $w, $u)
@benchmark algorithm3_v2!($p, $w, $u)
@benchmark algorithm3_v3!($p, $w, $u)

#### Algorithm 3
# As one might expect, @turbo handles tails better than base julia
u = 0.5
for i = 1:15
    for j = -1:1
        n = (1 << i) + j
        w = rand(n)
        w[rand(1:n, n >> 1)] .= 0
        p = similar(w)
        println("algorithm3!, n = ", n)
        @btime algorithm3!($p, $w, $u)
        println("algorithm3_v2!, n = ", n)
        @btime algorithm3_v2!($p, $w, $u)
        println("algorithm3_v3!, n = ", n)
        @btime algorithm3_v3!($p, $w, $u)
        println("valgorithm3!, n = ", n)
        @btime valgorithm3!($p, $w, $u)
    end
end

# Normalize `w` into `p`, distributing a total mass of `u` uniformly across
# the zero entries of `w` and scaling the nonzero entries to sum to 1 - u.
function algorithm3_v3!(p::Vector{S}, w::Vector{T}, u::S) where {S<:AbstractFloat, T<:Real}
    s = zero(T)
    z = 0
    @inbounds @simd for i ∈ eachindex(p, w)
        w̃ = w[i]
        s += w̃
        p[i] = w̃
        z += w̃ == zero(T)
    end
    c = z == 0 ? inv(s) : (one(S) - u) / s
    u′ = z == length(p) ? inv(z) : u / z
    @inbounds @simd for i ∈ eachindex(p)
        pᵢ = p[i]
        p[i] = ifelse(pᵢ == zero(S), u′, pᵢ * c)
    end
    p
end

function valgorithm3!(p::Vector{S}, w::Vector{T}, u::S) where {S<:AbstractFloat, T<:Real}
    s = zero(T)
    z = 0
    @turbo for i ∈ eachindex(p, w)
        w̃ = w[i]
        s += w̃
        p[i] = w̃
        z += w̃ == zero(T)
    end
    c = z == 0 ? inv(s) : (one(S) - u) / s
    u′ = z == length(p) ? inv(z) : u / z
    @turbo for i ∈ eachindex(p)
        pᵢ = p[i]
        p[i] = ifelse(pᵢ == zero(S), u′, pᵢ * c)
    end
    p
end

#### Algorithm 2.2
algorithm2_2_quote(3)
algorithm2_2_normalize_quote(3)
algorithm2_2_normalize1!(p, Is, ws) = normalize1!(algorithm2_2!(p, Is, ws))

N = 2^10
n = 2^7
m = 3
Is = ntuple(_ -> rand(1:N, n), m);
ws = ntuple(_ -> rand(N), m);
p = zeros(n);
@benchmark algorithm2_2!($p, $Is, $ws)
@benchmark algorithm2_2_normalize1!($p, $Is, $ws)
@benchmark algorithm2_2_normalize!($p, $Is, $ws)
@benchmark valgorithm2_2_normalize!($p, $Is, $ws)
@timev algorithm2_2_normalize!(p, (Is[1],), (ws[1],))

str = """
@inbounds @simd ivdep for j = eachindex(I_1, I_2, I_3)
    w′[j] = w_1[I_1[j]] * w_2[I_2[j]] * w_3[I_3[j]]
end
"""
e = Meta.parse(str)
Meta.show_sexpr(e)

eq = :(@inbounds @simd ivdep for j = eachindex(I_1, I_2, I_3)
           w′[j] = w_1[I_1[j]] * w_2[I_2[j]] * w_3[I_3[j]]
       end)
Meta.show_sexpr(eq)
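# To make the semantics concrete, a hand-computed example of the
# zero-replacement normalization defined above: the nonzero weights are
# rescaled to carry total mass 1 - u, and u is split evenly among the zeros.
w = [0.2, 0.0, 0.6, 0.0, 0.2]
p = similar(w)
algorithm3_v3!(p, w, 0.5)
# p ≈ [0.1, 0.25, 0.3, 0.25, 0.1] -- nonzeros scaled to sum to 0.5,
# the remaining 0.5 split evenly between the two zero entries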
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # A ∈ 𝔻ᴰ¹ˣᴰ²ˣᴰ³ˣ⋯ ; eltype(A) = Vector{Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # (Iₛ, ω) OR (Iₛ, Σω) # Each sampling routine is identical: unpack the tuple, draw c ~ Categorical(ω) and # obtain the real category as Iₛ[c]. # This enables an encapsulation of all PVG-induced variability, hence, a consistent # interface for the sampler. # Technically, `sample` only needs to know ndims(A), not necessarily the element type. # The appropriate dispatch on element type is necessary for `sample!` # `sample` could instead use # A::AbstractArray{U, N} where {U<:Union{Vector{Tuple{Vector{Int}, Vector{T}}}, Tuple{Vector{Int}, Vector{T}}}} where {T<:AbstractFloat} # Actually, this should ideally support: # array of array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:AbstractArray{S, M}} where {M} where {S<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # (sparse) vector # A::Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # The bare minimum for `sample` interface-- covers all 4 other definitions. sample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = sample(S, A, n_sim, n_cat, dims) sample(::Type{S}, A, n_sim; dims=:) where {S} = sample(S, A, n_sim, num_cat(A), dims) sample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = sample(S, A, n_sim, n_cat, (dims,)) function sample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) 
B = fill!(similar(A, S, Dᴮ), zero(S)) sample!(B, A) end function sample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_cat, n_sim)), zero(S)) sample!(B, A) end # The expected case: vectors of sparse vectors (as their bare components) function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end B end # The simplest case: a sparse vector sample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = sample(S, A, n_sim, n_cat, :) sample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = sample(S, A, n_sim, n_cat, :) function sample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} where {N} B = zeros(S, n_cat, n_sim) sample!(B, A) end function sample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, ω = A k = length(ω) Σω = cumsum(ω) s₀ = Σω[1] @inbounds for j ∈ axes(B, 2) u = rand() c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[Iₛ[c], j] += one(S) end B end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) for j ∈ axes(B, 2) c = C[j] B[c, j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] rand!(C, Iₛ) for j ∈ axes(B, 2) c = C[j] B[c, j, IR] += one(S) end end B end # The simplest case: a sparse vector function sample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} B = 
zeros(S, n_cat, n_sim) sample!(B, A) end # Oddly, the fastest sampler is non-allocating -- most likely due to # the elimination of store + access instructions associated with using a temporary array. function sample!(B::AbstractMatrix{S}, A::Vector{Int}) where {S<:Real} _check_reducedims(B, A) @inbounds for j ∈ axes(B, 2) c = rand(A) B[c, j] += one(S) end B end ################ # General case: dense vectors, the linear index of which indicates the category function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for ω ∈ a resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[c, j, IR] += one(S) end end end B end # A simplification: an array of dense vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) ω = A[IA] resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[c, j, IR] += one(S) end end B end # The simplest case: a dense vector sample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = sample(S, A, n_sim, n_cat, :) sample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = sample(S, A, n_sim, n_cat, :) function sample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} where {N} B = zeros(S, n_cat, n_sim) sample!(B, A) end function sample!(B::AbstractMatrix{S}, A::Vector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) ω = A k = length(ω) Σω = cumsum(ω) s₀ = Σω[1] @inbounds for j ∈ axes(B, 2) u = rand() c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[c, j] += one(S) end B end ################ # General case: sparse vectors, the nzval of which indicates the category function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σω = Vector{Tv}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σω = Vector{Tv}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] (; 
n, nzind, nzval) = sv Iₛ, ω = nzind, nzval resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end B end # The simplest case: a sparse vector sample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = sample(S, A, n_sim, n_cat, :) sample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = sample(S, A, n_sim, n_cat, :) function sample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} where {N} B = zeros(S, n_cat, n_sim) sample!(B, A) end function sample!(B::AbstractMatrix{S}, A::SparseVector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) (; n, nzind, nzval) = A Iₛ, ω = nzind, nzval k = length(ω) Σω = cumsum(ω) s₀ = Σω[1] @inbounds for j ∈ axes(B, 2) u = rand() c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[Iₛ[c], j] += one(S) end B end
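################
# For reference, the scalar samplers above all reduce to the following
# inverse-CDF draw: find the first index whose cumulative mass exceeds a
# uniform variate. A standalone sketch (`categorical_draw` is an illustrative
# name, not package API):
function categorical_draw(Σω::Vector{Float64})
    k = length(Σω)
    u = rand()
    c = 1
    @inbounds while Σω[c] < u && c < k
        c += 1
    end
    c
end
Σω = cumsum([0.2, 0.3, 0.4, 0.1])
counts = zeros(Int, 4)
for _ = 1:10^6
    counts[categorical_draw(Σω)] += 1
end
counts ./ 10^6   # ≈ [0.2, 0.3, 0.4, 0.1]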
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # Examples: equal probability mass A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = sample(Int, A, 10, 6, (1,)) B′ = dropdims(B, dims=2) @code_warntype sample!(B, A, (1,)) A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = sample(Int, A, 10, 6, (1,)) B′ = dropdims(B, dims=2) @code_warntype sample!(B, A, (1,)) C = fill(A, 2,3,4); B = sample(Int, C, 10, 6, (1,3)); @code_warntype sample!(B, C, (1,3)) ################ B_1 = sample(Int, A, 1000, 6, (1,)); B_2 = sample_simd(Int, A, 6, 1000); @benchmark sample!($B_1, $A, $(1,)) @benchmark sample_simd!($B_2, $A) # Eventually, at num_categories=10^4, num_samples=10^5, the in-order traversal wins B_3 = sample(Int, C, 1000, 6, (1,2,3)); B_4 = sample_simd(Int, C, 6, 1000); @benchmark sample!($B_3, $C, $(1,2,3)) @benchmark sample_simd!($B_4, $C) using Random A′ = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] A′ = [[1, 1000], [100, 200, 300, 400], [200, 400, 600, 800, 1000, 900]] # A′ = [[1, 10], [10, 20, 30, 40], [20, 40, 60, 80, 100, 900]] D′ = fill(A′, 100,50,50); n_sim = 10^3 @timev B_5 = sample1(Int, D′, n_sim, num_cat(D′), (1,2,3)); @timev B_6 = sample_simd(Int, D′, num_cat(D′), n_sim); @timev B_7 = sample2(Int, D′, n_sim, num_cat(D′), (1,)); @timev B_7_3 = sample3(Int, D′, n_sim, num_cat(D′), (1,)); @benchmark sample!($B_5, $D′, $(1,2,3)) @benchmark sample_simd!($B_6, $D′) @benchmark sample2!($B_7, $D′, $(1,2,3)) n_sim = 10^4 @timev B_8 = tsample1(Int, D′, n_sim, num_cat(D′), (1,2,3)); @timev B_9 = tsample_simd(Int, D′, num_cat(D′), n_sim); @timev B_10 = tsample2(Int, D′, n_sim, num_cat(D′), (1,2,3)); @timev B_11 = tsample3(Int, D′, n_sim, num_cat(D′), (1,2,3)); @timev B_12 = tsample2(Int, D′, 1000, num_cat(D′), (1,)); sum(B_8) == sum(B_9) == sum(B_10) ################################################################ # Examples: unequal probability mass A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] # A = [([1, 1000], [0.3, 0.7]), ([100,200,300,400], [0.2, 0.3, 0.4, 0.1]), ([200, 400, 600, 800, 1000, 900], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] D = fill(A, 100,50,50); @timev B = sample(Int, A, 1000, num_cat(A), (1,)); @timev B′ = sample(Int, A′, 1000, num_cat(A), (1,)); @timev sample!(B, A, (1,)) n_sim = 10^4 dims = (1,2,3) @timev B_1 = sample(Int, D, n_sim, num_cat(D), dims); @timev sample!(B_1, D); @code_warntype sample!(B_1, D) @timev B_2 = sample(Int, D′, n_sim, num_cat(D′), dims); @timev sample!(B_2, D′); @code_warntype sample!(B_2, D′) @timev vsample!(B_2, D′); @timev sample_orderN!(B_2, D′); A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = sample(Int, A, 10, num_cat(A), (1,)) # @timev B_1_4 = sample4(Int, D, n_sim, num_cat(D), dims); # @timev B_2_4 = sample4(Int, D′, n_sim, num_cat(D′), dims); # sum(B_1) == sum(B_2) == sum(B_1_4) == sum(B_2_4) # @timev sum(B_1, dims=2); # @timev sum(B_1_4, dims=1); @timev B_3 = tsample(Int, D, 100000, num_cat(D), (1,2,3)); @timev B_4 = tsample(Int, D′, 100000, num_cat(D), (1,2,3)); function countcategory(A::AbstractArray{T, N}) where {T<:Integer, N} mx = maximum(A) v = zeros(Int, mx) @inbounds @simd for i ∈ eachindex(A) v[A[i]] += 1 end v end #### actual SparseVector using SparseArrays Iₛ, ω = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sv = SparseVector(4, Iₛ, ω) @timev (; n, nzind, nzval) = sv sv1 = SparseVector(2, [1,2], [0.3, 0.7]) sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sv3 
= SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) A = [sv1, sv2, sv3] D = fill(A, 100,50,50); @timev B = sample(Int, D, 1000, dims=(1,2,3)); @timev sample!(B, D); # nzval must be in order to be a valid SparseVector sv1 = SparseVector(1000, [1, 1000], [0.3, 0.7]) sv2 = SparseVector(400, [100,200,300,400], [0.2, 0.3, 0.4, 0.1]) sv3 = SparseVector(1000, [200, 400, 600, 800, 900, 1000], [0.1, 0.1, 0.1, 0.1,0.5, 0.1]) A = [sv1, sv2, sv3] D = fill(A, 100,50,50); @timev B = sample(Int, D, 1000, dims=(1,2,3)); @timev sample!(B, D); @code_warntype sample!(B, D) ################################################################ # Limiting chunksize of U; single sparse vectors. # Equal probability mass A = [1,2,3,4,5,6] n_sim = 10^3 B = sample(Int, A, n_sim, num_cat(A), (1,)); @benchmark sample!($B, $A) @benchmark sample0!($B, $A) @benchmark sample2!($B, $A) @code_warntype sample!(B, A) @code_warntype sample2!(B, A) sum(B) @timev sample!(B, A) @timev sample2!(B, A) ω = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] Σω = cumsum(ω) # Unequal probability mass A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) # A = ([1,2,3,4,5,6], [0.1, 0.25, 0.05, 0.25,0.15, 0.2]) n_sim = 10^3 B = sample(Int, A, n_sim, num_cat(A), (1,)); @benchmark sample!($B, $A) @benchmark sample0!($B, $A) @benchmark sample2!($B, $A) @code_warntype sample!(B, A) @code_warntype sample2!(B, A) sum(B) @timev sample!(B, A) @timev sample2!(B, A) ################ A = [1,2] B = tsample(Int, A, 10^8); @timev tsample!(B, A); @timev tsample0!(B, A); @timev sample!(B, A); #### limiting chunksize, larger arrays # Z = [[rand(1:1000, 5) for _ = 1:3] for _ = 1:50, _ = 1:50, _ = 1:50, _ = 1:10]; A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] Z = fill(A, 100,50,50); n_sim = 10^4 @timev B = sample(Int, Z, n_sim, dims=(1,2,3)); # The smaller chunksize approach actually performs ≈5-8% worse. @timev sample!(B, Z); @timev sample2!(B, Z); A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] Z = fill(A, 100,50,50); n_sim = 10^4 # The smaller chunksize approach actually performs ≈5-8% worse. 
-- true again for nonequiprobable @timev B = sample(Int, Z, n_sim, dims=(1,2,3)); @timev sample!(B, Z); @timev vsample!(B, Z); @timev sample_orderN!(B, Z); ################################################################ # Marsaglia square histogram A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) n_sim = 10^3 B = sample(Int, A, n_sim, num_cat(A), (1,)); @benchmark sample!($B, $A) @benchmark sample_mars!($B, $A) @benchmark sample_mars2!($B, $A) @benchmark sample_mars3!($B, $A) @benchmark vsample!($B, $A) #### Marsaglia, larger arrays Z = fill(A, 100,50,50); W = [Z Z Z Z]; n_sim = 10^4; B = sample(Int, Z, n_sim, dims=:); @timev sample!(B, Z); @timev sample_mars!(B, Z); @timev sample_mars2!(B, Z); @timev vsample!(B, Z); @timev vsample!(B, W); E = zeros(Int, reverse(size(B))); @timev sample_mars_dim1!(E, Z); @timev sample_mars_dim1_4!(E, Z); a = [1,2,3,4,5,6] D′ = fill(a, 100,50,50); B = zeros(6, 10000); @benchmark sample_orderN!($B, $a) @benchmark sample!($B, $a) @benchmark vsample!($B, $a) @benchmark sample!($B, $D′) @benchmark vsample!($B, $D′) @timev vtsample!(B, D′); @timev tsample!(B, D′); B′ = zeros(Int, 6, 10^6); @timev vtsample!(B′, D′); @timev tsample!(B′, D′); # Threading with polyester using Polyester using VectorizationBase, Static VectorizationBase.num_cores() = static(48) B2 = zeros(Int, 6, 10^5); @timev vtsample!(B2, D′, 10^4); @timev vtsample!(B2, D′, 2 * 10^4); @timev vtsample!(B′, D′, 2 * 10^4); @timev vtsample!(B2, Z, chunksize=5000) # 1. using Random using LoopVectorization using Polyester using SparseArrays # 2. include's # 3. using VectorizationBase, Static VectorizationBase.num_cores() = static(48) B = zeros(Int, 6, 10^6); v2 = [[1,2,3,4], [1,2,3,4,5,6]] B2 = vtsample(Int, v2, 10^4, chunksize=500) @code_warntype vtsample!(B2, [[.5, .5], [.2, .8]], 500) @timev vtsample(Int, [.5, .5], 10000, chunksize=500) @timev vtsample(Int, [1,2], 10000, chunksize=500) @timev vtsample!(B, D′, chunksize=10000); @timev vtsample!(B, Z, chunksize=10000); @timev vtsample!(B, D′, chunksize=1000); @timev vtsample!(B, Z, chunksize=1000); @timev vtsample!(B, D′, chunksize=100); @timev vtsample!(B, Z, chunksize=100); B .= 0; sum(B, dims=2) sum(length, Z) * 6 * size(B, 2) ################################################################ # Experiment with using a view to enable @turbo use everywhere # Preliminary conclusion: not really much gain/loss in terms of time; # the axes call on the view invokes some unnecessary allocations which can be # alleviated by simply using axes(B, 2) # Ultimately, it is likely not worth it to attempt it, as it is not really SIMD fodder # due to the random nature of the indices; forcing SIMD will very likely # make performance worse for any reasonable number of categories. # Moreover, the random nature of the memory location being written to makes it an unsafe # operation -- silent problems, but corrupted memory nonetheless. ## Addendum # Memory would not be corrupted as each `j`-index is unique. 
function vsample2!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] Bv = view(B, :, :, IR) for Iₛ ∈ a n = length(Iₛ) vgenerate!(C, U, n) @turbo for j ∈ indices((B, C), (2, 1))#axes(B, 2)#indices((Bv, C), (2, 1)) # axes(Bv, 2) c = C[j] Bv[Iₛ[c], j] += one(S) end end end B end function vsample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] Bv = view(B, :, :, IR) n = length(Iₛ) vgenerate!(C, U, n) @turbo for j ∈ axes(B, 2) c = C[j] Bv[Iₛ[c], j] += one(S) end end B end A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]; D = fill(A, 10,10,10); B = vsample(Int, D, 10000, dims=(1,3)); @benchmark vsample!($B, $D) @benchmark vsample2!($B, $D) @benchmark vsample3!($B, $D) vsample2!(B, D); @timev vsample!(B, Z); @timev vsample2!(B, Z); # OB = OffsetArray(B, 1:6, 0:9999, 1:1, 1:1, 1:1); vsample3!(OB, D); OB2 = OffsetArray(B, 1:6, -5000:4999, 1:1, 1:1, 1:1); vsample3!(OB2, D) OB3 = OffsetArray(B, 1:6, 0:9999, 0:0, 0:0, 2:2); vsample3!(OB3, D) # much worse with @simd ivdep function vsample3!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) vgenerate!(C, U, n) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end end B end function vsample3!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) vgenerate!(C, U, n) @simd ivdep for j ∈ eachindex(axes(B, 2), C) c = C[j] B[Iₛ[c], j, IR] += one(S) end end B end # Oddly, the fastest vsampler is non-allocating -- most likely due to # the elimination of store + access instructions associated with using a temporary array. function vsample2!(B::AbstractMatrix{S}, Iₛ::Vector{Int}) where {S<:Real} _check_reducedims(B, Iₛ) n = length(Iₛ) # C = vgenerate(n, size(B, 2)) @inbounds for j ∈ axes(B, 2) c = generate(n) B[Iₛ[c], j] += one(S) end B end function vsample3!(B::AbstractMatrix{S}, Iₛ::Vector{Int}) where {S<:Real} _check_reducedims(B, Iₛ) n = length(Iₛ) # C = vgenerate(n, size(B, 2)) @inbounds @simd ivdep for j ∈ axes(B, 2) c = generate(n) B[Iₛ[c], j] += one(S) end B end ################ # Dimension experiments #### Preliminary conclusion # As expected, when the number of categories is small, i.e. 𝒪(1), simulation index being # on the first dimension presents considerable advantages: ≈ 4x faster than by it being on # the second dimension. However, as the number of categories increases to 𝒪(10) (and beyond), # there is no difference in performance. 
# Intuitively, this makes sense: in the case of 𝒪(1) categories, the
# instruction pipelining would be able to recognize that the same few chunks
# of memory are needed; even if it cannot figure it out, the same couple of
# pieces of memory would always be in the cache simply on the basis of demand.
# As the number of categories increases, instruction pipelining would assuredly
# break down, and, regardless, there would be more variability in the memory
# demand. Consequently, it becomes increasingly unlikely that all required
# memory is in the cache at any given time. This implies a scaling with cache
# size -- if the cache permits it, then simulation index on the first dimension
# will be superior. If not, then it's a wash.
# Notably, for most practical applications, the number of categories is
# probably sufficient to make it a wash. Nonetheless, it is interesting to
# verify the theoretical prediction of superior performance when placing the
# simulation index on the first dimension.
#### A second look -- 1ˢᵗ dimension: simulation index, 2ⁿᵈ dimension: category index
## 1.
# There are advantages to placing simulations on the 1ˢᵗ dimension when
# considering scalar reductions, e.g. `+`, `max`, `min`; `mean`, `var`
# (variations on `mapreducethen`). However, if manipulating a whole simulation,
# then it is advantageous to place simulations on the 2ⁿᵈ dimension.
# Whole-simulation transformations are rarer than scalar
# transformations/reductions. Notably, non-scalar transformations/reductions
# are almost inevitably bespoke affairs due to the difficult nature of a
# general programming model for non-scalar operations (`mapslices` exists,
# but `mapsubslices` does not). Thus, most users would invoke scalar
# operations, and for non-scalar, write their own. It seems important to have
# the scalar reductions be fast by default, rather than have the non-scalar
# be fast but rarely used.
# ∴ If non-scalar transformations/reductions are desired, the burden is on the
# user to implement them efficiently, either via permuting the dimensions or
# some other scheme.
# -- Another option is to provide both methods, as constructing the transpose
# (1ˢᵗ: category, 2ⁿᵈ: simulation) directly may be useful. --
## 2.
# Placing the simulation index on the 1ˢᵗ dimension and the category index on
# the 2ⁿᵈ dimension also enables performance gains during sampling, if the
# data permit. Fundamentally, the gain under 1ˢᵗ: simulation, 2ⁿᵈ: category
# occurs because, for a subset of categories, fewer memory blocks are needed
# in the cache to complete simulations 1,…,N. Conversely, 1ˢᵗ: category,
# 2ⁿᵈ: simulation guarantees that at least one block must be loaded for each
# simulation -- a full traversal, with the block loaded on each simulation
# random by definition.
# What if the number of categories is large? (under 1ˢᵗ: sim, 2ⁿᵈ: cat)
# - In the worst case, all categories are involved, and one is randomly
#   loading columns -- an equivalent situation to the default under
#   1ˢᵗ: cat, 2ⁿᵈ: sim.
# - Even if the number of categories being sampled is large, if it is some
#   subset of the total, then less than a full traversal occurs.
# ∴ Any time n_cat < N_cat, it is better to have the simulation index on the
# 1ˢᵗ dimension.
##
# Taken together, these two aspects provide strong motivation for
# 1ˢᵗ: sim, 2ⁿᵈ: cat.
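# A quick illustration of the scalar-reduction argument (not package code):
# with simulations on the 1ˢᵗ dimension, a per-category reduction over
# simulations traverses contiguous columns, whereas with simulations on the
# 2ⁿᵈ dimension the same reduction strides across rows.
using Statistics
n_sim, n_cat = 10^5, 10^3
B_dim1 = rand(0:5, n_sim, n_cat);   # 1ˢᵗ: simulation, 2ⁿᵈ: category
B_dim2 = rand(0:5, n_cat, n_sim);   # 1ˢᵗ: category, 2ⁿᵈ: simulation
mean(B_dim1, dims=1)   # reduces down contiguous columns
mean(B_dim2, dims=2)   # accumulates across strided rows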
################ function vsample2_dim1!(B::AbstractMatrix{S}, Iₛ::Vector{Int}) where {S<:Real} _check_reducedims(B, Iₛ) n = length(Iₛ) @inbounds for i ∈ axes(B, 1) c = generate(n) B[i, Iₛ[c]] += one(S) end B end function vsample3_dim1!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) vgenerate!(C, U, n) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end function vsample3_dim1!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end vtsample_dim1!(B, A; chunksize::Int=5000) = vtsample_dim1!(B, A, chunksize) function vtsample_dim1!(B, A, chunksize::Int) _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize) @batch for r in rs _vsample_chunk_dim1!(B, A, keep, default, r) end return B end function _vsample_chunk_dim1!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end end return B end n_cat = 10^3 D = [[rand(1:n_cat, 2), rand(1:n_cat, 4), rand(1:n_cat, 6)] for _ = 1:10, _ = 1:10, _ = 1:10]; E = [[(rand(1:n_cat, 2), normalize1!(rand(2))), (rand(1:n_cat, 4), normalize1!(rand(4))), (rand(1:n_cat, 6), normalize1!(rand(4)))] for _ = 1:10, _ = 1:10, _ = 1:10]; E2 = [[(rand(1:n_cat, 20), normalize1!(rand(20))), (rand(1:n_cat, 40), normalize1!(rand(40))), (rand(1:n_cat, 60), normalize1!(rand(40)))] for _ = 1:10, _ = 1:10, _ = 1:10]; # # Equal sized output. 
# B = zeros(Int, n_cat,n_cat,1,1,1); # @benchmark vsample!($B, $D) # @benchmark vsample3_dim1!($B, $D) # @benchmark vsample!($B, $E) # @benchmark vsample3_dim1!($B, $E) # @benchmark vsample!($B, $E2) # @benchmark vsample3_dim1!($B, $E2) # appropriately sized n_sim = 10^3 B_dim2 = zeros(Int, n_cat, n_sim); B_dim1 = zeros(Int, n_sim, n_cat); @benchmark vsample!($B_dim2, $D) @benchmark vsample3_dim1!($B_dim1, $D) @benchmark vsample!($B_dim2, $E) @benchmark vsample3_dim1!($B_dim1, $E) @benchmark vsample!($B_dim2, $E2) @benchmark vsample3_dim1!($B_dim1, $E2) # larger simulation number n_sim = 10^4 B_dim2 = zeros(Int, n_cat, n_sim); B_dim1 = zeros(Int, n_sim, n_cat); @benchmark vsample!($B_dim2, $D) @benchmark vsample3_dim1!($B_dim1, $D) @benchmark vsample!($B_dim2, $E) @benchmark vsample3_dim1!($B_dim1, $E) @benchmark vsample!($B_dim2, $E2) @benchmark vsample3_dim1!($B_dim1, $E2) # artificially inflated number of categories n_cat = 10^4 B_dim2 = zeros(Int, n_cat, n_sim); B_dim1 = zeros(Int, n_sim, n_cat); @benchmark vsample!($B_dim2, $D) @benchmark vsample3_dim1!($B_dim1, $D) @benchmark vsample!($B_dim2, $E) @benchmark vsample3_dim1!($B_dim1, $E) @benchmark vsample!($B_dim2, $E2) @benchmark vsample3_dim1!($B_dim1, $E2) # large difference in tile size n_cat, n_sim = 10^3, 10^5 B_dim2 = zeros(Int, n_cat, n_sim); B_dim1 = zeros(Int, n_sim, n_cat); @benchmark vsample!($B_dim2, $D) @benchmark vsample3_dim1!($B_dim1, $D) @benchmark vsample!($B_dim2, $E) @benchmark vsample3_dim1!($B_dim1, $E) @benchmark vsample!($B_dim2, $E2) @benchmark vsample3_dim1!($B_dim1, $E2) # no reduction n_cat, n_sim = 10^3, 10^3 B_dim2 = zeros(Int, n_cat, n_sim, 10, 10, 10); B_dim1 = zeros(Int, n_sim, n_cat, 10, 10, 10); @benchmark vsample!($B_dim2, $D) @benchmark vsample3_dim1!($B_dim1, $D) @benchmark vsample!($B_dim2, $E) @benchmark vsample3_dim1!($B_dim1, $E) @benchmark vsample!($B_dim2, $E2) @benchmark vsample3_dim1!($B_dim1, $E2) A₀ = 𝓃𝓂Is a₀ = A₀[findfirst(!isempty, A₀)] a = map(Is -> algorithm2_2(Is, (𝐰ₐ′, F)), a₀) 𝑓(Is, ws) = Is[1], algorithm2_2(Is, ws) 𝑓(Is) = 𝑓(Is, (𝐰ₐ′, F)) map(𝑓, a₀) 𝑚𝑓(x) = isempty(x) ? 
Vector{Tuple{Vector{Int}, Vector{Float64}}}() : map(𝑓, x) A = map(𝑚𝑓, A₀); # simulation index on dim2 @timev B = vsample(Int, A, 10^3, dims=:); @timev vsample!(B, A); @timev Bt = vtsample(Int, A, 10^5, dims=:); @timev vtsample!(Bt, A); # simulation index on dim1 B_dim1 = zeros(Int, 10^3, 13797); @timev vsample3_dim1!(B_dim1, A); Bt_dim1 = zeros(Int, 10^5, 13797); @timev vtsample_dim1!(Bt_dim1, A); # partial reduction B = vsample(Int, A, 10^2, dims=(2,3,4,5)); # threading examples n_cat, n_sim = 10^3, 10^5 B_dim2 = zeros(Int, n_cat, n_sim, 1, 1, 1); B_dim1 = zeros(Int, n_sim, n_cat, 1, 1, 1); @benchmark vtsample!($B_dim2, $D, chunksize=10000) @benchmark vtsample_dim1!($B_dim1, $D, chunksize=10000) @benchmark vtsample!($B_dim2, $E, chunksize=10000) @benchmark vtsample_dim1!($B_dim1, $E, chunksize=10000) @benchmark vtsample!($B_dim2, $E2, chunksize=10000) @benchmark vtsample_dim1!($B_dim1, $E2, chunksize=10000) @benchmark vtsample!($B_dim2, $D, chunksize=5000) @benchmark vtsample_dim1!($B_dim1, $D, chunksize=5000) @benchmark vtsample!($B_dim2, $E, chunksize=5000) @benchmark vtsample_dim1!($B_dim1, $E, chunksize=5000) @benchmark vtsample!($B_dim2, $E2, chunksize=5000) @benchmark vtsample_dim1!($B_dim1, $E2, chunksize=5000) n_cat, n_sim = 10^4, 10^5 B_dim2 = zeros(Int, n_cat, n_sim, 1, 1, 1); B_dim1 = zeros(Int, n_sim, n_cat, 1, 1, 1); # @benchmark vtsample!($B_dim2, $D, chunksize=10000) # @benchmark vtsample_dim1!($B_dim1, $D, chunksize=10000) @benchmark vtsample!($B_dim2, $E, chunksize=10000) @benchmark vtsample_dim1!($B_dim1, $E, chunksize=10000) @benchmark vtsample!($B_dim2, $E2, chunksize=10000) @benchmark vtsample_dim1!($B_dim1, $E2, chunksize=10000) # @benchmark vtsample!($B_dim2, $D, chunksize=5000) # @benchmark vtsample_dim1!($B_dim1, $D, chunksize=5000) @benchmark vtsample!($B_dim2, $E, chunksize=5000) @benchmark vtsample_dim1!($B_dim1, $E, chunksize=5000) @benchmark vtsample!($B_dim2, $E2, chunksize=5000) @benchmark vtsample_dim1!($B_dim1, $E2, chunksize=5000) # @turbo on the additions: small speed gain, but more temporaries -- probably not worth # it given the potential problems with non-unit strides when working across multiple # dimensions. 
B = B_dim2; keep, default = Broadcast.shapeindexer(axes(B)[3:end]) rs = splitranges(firstindex(B, 2):lastindex(B, 2), 10000); 𝒥 = rs[1] B .= 0; @benchmark _vsample_chunk!($B_dim2, $E, $keep, $default, $𝒥) @benchmark _vsample_chunk2!($B_dim2, $E, $keep, $default, $𝒥) @benchmark _vsample_chunk!($B_dim2, $E2, $keep, $default, $𝒥) @benchmark _vsample_chunk2!($B_dim2, $E2, $keep, $default, $𝒥) @benchmark _vsample_chunk_dim1!($B_dim1, $E, $keep, $default, $𝒥) @benchmark _vsample_chunk_dim1_2!($B_dim1, $E, $keep, $default, $𝒥) @benchmark _vsample_chunk_dim1!($B_dim1, $E2, $keep, $default, $𝒥) @benchmark _vsample_chunk_dim1_2!($B_dim1, $E2, $keep, $default, $𝒥) function _vsample_chunk2!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} C, U = _genstorage_init(Float64, length(𝒥)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] Bv = view(B, :, 𝒥, IR) for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) @turbo for j ∈ indices((Bv, C), (2, 1)) c = C[j] Bv[Iₛ[c], j] += one(S) end end end return B end function _vsample_chunk_dim1_2!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] Bv = view(B, ℐ, :, IR) for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) @turbo for i ∈ indices((Bv, C), (1, 1)) c = C[i] Bv[i, Iₛ[c]] += one(S) end end end return B end
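################
# Since the marsaglia!/sqhist!/vmarsaglia_generate! routines drive most of the
# samplers benchmarked above, a minimal reference version of Marsaglia's
# square histogram may be useful. This sketch uses the standard large/small
# worklists (mirroring the `large, small` buffers of `sqhist!` above); the
# package's in-place versions presumably build the same K/V tables with
# preallocated workspace. Assumes `p` sums to 1.
function sqhist_ref(p::Vector{Float64})
    n = length(p)
    a = inv(n)
    K = collect(1:n)                 # alias indices
    V = [i * a for i = 1:n]          # division points (default: always accept)
    q = copy(p)
    small = [i for i = 1:n if q[i] < a]   # deficient columns
    large = [i for i = 1:n if q[i] ≥ a]   # surplus columns
    while !isempty(small) && !isempty(large)
        i = pop!(small)
        j = pop!(large)
        K[i] = j                     # the empty top of column i is filled by j
        V[i] = (i - 1) * a + q[i]
        q[j] -= a - q[i]             # j pays for topping up i
        (q[j] < a ? push!(small, j) : push!(large, j))
    end
    K, V
end
# One draw: a single uniform both selects the column and decides base vs
# alias; identical to the inlined generation loops above.
function sqhist_rand(K::Vector{Int}, V::Vector{Float64})
    n = length(K)
    u = rand()
    j = floor(Int, muladd(u, n, 1))
    u < V[j] ? j : K[j]
end
K, V = sqhist_ref([0.2, 0.3, 0.4, 0.1])
counts = zeros(Int, 4)
for _ = 1:10^6
    counts[sqhist_rand(K, V)] += 1
end
counts ./ 10^6   # ≈ [0.2, 0.3, 0.4, 0.1]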
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ function sample1(::Type{S}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T<:AbstractFloat, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))..., n_sim) B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample1!(B, A, dims) end function sample1!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {T<:AbstractFloat, N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) for j ∈ axes(B, N′) for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a c = categorical(ω) B[Iₛ[c], IR, j] += one(S) end end end B end function sample1(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))..., n_sim) B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample1!(B, A, dims) end function sample1!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) for j ∈ axes(B, N′) for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a c = rand(Iₛ) B[c, IR, j] += one(S) end end end B end function tsample1(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))..., n_sim) B = similar(A, S, Dᴮ) fill!(B, zero(S)) tsample!(B, A, dims) end function tsample1!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) Threads.@threads for j ∈ axes(B, N′) for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a c = rand(Iₛ) B[c, IR, j] += one(S) end end end B end # As the input array becomes large, SIMD PRNG sampling tends to be better # due to the fact that each element of A is accessed only once. # -- There is always the option of sampling across the j-indices of B # and placing dimensions of A on the 3rd...end positions. # If annotated with @inbounds and @simd, this is as fast (or faster) than # the simple `sample_simd` approach. function sample2(::Type{S}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T<:AbstractFloat, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) 
B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample2!(B, A, dims) end function sample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {T<:AbstractFloat, N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) C = Vector{Int}(undef, size(B, 2)) U = Vector{Float64}(undef, size(B, 2)) Σp = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(Σp, length(ω)) cumsum!(Σp, ω) categorical!(C, U, Σp) @simd for j ∈ axes(B, 2) # ArrayInterface.indices((B, C), (2, 1)) c = C[j] B[Iₛ[c], j, IR] += one(S) end end end B end function sample2(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample2!(B, A, dims) end function sample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) C = Vector{Int}(undef, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) @simd for j ∈ axes(B, 2) c = C[j] B[c, j, IR] += one(S) end end end B end function tsample2(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = similar(A, S, Dᴮ) fill!(B, zero(S)) tsample2!(B, A, dims) end function tsample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) tsample2!(B, A, keep, default, firstindex(B, 2):size(B, 2)) end function tsample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) # similar(𝒥, Int) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) @simd for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample2!(B, A, keep, default, start:h) tsample2!(B, A, keep, default, (h + 1):stop) end return B end end function sample3(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_cat, ntuple(d -> d ∈ dims ? 
1 : Dᴬ[d], Val(N))..., n_sim) B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample3!(B, A, dims) end function sample3!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) C = Vector{Int}(undef, size(B, N′)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) @simd for j ∈ axes(B, N′) c = C[j] B[c, IR, j] += one(S) end end end B end function tsample3(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))..., n_sim) B = similar(A, S, Dᴮ) fill!(B, zero(S)) tsample3!(B, A, dims) end function tsample3!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) tsample3!(B, A, keep, default, firstindex(B, N′):size(B, N′)) end function tsample3!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) # similar(𝒥, Int) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) @simd for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, IR, j] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample3!(B, A, keep, default, start:h) tsample3!(B, A, keep, default, (h + 1):stop) end return B end end ################ # Sampler which has simulation index on first dimension, categories on second dimension # Follows sample2's convention otherwise # Alas, this does not make much difference. # While only a few categories would potentially occur on each rand! + increment # (the innermost body + loop), these categories are random, hence, the instruction pipeline # is most likely unable to do useful prediction. Finding the appropriate columns # myself would not be very useful either, as this would require a view (which costs) # in addition to other costs. function sample4(::Type{S}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T<:AbstractFloat, N} Dᴬ = size(A) Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample4!(B, A, dims) end function sample4!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {T<:AbstractFloat, N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) C = Vector{Int}(undef, size(B, 1)) U = Vector{Float64}(undef, size(B, 1)) Σp = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(Σp, length(ω)) cumsum!(Σp, ω) categorical!(C, U, Σp) @simd for j ∈ axes(B, 1) # ArrayInterface.indices((B, C), (2, 1)) c = C[j] B[j, Iₛ[c], IR] += one(S) end end end B end function sample4(::Type{S}, A::AbstractArray{Vector{Vector{Int}}, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {N} Dᴬ = size(A) Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) 
B = similar(A, S, Dᴮ) fill!(B, zero(S)) sample4!(B, A, dims) end function sample4!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}, dims::NTuple{P, Int}) where {S<:Real, N′} where {P} where {N} keep = ntuple(d -> d ∉ dims, Val(N)) default = ntuple(d -> firstindex(A, d), Val(N)) C = Vector{Int}(undef, size(B, 1)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) @simd for j ∈ axes(B, 1) c = C[j] B[j, c, IR] += one(S) end end end B end ################ # Non-allocating versions # Oddly, the performance of the non-allocating variants differs on the # array of vector / array of array of vector cases (compared to the single sparse vector case). # As the arrays become large, O(10^4), for the Vector{Int} cases, # the temporary array is faster by 20-25%. For the Tuple{Vector{Int}, Vector{<:AbstractFloat}} # case, the temporary array is 10% slower. # This needs more extensive benchmarking to determine which is optimal -- the answer # very likely depends on the scale of the problem (n_sim and n_cat) and very likely # the distributions of probability mass (more uniform being worse?). function sample0!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(Σω, length(ω)) cumsum!(Σω, ω) for j ∈ axes(B, 2) c = rand_invcdf(Σω) B[Iₛ[c], j, IR] += one(S) end end end B end function sample0!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] resize!(Σω, length(ω)) cumsum!(Σω, ω) for j ∈ axes(B, 2) c = rand_invcdf(Σω) B[Iₛ[c], j, IR] += one(S) end end B end function sample0!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a for j ∈ axes(B, 2) c = rand(Iₛ) B[c, j, IR] += one(S) end end end B end ################ # Limiting chunksize # The smaller chunksize approach actually performs ≈5-8% worse for both # the equiprobable and nonequiprobable cases. 
function sample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{T}}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) Σω = Vector{T}() q, r = divrem(size(B, 2), 1024) if q == 0 C = Vector{Int}(undef, r) U = Vector{Float64}(undef, r) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end end else C = Vector{Int}(undef, 1024) U = Vector{Float64}(undef, 1024) ax = axes(B, 2) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(C, 1024) resize!(U, 1024) resize!(Σω, length(ω)) cumsum!(Σω, ω) J = iterate(ax) for _ = 1:q categorical!(C, U, Σω) for c ∈ C j, js = J B[Iₛ[c], j, IR] += one(S) J = iterate(ax, j) end end if r != 0 resize!(C, r) resize!(U, r) categorical!(C, U, Σω) for c ∈ C j, js = J B[Iₛ[c], j, IR] += one(S) J = iterate(ax, j) end end end end end B end function sample2!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Vector{Int}}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) q, r = divrem(size(B, 2), 1024) if q == 0 C = Vector{Int}(undef, r) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) for j ∈ axes(B, 2) c = C[j] B[c, j, IR] += one(S) end end end else C = Vector{Int}(undef, 1024) ax = axes(B, 2) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a resize!(C, 1024) J = iterate(ax) for _ = 1:q rand!(C, Iₛ) for c ∈ C j, js = J B[c, j, IR] += one(S) J = iterate(ax, j) end end if r != 0 resize!(C, r) rand!(C, Iₛ) for c ∈ C j, js = J B[c, j, IR] += one(S) J = iterate(ax, j) end end end end end B end @inline function _unsafe_sample!(B::AbstractArray{S}, Iₛ, Σω, U, ax, J, k, s₀) where {S<:Real} @inbounds for i ∈ eachindex(U) j, js = J u = U[i] c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[Iₛ[c], j] += one(S) J = iterate(ax, j) end J end # limiting the chunksize of U # Other than saving on the memory allocation, this is equivalent speed to the simpler method. 
function sample2!(B::AbstractArray{S, N′}, A::Tuple{Vector{Int}, Vector{T}}) where {S<:Real, N′} where {T<:AbstractFloat} Iₛ, ω = A Σω = cumsum(ω) k = length(ω) s₀ = Σω[1] q, r = divrem(size(B, 2), 1024) if q == 0 U = rand(r) @inbounds for j ∈ axes(B, 2) u = U[j] c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[Iₛ[c], j] += one(S) end else U = Vector{Float64}(undef, 1024) ax = axes(B, 2) J = iterate(ax) for _ = 1:q rand!(U) J = _unsafe_sample!(B, Iₛ, Σω, U, ax, J, k, s₀) end if r != 0 resize!(U, r) rand!(U) _unsafe_sample!(B, Iₛ, Σω, U, ax, J, k, s₀) end end B end ################ # Marsaglia square histogram method # The expected case: vectors of sparse vectors (as their bare components) function sample_mars!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) K, V = Vector{Int}(), Vector{T}() ix, q = Vector{Int}(), Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, K, V) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors function sample_mars!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 2)) K, V = Vector{Int}(), Vector{T}() ix, q = Vector{Int}(), Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate!(C, K, V) for j ∈ axes(B, 2) c = C[j] B[Iₛ[c], j, IR] += one(S) end end B end function sample_mars_dim1_4!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C = Vector{Int}(undef, size(B, 1)) U = Vector{Float64}(undef, size(B, 1)) K, V = Vector{Int}(), Vector{T}() ix, q = Vector{Int}(), Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] n = length(ω) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) marsaglia!(K, V, q, ix, ω) marsaglia_generate5!(C, U, K, V) for j ∈ axes(B, 1) c = C[j] B[j, Iₛ[c], IR] += one(S) end # @inbounds for j ∈ axes(B, 1) # u = rand() # j′ = floor(Int, muladd(u, n, 1)) # c = u < V[j′] ? 
j′ : K[j′] # B[j, Iₛ[c], IR] += one(S) # end end B end # The simplest case: a sparse vector sample_mars(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = sample_mars(S, A, n_sim, n_cat, :) sample_mars(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = sample_mars(S, A, n_sim, n_cat, :) function sample_mars(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} where {N} B = zeros(S, n_cat, n_sim) sample_mars!(B, A) end function sample_mars!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, ω = A # k = length(ω) # Σω = cumsum(ω) # s₀ = Σω[1] K, V = marsaglia(ω) n = length(K) @inbounds for j ∈ axes(B, 2) u = rand() j′ = floor(Int, muladd(u, n, 1)) c = u < V[j′] ? j′ : K[j′] B[Iₛ[c], j] += one(S) end # C = Vector{Int}(undef, size(B, 2)) # # marsaglia_generate!(C, K, V) # marsaglia_generate4!(C, K, V) # @inbounds for j ∈ eachindex(axes(B, 2), C) # B[Iₛ[C[j]], j] += one(S) # end B end #### Threading experiments """ splitranges(start, stop, chunksize) Divide the range `start:stop` into segments, each of size `chunksize`. The last segment will contain the remainder, `(start - stop + 1) % chunksize`, if it exists. """ function splitranges(start::Int, stop::Int, Lc::Int) L = stop - start + 1 n, r = divrem(L, Lc) ranges = Vector{UnitRange{Int}}(undef, r == 0 ? n : n + 1) l = start @inbounds for i = 1:n l′ = l l += Lc ranges[i] = l′:(l - 1) end if r != 0 @inbounds ranges[n + 1] = (stop - r + 1):stop end return ranges end """ splitranges(ur::UnitRange{Int}, chunksize) Divide the range `ur` into segments, each of size `chunksize`. """ splitranges(ur::UnitRange{Int}, Lc::Int) = splitranges(ur.start, ur.stop, Lc) function _vtsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} L = length(𝒥) C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) K, V = Vector{Int}(), Vector{Float64}() ix, q = Vector{Int}(), Vector{Float64}() ω = Vector{Float64}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) resize!(K, n); resize!(V, n); resize!(ix, n); resize!(q, n) resize!(ω, n) fill!(ω, inv(n)) marsaglia!(K, V, q, ix, ω) vmarsaglia_generate!(C, U, K, V) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B end function vtsample_poly!(B, A, sz::Int) _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) rs = splitranges(firstindex(B, 2):size(B, 2), sz) @batch for r in rs _vtsample_chunk!(B, A, keep, default, r) end B end function vtsample!(B, A; tiling=:batch, chunksize=5000) tiling === :batch || tiling === :recursive || throw(ArgumentError(lazy"tiling must be either :batch or :recursive, got $tiling")) if tiling !== :batch vtsample!(B, A) else vtsample_poly!(B, A, chunksize) end B end ################ # Reference sampler which is as simple as possible. 
function sample_simd!(B::Matrix{T}, A::Vector{Vector{Int}}) where {T<:Real} c = Vector{Int}(undef, size(B, 2)) @inbounds for m ∈ eachindex(A) Iₛ = A[m] if length(Iₛ) == 1 @inbounds i = Iₛ[1] @inbounds @simd ivdep for j ∈ axes(B, 2) B[i, j] += one(T) end else rand!(c, Iₛ) @inbounds @simd for j ∈ axes(B, 2) i = c[j] B[i, j] += one(T) end end end return B end sample_simd(::Type{T}, A::Vector{Vector{Int}}, I::Int, J::Int) where {T<:Real} = sample_simd!(zeros(T, I, J), A) function sample_simd!(B::Matrix{T}, A::Array{Vector{Vector{Int}}, N}) where {T<:Real} where {N} c = Vector{Int}(undef, size(B, 2)) @inbounds for n ∈ eachindex(A) a = A[n] for m ∈ eachindex(a) Iₛ = a[m] if length(Iₛ) == 1 @inbounds i = Iₛ[1] @inbounds @simd ivdep for j ∈ axes(B, 2) B[i, j] += one(T) end else rand!(c, Iₛ) @inbounds @simd for j ∈ axes(B, 2) i = c[j] B[i, j] += one(T) end end end end return B end sample_simd(::Type{T}, A::Array{Vector{Vector{Int}}, N}, I::Int, J::Int) where {T<:Real, N} = sample_simd!(zeros(T, I, J), A) function tsample_simd!(B::Matrix{T}, A::Vector{Vector{Int}}, 𝒥::UnitRange{Int}) where {T<:Real} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 c = Vector{Int}(undef, L) @inbounds for m ∈ eachindex(A) Iₛ = A[m] if length(Iₛ) == 1 @inbounds i = Iₛ[1] @inbounds @simd ivdep for j ∈ 𝒥 B[i, j] += one(T) end else rand!(c, Iₛ) @inbounds @simd for l ∈ eachindex(𝒥) i = c[l] B[i, 𝒥[l]] += one(T) end end end return B else H = (start + stop) >> 1 @sync begin Threads.@spawn tsample_simd!(B, A, start:H) tsample_simd!(B, A, (H + 1):stop) end return B end return B end tsample_simd!(B::Matrix{T}, A::Vector{Vector{Int}}) where {T<:Real} = tsample_simd!(B, A, 1:size(B, 2)) tsample_simd(::Type{T}, A::Vector{Vector{Int}}, I::Int, J::Int) where {T<:Real} = tsample_simd!(zeros(T, I, J), A, 1:J) function tsample_simd!(B::Matrix{T}, A::Array{Vector{Vector{Int}}, N}, 𝒥::UnitRange{Int}) where {T<:Real} where {N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 c = Vector{Int}(undef, L) @inbounds for n ∈ eachindex(A) a = A[n] for m ∈ eachindex(a) Iₛ = a[m] if length(Iₛ) == 1 @inbounds i = Iₛ[1] @inbounds @simd ivdep for j ∈ 𝒥 B[i, j] += one(T) end else rand!(c, Iₛ) @inbounds @simd for l ∈ eachindex(𝒥) i = c[l] B[i, 𝒥[l]] += one(T) end end end end return B else H = (start + stop) >> 1 @sync begin Threads.@spawn tsample_simd!(B, A, start:H) tsample_simd!(B, A, (H + 1):stop) end return B end end tsample_simd!(B::Matrix{T}, A::Array{Vector{Vector{Int}}, N}) where {T<:Real} where {N} = tsample_simd!(B, A, 1:size(B, 2)) tsample_simd(::Type{T}, A::Array{Vector{Vector{Int}}, N}, I::Int, J::Int) where {T<:Real} where {N} = tsample_simd!(zeros(T, I, J), A, 1:J)
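# A hedged usage sketch (added for illustration; values worked out by hand from the
# definitions above -- `splitranges` from the threading experiments, `sample_simd` just defined):
# julia> splitranges(1, 10, 4)    # divrem(10, 4) = (2, 2): two full chunks + a remainder
# 3-element Vector{UnitRange{Int64}}:
#  1:4
#  5:8
#  9:10
# julia> B = sample_simd(Int, [[1], [2, 3]], 3, 4);    # 3 categories × 4 simulations
# julia> sum(B, dims=1)    # each column accrues one count from [1] and one from rand([2, 3])
# 1×4 Matrix{Int64}:
#  2  2  2  2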
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # mirror of sampler.jl; separate file for variants on threading # The bare minimum for `sample` interface-- covers all 4 other definitions. tsample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = tsample(S, A, n_sim, n_cat, dims) tsample(::Type{S}, A, n_sim; dims=:) where {S} = tsample(S, A, n_sim, num_cat(A), dims) tsample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = tsample(S, A, n_sim, n_cat, (dims,)) function tsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = fill!(similar(A, S, Dᴮ), zero(S)) tsample!(B, A) end function tsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_cat, n_sim)), zero(S)) tsample!(B, A) end # for recursive spawning function tsample!(B, A) _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) tsample!(B, A, keep, default, firstindex(B, 2):size(B, 2)) end # The expected case: vectors of sparse vectors (as their bare components) function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, ω) ∈ a resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, ω = A[IA] resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = tsample(S, A, n_sim, n_cat, :) tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = tsample(S, A, n_sim, n_cat, :) function tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) tsample!(B, A) end function tsample!(B::AbstractMatrix, A::Tuple{Vector{Int}, Vector{<:AbstractFloat}}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function 
tsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 Iₛ, ω = A k = length(ω) Σω = cumsum(ω) s₀ = Σω[1] @inbounds for j ∈ 𝒥 u = rand() c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[Iₛ[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a rand!(C, Iₛ) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] rand!(C, Iₛ) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector function tsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} B = zeros(S, n_cat, n_sim) tsample!(B, A) end # Trivial parallelism is preferable here, but it's not safe! # These are questionable methods (though, the function barrier approach is safe). 
# @inline function _tsample!(B::AbstractMatrix{S}, A::Vector{Int}, j::Int) where {S<:Real} # c = rand(A) # @inbounds B[c, j] += one(S) # B # end # function tsample0!(B::AbstractMatrix{S}, A::Vector{Int}) where {S<:Real} # _check_reducedims(B, A) # # @inbounds Threads.@threads for j ∈ axes(B, 2) # # c = rand(A) # # B[c, j] += one(S) # # end # @inbounds Threads.@threads for j ∈ axes(B, 2) # _tsample!(B, A, j) # end # B # end function tsample!(B::AbstractMatrix, A::Vector{Int}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::Vector{Int}, 𝒥::UnitRange{Int}) where {S<:Real} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 @inbounds for j ∈ 𝒥 c = rand(A) B[c, j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end ################ # General case: dense vectors, the linear index of which indicates the category function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for ω ∈ a resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of dense vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) Σω = Vector{T}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) ω = A[IA] resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[c, j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a dense vector tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = tsample(S, A, n_sim, n_cat, :) tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = tsample(S, A, n_sim, n_cat, :) function tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) tsample!(B, A) end function tsample!(B::AbstractMatrix, A::Vector{<:AbstractFloat}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::Vector{T}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 ω = A k = length(ω) Σω = cumsum(ω) s₀ = Σω[1] @inbounds for j ∈ 𝒥 u = rand() c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[c, j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end 
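# The cumulative-sum search above is linear CDF inversion: draw u ~ U(0, 1) and take the
# first category whose cumulative mass reaches u. A minimal standalone sketch of the
# equivalence (added for illustration; not part of the original file):
# ω = [0.1, 0.25, 0.05, 0.35, 0.25]
# Σω = cumsum(ω)                    # [0.1, 0.35, 0.4, 0.75, 1.0]
# u = 0.5
# c = 1
# while Σω[c] < u && c < length(ω)
#     c += 1
# end
# c == 4                            # Σω[3] = 0.4 < 0.5 ≤ Σω[4] = 0.75
# # i.e. c == searchsortedfirst(Σω, u) whenever u ≤ 1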
################ # General case: sparse vectors, the nzval of which indicates the category function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) Σω = Vector{Tv}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # A simplification: an array of sparse vectors function tsample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}, keep, default, 𝒥::UnitRange{Int}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1024 C = Vector{Int}(undef, L) U = Vector{Float64}(undef, L) Σω = Vector{Tv}() @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] (; n, nzind, nzval) = sv Iₛ, ω = nzind, nzval resize!(Σω, length(ω)) cumsum!(Σω, ω) categorical!(C, U, Σω) for l ∈ eachindex(C, 𝒥) c = C[l] j = 𝒥[l] B[Iₛ[c], j, IR] += one(S) end end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, keep, default, start:h) tsample!(B, A, keep, default, (h + 1):stop) end return B end end # The simplest case: a sparse vector tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = tsample(S, A, n_sim, n_cat, :) tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = tsample(S, A, n_sim, n_cat, :) function tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) tsample!(B, A) end function tsample!(B::AbstractMatrix, A::SparseVector{<:AbstractFloat}) _check_reducedims(B, A) tsample!(B, A, firstindex(B, 2):size(B, 2)) end function tsample!(B::AbstractMatrix{S}, A::SparseVector{T}, 𝒥::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} (; start, stop) = 𝒥 L = stop - start + 1 if L ≤ 1048576 (; n, nzind, nzval) = A Iₛ, ω = nzind, nzval k = length(ω) Σω = cumsum(ω) s₀ = Σω[1] @inbounds for j ∈ 𝒥 u = rand() c = 1 s = s₀ while s < u && c < k c += 1 s = Σω[c] end B[Iₛ[c], j] += one(S) end return B else h = (start + stop) >> 1 @sync begin Threads.@spawn tsample!(B, A, start:h) tsample!(B, A, (h + 1):stop) end return B end end
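# A hedged usage sketch for the threaded sampler (hypothetical data; the recursion only
# spawns when more than 2^20 columns are requested, so this small case runs serially):
# julia> A = ([1, 4], [0.3, 0.7]);     # sparse PMF: P(1) = 0.3, P(4) = 0.7
# julia> B = tsample(Int, A, 10, 4);   # 4×10 count matrix; dims=: by default
# julia> all(sum(B, dims=1) .== 1)     # one draw accumulated per simulation column
# true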
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # A ∈ 𝔻ᴰ¹ˣᴰ²ˣᴰ³ˣ⋯ ; eltype(A) = Vector{Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # (Iₛ, ω) OR (Iₛ, Σω) # Each sampling routine is identical: unpack the tuple, draw c ~ Categorical(ω) and # obtain the real category as Iₛ[c]. # This enables an encapsulation of all PVG-induced variability, hence, a consistent # interface for the sampler. # Technically, `sample` only needs to know ndims(A), not necessarily the element type. # The appropriate dispatch on element type is necessary for `sample!` # `sample` could instead use # A::AbstractArray{U, N} where {U<:Union{Vector{Tuple{Vector{Int}, Vector{T}}}, Tuple{Vector{Int}, Vector{T}}}} where {T<:AbstractFloat} # Actually, this should ideally support: # array of array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:AbstractArray{S, M}} where {M} where {S<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # (sparse) vector # A::Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # The bare minimum for `sample` interface-- covers all 4 other definitions. # vsample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = vsample(S, A, n_sim, n_cat, dims) # vsample(::Type{S}, A, n_sim; dims=:) where {S} = vsample(S, A, n_sim, num_cat(A), dims) # vsample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = vsample(S, A, n_sim, n_cat, (dims,)) # function vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} # Dᴬ = size(A) # Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) # B = fill!(similar(A, S, Dᴮ), zero(S)) # vsample!(B, A) # end # function vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} # B = fill!(similar(A, S, (n_cat, n_sim)), zero(S)) # vsample!(B, A) # end #### A revised public interface vsample(::Type{S}, A, n_sim; dims=:, n_cat=nothing) where {S<:Real} = _vsample(S, A, n_sim, n_cat, dims) vsample(A, n_sim; dims=:, n_cat=nothing) = _vsample(Int, A, n_sim, n_cat, dims) _vsample(::Type{S}, A, n_sim, n_cat::Int, dims::Int) where {S<:Real} = _vsample(S, A, n_sim, n_cat, (dims,)) _vsample(::Type{S}, A, n_sim, ::Nothing, dims) where {S<:Real} = _vsample(S, A, n_sim, num_cat(A), dims) function _vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_cat, n_sim, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) 
B = fill!(similar(A, S, Dᴮ), zero(S)) vsample!(B, A) end function _vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_cat, n_sim)), zero(S)) vsample!(B, A) end ################ # The expected case: vectors of sparse vectors (as their bare components) function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors (as bare components) function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end B end # The simplest case: a sparse vector (as bare components) _vsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_cat, n_sim) vsample!(B, A) end function vsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, p = A K, V = sqhist(p) C = vgenerate(K, V, size(B, 2)) @inbounds for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j] += one(S) end B end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) vgenerate!(C, U, n) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U 
= _genstorage_init(Float64, size(B, 2)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) vgenerate!(C, U, n) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end B end # # The simplest case: a sparse vector _vsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} B = zeros(S, n_cat, n_sim) vsample!(B, A) end # Oddly, the fastest vsampler is non-allocating -- most likely due to # the elimination of store + access instructions associated with using a temporary array. function vsample!(B::AbstractMatrix{S}, Iₛ::Vector{Int}) where {S<:Real} _check_reducedims(B, Iₛ) n = length(Iₛ) C = vgenerate(n, size(B, 2)) @inbounds for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j] += one(S) end B end ################ # General case: dense vectors, the linear index of which indicates the category function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for p ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[c, j, IR] += one(S) end end end B end # A simplification: an array of dense vectors function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[c, j, IR] += one(S) end end B end # The simplest case: a dense vector _vsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} where {N} B = zeros(S, n_cat, n_sim) vsample!(B, A) end function vsample!(B::AbstractMatrix{S}, A::Vector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) K, V = sqhist(A) C = vgenerate(K, V, size(B, 2)) @inbounds for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[c, j] += one(S) end B end ################ # General case: sparse vectors, the nzval of which indicates the category function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} 
where {Tv<:AbstractFloat, Ti<:Integer, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end end B end # A simplification: an array of sparse vectors function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 2)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j, IR] += one(S) end end B end # The simplest case: a sparse vector _vsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} where {N} B = zeros(S, n_cat, n_sim) vsample!(B, A) end function vsample!(B::AbstractMatrix{S}, A::SparseVector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, p = A.nzind, A.nzval K, V = sqhist(p) C = vgenerate(K, V, size(B, 2)) @inbounds for (j′, j) ∈ enumerate(axes(B, 2)) c = C[j′] B[Iₛ[c], j] += one(S) end B end
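# A hedged usage sketch of the revised public interface (hypothetical data; assumes
# `num_cat`, defined in utils.jl, returns the largest category index, here 3):
# julia> A = ([1, 3], [0.4, 0.6]);    # sparse PMF: P(1) = 0.4, P(3) = 0.6
# julia> B = vsample(A, 5);           # eltype defaults to Int; 3×5 count matrix
# julia> all(sum(B, dims=1) .== 1)    # one draw per simulation column
# true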
module CategoricalMonteCarlo

# using Test, BenchmarkTools
using Random
using SparseArrays
using LoopVectorization
using Polyester
using MarsagliaDiscreteSamplers

import MarsagliaDiscreteSamplers: _sqhist_init

export num_cat
export sample, sample!, tsample, tsample!
export vsample, vsample!, vtsample, vtsample!

# Ugly normalization names
export algorithm2_1, algorithm2_1!,
    algorithm2_2, algorithm2_2!,
    algorithm3, algorithm3!,
    algorithm3_ratio, algorithm3_ratio!,
    algorithm2_1_algorithm3, algorithm2_1_algorithm3!,
    algorithm2_1_algorithm3_ratio, algorithm2_1_algorithm3_ratio!,
    algorithm4!, algorithm4,
    normalize1, normalize1!

export pvg, pvg!, tpvg, tpvg!

include("utils.jl")
include("normalizations.jl")
include("sampler.jl")
include("tsampler_batch.jl")
include("vsampler.jl")
include("vtsampler_batch.jl")
include("probabilityvectorgeneration.jl")

end
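# A hedged end-to-end sketch (illustration only; `sample` is assumed to mirror the
# `tsample`/`vsample` signatures defined in the included files):
# using CategoricalMonteCarlo
# w = [2.0, 0.0, 3.0];
# p = normalize1(w)            # [0.4, 0.0, 0.6]
# B = sample(Int, p, 1000)     # 3×1000 count matrix, one draw per simulation
# sum(B, dims=2)               # row sums ≈ [400, 0, 600] in expectation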
#
# Date created: 2022-06-15
# Author: aradclif
#
#
############################################################################################

_typeofinv(x) = typeof(inv(x))
_typeofinv(::Type{T}) where {T} = typeof(inv(one(T)))

"""
    normalize1!(A::AbstractArray{<:Real})

Normalize the values in `A` such that `sum(A) ≈ 1` and `0 ≤ A[i] ≤ 1` ∀i.
This is not quite the L¹-norm, which would require that `abs(A[i])` be used.
It is assumed that `0 ≤ A[i] < Inf` ∀i. `Inf` values are not handled and will result
in `NaN`'s.

See also: [`normalize1`](@ref)

```jldoctest
julia> normalize1!([1.0, 2.0, 3.0])
3-element Vector{Float64}:
 0.16666666666666666
 0.3333333333333333
 0.5

julia> normalize1!([1.0, 2.0, Inf])
3-element Vector{Float64}:
   0.0
   0.0
 NaN

julia> normalize1!([1.0, 2.0, NaN]) # NaN propagates, as expected
3-element Vector{Float64}:
 NaN
 NaN
 NaN

julia> normalize1!([1.0, -2.0, 3.0]) # not the L¹-norm
3-element Vector{Float64}:
  0.5
 -1.0
  1.5
```
"""
function normalize1!(A::AbstractArray{T}) where {T<:Real}
    s = zero(T)
    @inbounds @simd for i ∈ eachindex(A)
        s += A[i]
    end
    c = inv(s)
    @inbounds @simd for i ∈ eachindex(A)
        A[i] *= c
    end
    A
end

"""
    normalize1!(B::AbstractArray{<:Real}, A::AbstractArray{<:Real})

Normalize the values in `A` such that `sum(B) ≈ 1` and `0 ≤ B[i] ≤ 1` ∀i,
storing the result in `B`. It is assumed that `A[i] ≥ 0` ∀i.
"""
function normalize1!(B::AbstractArray{T}, A::AbstractArray{S}) where {T<:Real, S<:Real}
    s = zero(S)
    @inbounds @simd for i ∈ eachindex(A)
        s += A[i]
    end
    c = inv(s)
    @inbounds @simd for i ∈ eachindex(A, B)
        B[i] = A[i] * c
    end
    B
end

"""
    normalize1(A::AbstractArray{<:Real})

Return an array of equal size, `B`, which satisfies `sum(B) ≈ 1` and `0 ≤ B[i] ≤ 1` ∀i.
It is assumed that `A[i] ≥ 0` ∀i.

See also: [`normalize1!`](@ref)
"""
normalize1(A::AbstractArray{T}) where {T<:Real} = normalize1!(similar(A, _typeofinv(T)), A)

function vnormalize1!(A::AbstractArray{T}) where {T<:Base.IEEEFloat}
    s = zero(T)
    @turbo for i ∈ eachindex(A)
        s += A[i]
    end
    c = inv(s)
    @turbo for i ∈ eachindex(A)
        A[i] *= c
    end
    A
end

function normalize1!(B::AbstractArray{T}, A::AbstractArray{T}) where {T<:Base.IEEEFloat}
    s = zero(T)
    @turbo for i ∈ eachindex(A)
        s += A[i]
    end
    c = inv(s)
    @turbo for i ∈ eachindex(A, B)
        B[i] = A[i] * c
    end
    B
end

normalize1(A::AbstractArray{<:Base.IEEEFloat}) = normalize1!(similar(A), A)

################
# @noinline function _check_algorithm2_1(I::Vector{Int}, x)
#     mn, mx = extrema(I)
#     f, l = firstindex(x), lastindex(x)
#     mn ≥ f || throw(BoundsError(x, mn))
#     mx ≤ l || throw(BoundsError(x, mx))
# end

#### Algorithm 2.1.
# I ∈ ℕᴺ, 𝐰 ∈ ℝᴰ; I ⊆ 1,…,D
# -> ω ∈ ℝᴺ, ωᵢ = 𝐰ᵢ / ∑ⱼ 𝐰ⱼ; j ∈ I
"""
    algorithm2_1!(p::Vector{T}, I::Vector{Int}, w::Vector{<:Real}) where {T<:Real}

Fill `p` with the probabilities that result from normalizing the weights `w[I]`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm2_1`](@ref)
"""
function algorithm2_1!(p::Vector{T}, I::Vector{Int}, w::Vector{S}) where {T<:Real, S<:Real}
    checkbounds(w, I)
    s = zero(S)
    @inbounds @simd ivdep for i ∈ eachindex(I, p)
        w̃ = w[I[i]]
        s += w̃
        p[i] = w̃
    end
    # c = inv(s)
    # Guarantees type stability at negligible expense compared to what is gained
    c = one(T) / s
    @inbounds @simd for i ∈ eachindex(p)
        p[i] *= c
    end
    p
end

"""
    algorithm2_1(I::Vector{Int}, w::Vector{<:Real})

Create a vector of probabilities by normalizing the weights selected by `I` from `w`.
It is assumed that `0 ≤ wᵢ < Inf, i ∈ I`. Mathematically, given:

    I ∈ ℕᴺ, 𝐰 ∈ ℝᴰ; I ⊆ {1,…,D}

The iᵗʰ term will be computed as: pᵢ = 𝐰ᵢ / ∑ⱼ 𝐰ⱼ; j ∈ I

See also: [`algorithm2_1!`](@ref), [`algorithm2_2`](@ref)

# Examples
```jldoctest
julia> I = [1, 5, 2]; w = [5, 4, 3, 2, 1];

julia> algorithm2_1(I, w)
3-element Vector{Float64}:
 0.5
 0.1
 0.4

julia> algorithm2_1([1, 1, 2], Rational.(w))
3-element Vector{Rational{Int64}}:
 5//14
 5//14
 2//7

julia> w[2] = -w[2];

julia> algorithm2_1(I, w) # Nonsense results if `wᵢ` constraints violated
3-element Vector{Float64}:
  2.5
  0.5
 -2.0

julia> algorithm2_1(I, [5, 4, 3, 2, Inf])
3-element Vector{Float64}:
   0.0
 NaN
   0.0

julia> algorithm2_1(I, [5, NaN, 3, 2, 1])
3-element Vector{Float64}:
 NaN
 NaN
 NaN
```
"""
algorithm2_1(I::Vector{Int}, w::Vector{T}) where {T<:Real} =
    algorithm2_1!(similar(I, _typeofinv(T)), I, w)

#### Algorithm 2.2
# I₁ ∈ ℕᴺ, I₂ ∈ ℕᴺ, …, Iₘ ∈ ℕᴺ; 𝐰₁ ∈ ℝᴰ¹, 𝐰₂ ∈ ℝᴰ², …, 𝐰ₘ ∈ ℝᴰᵐ
# -> ω ∈ ℝᴺ, ωᵢ = ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[i]] / ∑ᵢ₌₁ᴺ ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[i]]
# Mathematically, given:
# I₁ ∈ ℕᴺ , 𝐰₁ ∈ ℝᴰ¹
# I₂ ∈ ℕᴺ , 𝐰₂ ∈ ℝᴰ²
#  ⋮  ,  ⋮
# Iₘ ∈ ℕᴺ , 𝐰ₘ ∈ ℝᴰᵐ
# The iᵗʰ term will be computed as:
# pᵢ = ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[i]] / ∑ⱼ₌₁ᴺ ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[j]]
_typeofprod(ws::NTuple{N, Vector{<:Real}}) where {N} = promote_type(Int, typeof(mapreduce(first, *, ws)))
_typeofprod(ws::NTuple{N, Vector{T}}) where {N} where {T<:Real} = promote_type(Int, T)

function algorithm2_2_quote(M::Int)
    Is = Expr(:tuple)
    ws = Expr(:tuple)
    bc = Expr(:tuple)
    for m = 1:M
        push!(Is.args, Symbol(:I_, m))
        push!(ws.args, Symbol(:w_, m))
        push!(bc.args, Expr(:call, :checkbounds, Symbol(:w_, m), Symbol(:I_, m)))
    end
    block = Expr(:block)
    loop = Expr(:for, Expr(:(=), :j, Expr(:call, :eachindex, ntuple(i -> Symbol(:I_, i), M)..., :p)), block)
    e = Expr(:call, :*)
    for m = 1:M
        push!(e.args, Expr(:ref, Symbol(:w_, m), Expr(:ref, Symbol(:I_, m), :j)))
    end
    push!(block.args, Expr(:(=), :t, e))
    push!(block.args, Expr(:(=), :s, Expr(:call, :+, :s, :t)))
    push!(block.args, Expr(:(=), Expr(:ref, :p, :j), :t))
    return quote
        $Is = Is
        $ws = ws
        $bc
        # s = zero(T)
        s = zero(_typeofprod(ws))
        @inbounds @simd ivdep $loop
        c = one(S) / s
        @inbounds @simd ivdep for j ∈ eachindex(p)
            p[j] *= c
        end
        return p
    end
end

@generated function algorithm2_2!(p::Vector{S}, Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{<:Real}}) where {M} where {S<:Real}
    algorithm2_2_quote(M)
end

"""
    algorithm2_2!(p::Vector{T}, Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{<:Real}}) where {T<:Real, M}

Fill `p` with the probabilities that result from normalizing the element-wise product
of weights selected by the index set, `Is[m]`, respective to each weight vector, `ws[m]`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm2_2`](@ref)

# Examples
```jldoctest
julia> Is = ([1,2,3], [4,5,6], [7,8,9]); ws = ([1,2,3], fill(1/6, 6), fill(1//10, 9));

julia> algorithm2_2!(zeros(3), Is, ws)
3-element Vector{Float64}:
 0.16666666666666666
 0.3333333333333333
 0.5

julia> algorithm2_2!(zeros(Rational{Int}, 3), Is, (ws[1], fill(1//6, 6), ws[3]))
3-element Vector{Rational{Int64}}:
 1//6
 1//3
 1//2
```
"""
algorithm2_2!(p::Vector{T}, Is::Tuple{Vector{Int}}, ws::Tuple{Vector{<:Real}}) where {T<:Real} =
    algorithm2_1!(p, (@inbounds Is[1]), (@inbounds ws[1]))

"""
    algorithm2_2(Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{<:Real}}) where {M}

Compute the product of weights selected by the respective index sets `Is`, then normalize
the resultant weight vector to probabilities. Mathematically, given:

    I₁ ∈ ℕᴺ , 𝐰₁ ∈ ℝᴰ¹, 0 ≤ 𝐰₁ᵢ < Inf, i ∈ I₁
    I₂ ∈ ℕᴺ , 𝐰₂ ∈ ℝᴰ², 0 ≤ 𝐰₂ᵢ < Inf, i ∈ I₂
     ⋮  ,  ⋮
    Iₘ ∈ ℕᴺ , 𝐰ₘ ∈ ℝᴰᵐ, 0 ≤ 𝐰ₘᵢ < Inf, i ∈ Iₘ

The iᵗʰ term will be computed as: pᵢ = ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[i]] / ∑ⱼ₌₁ᴺ ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[j]]

See also: [`algorithm2_2!`](@ref), [`algorithm2_1`](@ref)

# Examples
```jldoctest
julia> Is = ([1,2,3], [4,5,6], [7,8,9]); ws = ([1.0, 2.0, 3.0], fill(0.5, 6), fill(0.1, 9));

julia> algorithm2_2(Is, ws)
3-element Vector{Float64}:
 0.16666666666666666
 0.3333333333333333
 0.5

julia> w = ws[1][Is[1]] .* ws[2][Is[2]] .* ws[3][Is[3]] # unnormalized
3-element Vector{Float64}:
 0.05
 0.1
 0.15000000000000002

julia> w ./= sum(w) # normalized
3-element Vector{Float64}:
 0.16666666666666666
 0.3333333333333333
 0.5
```
"""
algorithm2_2(Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{<:Real}}) where {M} =
    algorithm2_2!(Vector{_typeofinv(_typeofprod(ws))}(undef, maximum(length, Is)), Is, ws)

# function algorithm2_2_weightonly_quote(M::Int)
#     Is = Expr(:tuple)
#     ws = Expr(:tuple)
#     bc = Expr(:tuple)
#     for m = 1:M
#         push!(Is.args, Symbol(:I_, m))
#         push!(ws.args, Symbol(:w_, m))
#         push!(bc.args, Expr(:call, :checkbounds, Symbol(:w_, m), Symbol(:I_, m)))
#     end
#     block = Expr(:block)
#     loop = Expr(:for, Expr(:(=), :j, Expr(:call, :eachindex, ntuple(i -> Symbol(:I_, i), M)..., :w′)), block)
#     e = Expr(:call, :*)
#     for m = 1:M
#         push!(e.args, Expr(:ref, Symbol(:w_, m), Expr(:ref, Symbol(:I_, m), :j)))
#     end
#     push!(block.args, Expr(:(=), Expr(:ref, :w′, :j), e))
#     return quote
#         $Is = Is
#         $ws = ws
#         $bc
#         @inbounds @simd ivdep $loop
#         return w′
#     end
# end
# """
#     algorithm2_2_weightonly!(w, Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{T}}) where {M} where {T<:Real}
#
# Compute the local product of weights, storing the result in `w`.
#
# See also: [`algorithm2_2_weightonly`](@ref)
# """
# @generated function algorithm2_2_weightonly!(w′, Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{T}}) where {M} where {T<:Real}
#     algorithm2_2_weightonly_quote(M)
# end
# """
#     algorithm2_2_weightonly(Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{T}}) where {M} where {T<:Real}
#
# Compute the local product of weights identified by the index sets `Is`, which select
# the desired terms from the global weights `ws`. Mathematically, given:
# I₁ ∈ ℕᴺ , 𝐰₁ ∈ ℝᴰ¹
# I₂ ∈ ℕᴺ , 𝐰₂ ∈ ℝᴰ²
#  ⋮  ,  ⋮
# Iₘ ∈ ℕᴺ , 𝐰ₘ ∈ ℝᴰᵐ
# The iᵗʰ term will be computed as:
# wᵢ′ = ∏ₘ₌₁ᴹ 𝐰ₘ[Iₘ[i]] = ∏ₘ₌₁ᴹ 𝐰ₘ,ⱼ : j = Iₘ[i]
#
# See also: [`algorithm2_2_weightonly!`](@ref)
# """
# algorithm2_2_weightonly(Is::NTuple{M, Vector{Int}}, ws::NTuple{M, Vector{T}}) where {M} where {T<:Real} =
#     algorithm2_2_weightonly!(Vector{T}(undef, maximum(length, Is)), Is, ws)

################
#### Algorithm 3. -- FillMass
# 𝐰 ∈ ℝᴺ, u ∈ ℝ, 0 ≤ u ≤ 1
# -> p ∈ ℝᴺ, J = {i: wᵢ = 0}
# pᵢ =
#     Case 1: if J ≠ ∅
#         u / |J|                      if i ∈ J
#         (1 - u) * 𝐰ᵢ / ∑ᵢ₌₁ᴺ 𝐰ᵢ      otherwise
#     Case 2: if J = {1,…,N}
#         1 / N
## Mathematically consistent handling of Case 2:
# If 𝐰 = ̲0, then the ratio of masses is infinite, i.e. u / ∑ᵢ𝐰ᵢ = ∞, assuming that u > 0,
# which may not necessarily be the case. If u > 0, then it seems reasonable to
# handle case 2 as above, as one is effectively stating that u′ = u / N will be assigned
# to each pᵢ′ intermediate, then the pᵢ's will be normalized to sum to 1 --
# that is, ∑ᵢ₌₁ᴺ pᵢ′ = ∑ᵢ₌₁ᴺ u′ = N * u′ ⟹ pᵢ = u′ / (N * u′) = 1 / N
# This is the original Case 2, but if one implements it as a simple 1/N assignment,
# behavior for u = 0 is not mathematically correct -- it should be undefined as it is 0/0.
# I find the mathematically correct behavior much easier to reason about.
# Moreover, this approach encourages a sensible treatment of r = ∞ in the alternative
# which uses the ratio. In general, seems better to follow the math and return NaNs than
# add spooky substitutions such as 1/N even when u=0 and 𝐰 = ̲0.
## Alternative using ratio
# r ∈ ℝ
# r = u / (1 - u) ⟹ u = r / (1 + r)
# _r(u::T) where {T<:Real} = u / (one(T) - u)
_u(r::T) where {T<:Real} = isinf(r) && !signbit(r) ? one(T) : r / (one(T) + r)

_check_u01(u::S) where {S<:Real} =
    (zero(S) ≤ u ≤ one(S) || throw(DomainError(u, "u must be: $(zero(S)) ≤ u ≤ $(one(S))")))

"""
    algorithm3!(p::Vector{T}, u::Real) where {T<:Real}

Normalize `p` to probabilities, spreading probability mass `u` across the 0 or more
elements of `p` which are equal to zero. If all values of `p` are zero and `u ≠ 0`,
`p` will be filled with uniform probability mass.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm3`](@ref), [`algorithm3_ratio!`](@ref)

# Examples
```jldoctest
julia> algorithm3!(Rational{Int}[0, 10, 5, 0], 0.5)
4-element Vector{Rational{Int64}}:
 1//4
 1//3
 1//6
 1//4
```
"""
function algorithm3!(p::Vector{T}, u::T) where {T<:Real}
    _check_u01(u)
    s = zero(T)
    z = 0
    @inbounds @simd for i ∈ eachindex(p)
        pᵢ = p[i]
        s += pᵢ
        z += pᵢ == zero(T)
    end
    c = z == 0 ? inv(s) : (one(T) - u) / s
    # u′ = z == length(p) ? one(T) / z : u / z
    u′ = z == length(p) ? u / (u * z) : u / z
    @inbounds @simd for i ∈ eachindex(p)
        pᵢ = p[i]
        p[i] = pᵢ == zero(T) ? u′ : pᵢ * c
    end
    p
end
algorithm3!(p::Vector{T}, u::S) where {T<:Real, S<:Real} = algorithm3!(p, convert(T, u))

"""
    algorithm3!(p::Vector{T}, w::Vector{<:Real}, u::Real) where {T<:Real}

Normalize `w` to probabilities, storing the result in `p`, spreading probability mass
`0 ≤ u ≤ 1` across the 0 or more elements of `w` which are equal to zero.
If all values of `w` are zero and `u ≠ 0`, `p` will be filled with uniform probability mass.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

# Examples
```jldoctest
julia> w = [0, 10, 5, 1]; u = 0.5;

julia> algorithm3!(similar(w, Float64), w, u)
4-element Vector{Float64}:
 0.5
 0.3125
 0.15625
 0.03125
```
"""
function algorithm3!(p::Vector{S}, w::Vector{T}, u::S) where {S<:Real, T<:Real}
    _check_u01(u)
    s = zero(T)
    z = 0
    @inbounds @simd for i ∈ eachindex(p, w)
        w̃ = w[i]
        s += w̃
        p[i] = w̃
        z += w̃ == zero(T)
    end
    c = z == 0 ? one(S) / s : (one(S) - u) / s
    # u′ = z == length(p) ? one(S) / z : u / z
    u′ = z == length(p) ? u / (u * z) : u / z
    @inbounds @simd for i ∈ eachindex(p)
        pᵢ = p[i]
        p[i] = pᵢ == zero(S) ? u′ : pᵢ * c
    end
    p
end
algorithm3!(p::Vector{S}, w::Vector{T}, u::U) where {S<:Real, T<:Real, U<:Real} =
    algorithm3!(p, w, convert(S, u))

"""
    algorithm3(w::Vector{<:Real}, u::Real)

Return the vector of probabilities created by normalizing `w` to probabilities, then
spreading the probability mass `0 ≤ u ≤ 1` across the 0 or more elements of `w` which
are equal to zero. If all values of `w` are zero and `u ≠ 0`, a vector of uniform
probability mass is returned. Mathematically, given:

    𝐰 ∈ ℝᴺ, 0 ≤ wᵢ < ∞, u ∈ ℝ, 0 ≤ u ≤ 1, J = {i : 𝐰ᵢ = 0}

```
pᵢ =
    Case 1: if J ≠ ∅
        u / |J|                      if i ∈ J
        (1 - u) * 𝐰ᵢ / ∑ᵢ₌₁ᴺ 𝐰ᵢ      otherwise
    Case 2: if J = {1,…,N}
        u / (u * N)                  Equivalent to 1/N if u ≠ 0
```

See also: [`algorithm3!`](@ref), [`algorithm3_ratio`](@ref)

# Examples
```jldoctest
julia> algorithm3([0, 10, 5, 0], 0.5)
4-element Vector{Float64}:
 0.25
 0.3333333333333333
 0.16666666666666666
 0.25

julia> algorithm3(Rational{Int}[0, 0, 0], 0.25) # 𝐰 = ̲0
3-element Vector{Rational{Int64}}:
 1//3
 1//3
 1//3

julia> algorithm3([0, 0], 0.0) # 𝐰 = ̲0 and u = 0
2-element Vector{Float64}:
 NaN
 NaN

julia> algorithm3([1, 2, 3], 0.9) # in absence of 0's, just normalize
3-element Vector{Float64}:
 0.16666666666666666
 0.3333333333333333
 0.5
```
"""
algorithm3(p::Vector{T}, u::S) where {T<:Real, S<:Real} =
    algorithm3!(similar(p, promote_type(_typeofinv(T))), p, u)
# algorithm3!(similar(p, promote_type(_typeofinv(T), _typeofinv(S))), p, u)

#### Algorithm 3, in terms of ratio
"""
    algorithm3_ratio!(p::Vector{T}, r::Real) where {T<:Real}

Normalize `p` to probabilities, then spread the probability mass `u = r / (1 + r)` across
the 0 or more elements of `p` such that the ratio of the sum of (initially) zero elements
to the sum of the non-zero elements is equal to `r`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm3_ratio`](@ref), [`algorithm3!`](@ref)
"""
algorithm3_ratio!(p, r) = algorithm3!(p, _u(r))

"""
    algorithm3_ratio!(p::Vector{T}, w::Vector{<:Real}, r::Real) where {T<:Real}

Normalize `w` to probabilities, storing the result in `p`, then spread the probability
mass `u = r / (1 + r)` across the 0 or more elements of `w` such that the ratio of the
sum of (initially) zero elements to the sum of non-zero elements is equal to `r`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

# Examples
```jldoctest
julia> w = [0, 10, 5, 1]; r = 1.0;

julia> algorithm3_ratio!(similar(w, Float64), w, r)
4-element Vector{Float64}:
 0.5
 0.3125
 0.15625
 0.03125
```
"""
algorithm3_ratio!(p, w, r) = algorithm3!(p, w, _u(r))

"""
    algorithm3_ratio(w::Vector{<:Real}, r::Real)

Return a vector of probabilities created by normalizing `w` to probabilities, then spread
the probability mass `u = r / (1 + r)` across the 0 or more elements of `w` which are
equal to zero such that the ratio of the sum of (initially) zero elements to the sum of
non-zero elements is equal to `r`. If all values of `w` are zero and `r ≠ 0`, a vector
of uniform probability mass is returned. Mathematically, given:

    𝐰 ∈ ℝᴺ, 0 ≤ wᵢ < ∞, r ∈ ℝ, 0 ≤ r ≤ Inf, J = {i : 𝐰ᵢ = 0}

```
pᵢ =
    Case 1: if J ≠ ∅
        (r / (1+r)) / |J|                  if i ∈ J
        (1 / (1+r)) * 𝐰ᵢ / ∑ᵢ₌₁ᴺ 𝐰ᵢ        otherwise
    Case 2: if J = {1,…,N}
        r / (r * N)                        Equivalent to 1/N if r ≠ 0
```

See also: [`algorithm3_ratio!`](@ref), [`algorithm3`](@ref)

# Examples
```jldoctest
julia> w = Rational{Int}[1, 0, 3, 0, 5]; r = 3;

julia> p = algorithm3_ratio(w, r)
5-element Vector{Rational{Int64}}:
 1//36
 3//8
 1//12
 3//8
 5//36

julia> r′ = sum(p[findall(iszero, w)]) / sum(p[findall(!iszero, w)]); (r′, r′ == r)
(3//1, true)

julia> algorithm3(w, r / (1 + r)) # Note equivalence
5-element Vector{Rational{Int64}}:
 1//36
 3//8
 1//12
 3//8
 5//36

julia> algorithm3_ratio(w, Inf) # r = Inf ⟹ u = 1
5-element Vector{Rational{Int64}}:
 0//1
 1//2
 0//1
 1//2
 0//1
```
"""
algorithm3_ratio(p, r) = algorithm3(p, _u(r))

################
#### Algorithm 2.1. + Algorithm 3. (fused)
# A weight is assigned to i = 1,…,k components, and there are unknown components k+1,…,N.
# The unknown components are of the same category, and the probability mass of the category is
# known; alternatively, the ratio (between unknown/known) of probability masses may be specified.
# r = unknown/known = (∑ᵢ₌ₖ₊₁ᴺ pᵢ) / ∑ᵢ₌₁ᵏ pᵢ = (∑ᵢ₌ₖ₊₁ᴺ wᵢ) / ∑ᵢ₌₁ᵏ wᵢ ⟹
# r∑ᵢ₌₁ᵏ wᵢ = ∑ᵢ₌ₖ₊₁ᴺ wᵢ ⟹ r∑ᵢ₌₁ᵏ wᵢ = w′, wᵢ = w′ / (N - k), i=k+1,…,N
# r = u / (1 - u) ⟹ u = r / (1 + r) ⟹
# pᵢ = u / (N - k), i=k+1,…,N
# pᵢ = (1 - u) wᵢ / ∑ᵢ₌₁ᵏ wᵢ, i = 1,…,k
"""
    algorithm2_1_algorithm3!(p::Vector{T}, I::Vector{Int}, w::Vector{<:Real}, u::Real) where {T<:Real}

Normalize `w[I]` to probabilities, storing the result in `p`, then spreading probability
mass `0 ≤ u ≤ 1` across the 0 or more elements of `w[I]` which are equal to zero.
If all values of `w[I]` are zero and `u ≠ 0`, `p` will be filled with uniform probability mass.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm2_1_algorithm3`](@ref)

# Examples
```jldoctest
julia> I = [1, 2, 5, 6]; w = [10, 0, 30, 40, 0, 20]; u = 0.5;

julia> algorithm2_1_algorithm3!(similar(I, Rational{Int}), I, w, u)
4-element Vector{Rational{Int64}}:
 1//6
 1//4
 1//4
 1//3
```
"""
function algorithm2_1_algorithm3!(p::Vector{S}, I::Vector{Int}, w::Vector{T}, u::S) where {S<:Real, T<:Real}
    _check_u01(u)
    checkbounds(w, I)
    s = zero(T)
    z = 0
    @inbounds @simd ivdep for i ∈ eachindex(p, I)
        w̃ = w[I[i]]
        s += w̃
        p[i] = w̃
        z += w̃ == zero(T)
    end
    c = z == 0 ? one(S) / s : (one(S) - u) / s
    # u′ = z == length(p) ? one(S) / z : u / z
    u′ = z == length(p) ? u / (u * z) : u / z
    @inbounds @simd for i ∈ eachindex(p)
        pᵢ = p[i]
        p[i] = pᵢ == zero(S) ? u′ : pᵢ * c
    end
    p
end
algorithm2_1_algorithm3!(p::Vector{S}, I::Vector{Int}, w::Vector{T}, u::U) where {S<:Real, T<:Real, U<:Real} =
    algorithm2_1_algorithm3!(p, I, w, convert(S, u))

"""
    algorithm2_1_algorithm3(I::Vector{Int}, w::Vector{<:Real}, u::Real)

Return a vector of probabilities, normalizing the components selected from `w` by the
index set `I`, then spreading the probability mass `0 ≤ u ≤ 1` across the 0 or more
elements which are equal to zero. If all values of `w[I]` are zero and `u ≠ 0`, a vector
of uniform probability mass is returned.

Equivalent to `algorithm3!(algorithm2_1(I, w), u)` but more efficient.

See also: [`algorithm2_1_algorithm3!`](@ref), [`algorithm2_1`](@ref), [`algorithm3`](@ref)

# Examples
```jldoctest
julia> I = [1, 2, 5, 6]; w = [10, 0, 30, 40, 0, 20]; u = 0.5;

julia> algorithm2_1_algorithm3(I, w, u)
4-element Vector{Float64}:
 0.16666666666666666
 0.25
 0.25
 0.3333333333333333

julia> algorithm3!(algorithm2_1(I, w), u)
4-element Vector{Float64}:
 0.16666666666666666
 0.25
 0.25
 0.3333333333333333
```
"""
algorithm2_1_algorithm3(I::Vector{Int}, w::Vector{T}, u::S) where {T<:Real, S<:Real} =
    algorithm2_1_algorithm3!(similar(I, _typeofinv(T)), I, w, u)

#### Algorithm 2.1 fused with Algorithm 3 ratio
"""
    algorithm2_1_algorithm3_ratio!(p::Vector{T}, I::Vector{Int}, w::Vector{<:Real}, r::Real) where {T<:Real}

Normalize `w[I]` to probabilities, storing the result in `p`, then spreading probability
mass `u = r / (1 + r)` across the 0 or more elements of `w[I]` which are equal to zero
such that the ratio of the sum of (initially) zero elements to the sum of non-zero
elements is equal to `r`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm2_1_algorithm3_ratio`](@ref), [`algorithm2_1!`](@ref), [`algorithm3_ratio!`](@ref)

# Examples
```jldoctest
julia> I = [1, 2, 5, 6]; w = [10, 0, 30, 40, 0, 20]; r = 1.0;

julia> algorithm2_1_algorithm3_ratio!(similar(I, Rational{Int}), I, w, r)
4-element Vector{Rational{Int64}}:
 1//6
 1//4
 1//4
 1//3
```
"""
algorithm2_1_algorithm3_ratio!(p, I, w, r) = algorithm2_1_algorithm3!(p, I, w, _u(r))

"""
    algorithm2_1_algorithm3_ratio(I::Vector{Int}, w::Vector{<:Real}, r::Real)

Return a vector of probabilities, normalizing the components selected from `w` by the
index set `I`, then spreading the probability mass `u = r / (1 + r)` across the 0 or more
elements of `w[I]` which are equal to zero such that the ratio of the sum of (initially)
zero elements to the sum of non-zero elements is equal to `r`. If all values of `w[I]`
are zero and `r ≠ 0`, a vector of uniform probability mass is returned.

Equivalent to `algorithm3_ratio!(algorithm2_1(I, w), r)` but more efficient.

See also: [`algorithm2_1_algorithm3_ratio!`](@ref), [`algorithm2_1`](@ref), [`algorithm3_ratio`](@ref)

# Examples
```jldoctest
julia> I = [1, 2, 5, 6]; w = [10, 0, 30, 40, 0, 20]; r = 1.0;

julia> algorithm2_1_algorithm3_ratio(I, w, r)
4-element Vector{Float64}:
 0.16666666666666666
 0.25
 0.25
 0.3333333333333333

julia> algorithm3_ratio!(algorithm2_1(I, w), r)
4-element Vector{Float64}:
 0.16666666666666666
 0.25
 0.25
 0.3333333333333333
```
"""
algorithm2_1_algorithm3_ratio(I, w, r) = algorithm2_1_algorithm3(I, w, _u(r))

################
# Algorithm 4
# A weight is assigned to each i, and the w₁'s are normalized to probabilities.
# Then, a subset of the i's, denoted I′, is selected for re-weighting by a quantity
# which is undefined for I ∖ I′.
# I = {1,…,N}
# J₁ = {i: 𝐰₁ᵢ = 0}    I₁′ = {i: 𝐰₁ᵢ ≠ 0} = I ∖ J₁
# J₂ = {i: 𝐰₂ᵢ = 0}    I₂′ = {i: 𝐰₂ᵢ ≠ 0} = I ∖ J₂
# 𝐰₁ ∈ ℝᴺ : the initial weights
# 𝐰₂ ∈ ℝᴺ : the quantity which is undefined for J₂ = I ∖ I₂′; undefined shall be encoded
#           by a value of zero in 𝐰₂.
# pᵢ = 𝐰₁ᵢ / ∑ₗ₌₁ᴺ 𝐰₁ₗ, i ∈ I ∖ I₂′
# mᵏⁿᵒʷⁿ = ∑ᵢ pᵢ, i ∈ I ∖ I₂′
# mᵘⁿᵈᵉᶠⁱⁿᵉᵈ = 1 - mᵏⁿᵒʷⁿ = (∑ᵢ 𝐰₁ᵢ, i ∈ I₂′) / ∑ₗ₌₁ᴺ 𝐰₁ₗ
# pᵢ = mᵘⁿᵈᵉᶠⁱⁿᵉᵈ * 𝐰₂ᵢ / ∑ₗ₌₁ᴺ 𝐰₂ₗ, i ∈ I₂′
# In other words,
# pᵢ = (𝐰₂ᵢ * ∑ₗ 𝐰₁ₗ, l ∈ I₂′) / (∑ₗ₌₁ᴺ 𝐰₂ₗ * ∑ₗ₌₁ᴺ 𝐰₁ₗ)    i ∈ I₂′
## As cases, for clarity
# pᵢ = 𝐰₁ᵢ / ∑ₗ₌₁ᴺ 𝐰₁ₗ                                      i ∈ I ∖ I₂′
# pᵢ = (𝐰₂ᵢ * ∑ₗ 𝐰₁ₗ, l ∈ I₂′) / (∑ₗ₌₁ᴺ 𝐰₂ₗ * ∑ₗ₌₁ᴺ 𝐰₁ₗ)    i ∈ I₂′
# general, but must be protected against 𝐰₁ = ̲0 and/or 𝐰₂ = ̲0, which cause /0 error.
# Essentially, if
# s₁ = ∑ₗ₌₁ᴺ 𝐰₁ₗ
# s₂ = ∑ₗ₌₁ᴺ 𝐰₂ₗ
# and if s₁ = 0, then s₁ must be set equal to 1 to keep the terms defined.
# The same argument applies to s₂.
# An alternative line of reasoning suggests that it is preferable to be
# mathematically consistent and let /0 cause the expected behavior (NaNs).
# Mathematical consistency is much easier to reason about, as the definition
# of the algorithm clearly implies that if 𝐰₁ = ̲0, then everything that follows
# involves division by 0.
# _c1c2(::Type{T}, s₁′, s₁, s₂) where {T} = convert(T, inv(s₁)), convert(T, s₁′ / (s₁ * s₂))
"""
    algorithm4!(𝐰₁::Vector{T}, 𝐰₂::Vector{<:Real}) where {T<:Real}

Fill `𝐰₁` with the probabilities which result from `algorithm4(𝐰₁, 𝐰₂)`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.

See also: [`algorithm4`](@ref)
"""
function algorithm4!(w₁::Vector{T}, w₂::Vector{U}) where {T<:Real, U<:Real}
    s₁′ = zero(T)
    s₁ = zero(T)
    s₂ = zero(U)
    @inbounds @simd for i ∈ eachindex(w₁, w₂)
        w₁ᵢ = w₁[i]
        w₂ᵢ = w₂[i]
        s₁′ += w₂ᵢ == zero(U) ? zero(T) : w₁ᵢ
        s₁ += w₁ᵢ
        s₂ += w₂ᵢ
    end
    c₁ = inv(s₁)
    c₂ = s₁′ / (s₁ * s₂)
    # Unlike below, the potential instability is unavoidable here.
    # c₁, c₂ = _c1c2(T, s₁′, s₁, s₂)
    @inbounds @simd for i ∈ eachindex(w₁, w₂)
        w₁ᵢ = w₁[i]
        w₂ᵢ = w₂[i]
        w₁[i] = w₂ᵢ == zero(U) ? c₁ * w₁ᵢ : c₂ * w₂ᵢ
    end
    w₁
end

"""
    algorithm4!(p::Vector{T}, 𝐰₁::Vector{<:Real}, 𝐰₂::Vector{<:Real}) where {T<:Real}

Fill `p` with the probabilities which result from `algorithm4(𝐰₁, 𝐰₂)`.
Note that `T` must be a type which is able to hold the result of `inv(one(T))`.
"""
function algorithm4!(p::Vector{S}, w₁::Vector{T}, w₂::Vector{U}) where {S<:Real, T<:Real, U<:Real}
    s₁′ = zero(T)
    s₁ = zero(T)
    s₂ = zero(U)
    @inbounds @simd for i ∈ eachindex(w₁, w₂)
        w₁ᵢ = w₁[i]
        w₂ᵢ = w₂[i]
        s₁′ += w₂ᵢ == zero(U) ? zero(T) : w₁ᵢ
        s₁ += w₁ᵢ
        s₂ += w₂ᵢ
    end
    # c₁ = inv(s₁)
    # c₂ = s₁′ / (s₁ * s₂)
    # Equivalent, but improves type stability at the expense of inv(::Rational) not being used.
    # Note, however, inv(::Rational) occurs at most once, whereas the instability in the loop
    # incurs overhead length(p) times.
    c₁ = one(S) / s₁
    c₂ = s₁′ * c₁ / s₂
    @inbounds @simd for i ∈ eachindex(p, w₁, w₂)
        w₁ᵢ = w₁[i]
        w₂ᵢ = w₂[i]
        p[i] = w₂ᵢ == zero(U) ? c₁ * w₁ᵢ : c₂ * w₂ᵢ
    end
    p
end

"""
    algorithm4(𝐰₁::Vector{<:Real}, 𝐰₂::Vector{<:Real})

Return a vector of probabilities constructed according to the following algorithm:

Define:
    I = {1,…,N}
    J₁ = {i: 𝐰₁ᵢ = 0},    I₁′ = {i: 𝐰₁ᵢ ≠ 0} = I ∖ J₁
    J₂ = {i: 𝐰₂ᵢ = 0},    I₂′ = {i: 𝐰₂ᵢ ≠ 0} = I ∖ J₂
    𝐰₁ ∈ ℝᴺ : initial weights, 0 ≤ 𝐰₁ᵢ < Inf
    𝐰₂ ∈ ℝᴺ : augment weights, 0 ≤ 𝐰₂ᵢ < Inf; a value of zero indicates no re-weight

Then:
    pᵢ = 𝐰₁ᵢ / ∑ₗ₌₁ᴺ 𝐰₁ₗ,                                     i ∈ I ∖ I₂′
    pᵢ = (𝐰₂ᵢ * ∑ₗ 𝐰₁ₗ, l ∈ I₂′) / (∑ₗ₌₁ᴺ 𝐰₂ₗ * ∑ₗ₌₁ᴺ 𝐰₁ₗ),   i ∈ I₂′

This algorithm can produce a wide variety of probability vectors as the result of the
various combinations of intersections which can be formed from J₁, J₂, I₁′, and I₂′.
However, complexity of outputs aside, the motivating concept is quite simple:
take a vector of weights, `𝐰₁`, and re-weight some subset (I₂′) of those weights using a
second set of weights, `𝐰₂`, while preserving the proportion of probability mass derived
from `𝐰₁`. That is, given `p = algorithm4(𝐰₁, 𝐰₂)`, the following relationships are
preserved: `sum(p[J₂]) ≈ sum(𝐰₁[J₂]) / sum(𝐰₁[I₁′])`,
`sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(p[J₂]) / sum(p[I₂′])`.

See also: [`algorithm4!`](@ref)

# Examples
```jldoctest
julia> w₁ = [1, 1, 1, 1, 0];

julia> algorithm4(w₁, [2, 1, 3, 4, 0]) # J₁ ∩ I₂′ = ∅
5-element Vector{Float64}:
 0.2
 0.1
 0.30000000000000004
 0.4
 0.0

julia> algorithm4(w₁, [2, 1, 3, 0, 5]) # J₂ = [4] not re-weighted; I₂′ re-weighted
5-element Vector{Float64}:
 0.13636363636363635
 0.06818181818181818
 0.20454545454545453
 0.25
 0.3409090909090909

julia> w₁ = [1, 1, 1, 0, 0];

julia> algorithm4(w₁, [2, 1, 3, 4, 0]) # J₂ = [5] not re-weighted; I₂′ re-weighted
5-element Vector{Float64}:
 0.2
 0.1
 0.30000000000000004
 0.4
 0.0

julia> w₁ = [1, 1, 0, 1, 0];

julia> algorithm4(w₁, [0, 1, 0, 4, 0]) # J₂ = [1,3,5] not re-weighted; I₂′ re-weighted
5-element Vector{Float64}:
 0.3333333333333333
 0.13333333333333333
 0.0
 0.5333333333333333
 0.0

julia> algorithm4(w₁, [0, 0, 3, 4, 0]) # J₂ = [1,2,5] not re-weighted; I₂′ re-weighted
5-element Vector{Float64}:
 0.3333333333333333
 0.3333333333333333
 0.14285714285714285
 0.19047619047619047
 0.0

julia> algorithm4(w₁, [2, 0, 3, 0, 0]) # J₂ = [2,4,5] not re-weighted; I₂′ re-weighted
5-element Vector{Float64}:
 0.13333333333333333
 0.3333333333333333
 0.2
 0.3333333333333333
 0.0
```
"""
algorithm4(w₁::Vector{T}, w₂::Vector{U}) where {T<:Real, U<:Real} =
    algorithm4!(similar(w₁, promote_type(_typeofinv(T), _typeofinv(U))), w₁, w₂)

################################################################
# A type interface?
# abstract type AbstractNormalizer end
# struct Alg1 <: AbstractNormalizer end
# struct Alg2_1 <: AbstractNormalizer end
# struct Alg2_2 <: AbstractNormalizer end
# struct Alg3 <: AbstractNormalizer end
# struct Alg3Ratio <: AbstractNormalizer end
# struct Alg2_1_Alg3 <: AbstractNormalizer end
# struct Alg2_1_Alg3Ratio <: AbstractNormalizer end
# struct Alg4 <: AbstractNormalizer end
# retalg(A::T) where {T<:AbstractNormalizer} = retalg(T)
# function retalg(::Type{T}) where {T<:AbstractNormalizer}
#     if T === Alg1
#         return identity
#     elseif T === Alg2_1
#         return algorithm2_1
#     elseif T === Alg2_2
#         return algorithm2_2
#     elseif T === Alg3
#         return algorithm3
#     elseif T === Alg3Ratio
#         return algorithm3_ratio
#     elseif T === Alg2_1_Alg3
#         return algorithm2_1_algorithm3
#     elseif T === Alg2_1_Alg3Ratio
#         return algorithm2_1_algorithm3_ratio
#     elseif T === Alg4
#         return algorithm4
#     end
# end
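# A small consistency sketch for the u ↔ r reparameterization used above (values worked
# out by hand from `_u` and the commented-out `_r`):
# _u(1.0) == 0.5               # r = 1 ⟹ equal mass on (initially) zero and non-zero elements
# _u(Inf) == 1.0               # guarded by the isinf branch: all mass to the zero elements
# u = 0.25; r = u / (1 - u);   # inline `_r`, which is commented out above
# _u(r) ≈ 0.25                 # round trip recovers u (up to floating-point rounding)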
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # # A ∈ 𝔻ᴰ¹ˣᴰ²ˣᴰ³ˣ⋯ ; eltype(A) = Vector{T} where {T} # # T = Vector{Int} if 1. or 2.1. # # NTuple{M, Vector{Int}} where {M} if 2.2. # # (I, 𝐰₂)::Tuple{Vector{Int}, Vector{Float64}} if elaborate # # The abstract PVG algorithm interface may use a composition of types representing # # simple algorithms, ultimately producing a function from a composed type. # # composed type -> function -> PVG # # Alternatively, one can provide an arbitrary function for the PVG; this enables # # arbitrarily complex algorithms which cannot easily be expressed as some # # composition of simple algorithms. Simple algorithms necessitate a clear flow # # from state to state, whereas in practice, one may wish to re-use a partial # # state from an earlier step, so that a simple composition such as f ∘ g ∘ h # # would fail. # # The input to the function applied to each element of each element of A # # will generally have a signature which accepts a single argument. # # The result of the function applied to each element of each element of A # # should always be Tuple{Vector{Int}, Vector{<:AbstractFloat}} # # The expected case: eltype(A) as above # function pvg(f::Function, A::AbstractArray{Vector{T}, N}, ws) where {T, N} # map(a -> map(x -> f(x, ws), a), A) # end # function pvg!(f::Function, B::AbstractArray{Vector{Tuple{Vector{Int}, Vector{S}}}, N}, A::AbstractArray{Vector{T}, N}, ws) where {T, N} where {S<:AbstractFloat} # for i ∈ eachindex(B, A) # B[i] = map(x -> f(x, ws), A[i]) # end # B # end # function pvg(f::Function, A::AbstractArray{Vector{T}, N}, ws::Tuple{}) where {T, N} # map(a -> map(f, a), A) # end # function pvg!(f::Function, B::AbstractArray{Vector{Tuple{Vector{Int}, Vector{S}}}, N}, A::AbstractArray{Vector{T}, N}, ws::Tuple{}) where {T, N} where {S<:AbstractFloat} # for i ∈ eachindex(B, A) # B[i] = map(f, A[i]) # end # B # end # # A simplification: an array of T, rather than Vector{T} # pvg(f::Function, A::AbstractArray{T, N}, ws) where {T, N} = map(x -> f(x, ws), A) # function pvg!(f::Function, B::AbstractArray{Tuple{Vector{Int}, Vector{S}}, N}, A::AbstractArray{T, N}, ws) where {T, N} where {S<:AbstractFloat} # for i ∈ eachindex(B, A) # B[i] = f(A[i], ws) # end # B # end # pvg(f::Function, A::AbstractArray{T, N}, ws::Tuple{}) where {T, N} = map(f, A) # function pvg!(f::Function, B::AbstractArray{Tuple{Vector{Int}, Vector{S}}, N}, A::AbstractArray{T, N}, ws::Tuple{}) where {T, N} where {S<:AbstractFloat} # for i ∈ eachindex(B, A) # B[i] = f(A[i]) # end # B # end # # cumulative option: f(I, 𝐰) -> (Iₛ, ω), then g(Iₛ, ω) -> (Iₛ, Σω) # # g(Iₛ, ω) = Iₛ, cumsum(ω) # or, Iₛ, cumsum!(ω) # # g(f, I, 𝐰) = g(f(Iₛ, ω)) # g ∘ f # _g(Iₛ, ω) = Iₛ, cumsum(ω) # _g((Iₛ, ω)) = _g(Iₛ, ω) # # an optimized case for Algorithm_1 # function _g(Iₛ::Vector{Int}) # N = length(Iₛ) # c = inv(N) # Σω = Vector{Float64}(undef, N) # @inbounds @simd for i ∈ eachindex(Σω) # Σω[i] = i * c # end # Iₛ, Σω # end # pvg_cumulative(f, A, ws) = pvg(_g ∘ f, A, ws) # pvg_cumulative!(f, B, A, ws) = pvg!(_g ∘ f, B, A, ws) # # Example: elaborate case (I, 𝐰₂) # # I ∈ ℕᴺ 𝐰₂ ∈ ℝᴺ # # | | # # v v # # A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{Float64}}}, N} where {N} # function f(I, 𝐰₂, 𝐰, u) # Iₛ = I # 𝐰₁ = Algorithm_2_1(I, 𝐰) # ω₁ = Algorithm_4(𝐰₁, 𝐰₂) # ω = Algorithm_3(ω₁, u) # Iₛ, ω # end # f((I, 𝐰₂), 𝐰, u) = f(I, 𝐰₂, 𝐰, u) # # closure to provide u; u could also just be hard-coded into original 
function definition # f((I, 𝐰₂), 𝐰) = f(I, 𝐰₂, 𝐰, 0.5) # This meets the necessary signature for pvg # # Example: simple Algorithm 2.2. # # I₁ ∈ ℕᴺ I₂ ∈ ℝᴺ # # | | # # v v # # A::AbstractArray{Vector{Tuple{Vector{Int}, Vector{Int}}}, N} where {N} # function f(Is, ws) # Iₛ = Is[1] # ω = Algorithm_2_2(Is, ws) # Iₛ, ω # end # # Example: Algorithm 1. # # I₁ ∈ ℕᴺ # # | # # v # # A::AbstractArray{Vector{Vector{Int}}, N} where {N} # function f(Iₛ) # N = length(Iₛ) # ω = fill(inv(N), N) # Iₛ, ω # end # # This is best handled by a special dispatch as it can be an optimized case. It is # # also a nice opportunity to be able to just sample such cases. # # However, if one wants to sample using the in-order traversal, it may be most # # efficient to use pre-computed Σω's. Thus, generating a ::Tuple{Vector{Int}, Vector{Float64}} # # for each is a good utility for Algorithm 1. If one wants to sample without allocation, # # the "default" sample algorithm can be made to dispatch on eltype(A)::Vector{Vector{Int}} # # Benchmarking is needed to determine if pre-computed Σω's are faster than `rand(Iₛ)` # # Example: Algorithm 2.1. + Algorithm 3. # # Iₛ ∈ ℕᴺ # # | # # v # # A::AbstractArray{Vector{Vector{Int}}, N} where {N} # function f(I, 𝐰, u) # Iₛ = I # ω₁ = Algorithm_2_1(I, 𝐰) # ω = Algorithm_3(ω₁, u) # Iₛ, ω # end # f(I, 𝐰) = f(I, 𝐰, 0.5) # closure to provide u; or just hard code u ################################################################ # 2022-08-07: revised pvg; useful approach to dealing with type instability # of what could otherwise be phrased as map(a -> map(x -> (x, f(x)), a), A) # Needs to be adapted to handle AbstractArray{<:AbstractArray{T}} and also AbstractArray{T} # ultimately, if `f` is type-stable, then one can just use `map` to create a homogeneously # typed output; such an approach is more flexible, and defers the details to the user. 
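# For instance, a type-stable kernel `f` (hypothetical; shown here only to
# illustrate the point above) can be mapped directly, or passed to the `pvg`
# defined below:
# f(Iₛ) = (Iₛ, fill(inv(length(Iₛ)), length(Iₛ)))   # Algorithm 1. weights
# A = [[[1, 2], [3, 4]], [[5, 6]]]
# map(a -> map(f, a), A)    # works when `f` is type-stable and no element is empty
# pvg(f, A)                 # same result; also handles empty inner arrays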
_typeoffirstnonempty(f, A) = typeof(f(first(A[findfirst(!isempty, A)]))) function pvg(f, A::AbstractArray{T, N}) where {T<:AbstractArray{S, M}, N} where {S, M} Tₒ = _typeoffirstnonempty(f, A) B = initialize(Array{Tₒ, M}, size(A)) pvg!(f, B, A) end function pvg!(f, B, A) for i ∈ eachindex(B, A) !isempty(A[i]) && (B[i] = map(f, A[i])) end B end function tpvg(f, A::AbstractArray{T, N}) where {T<:AbstractArray{S, M}, N} where {S, M} Tₒ = _typeoffirstnonempty(f, A) B = tinitialize(Array{Tₒ, M}, size(A)) tpvg!(f, B, A) end function tpvg!(f, B, A) Threads.@threads for i ∈ eachindex(B, A) # !isempty(A[i]) && (B[i] = map(f, A[i])) if isempty(A[i]) empty!(B[i]) else B[i] = map(f, A[i]) end end B end # Array of array constructors function initialize!(A::AbstractArray{T, N}) where {T<:AbstractArray, N} for i ∈ eachindex(A) A[i] = T() end A end function initialize!(A::AbstractArray{T, N}) where {T<:Number, N} for i ∈ eachindex(A) A[i] = zero(T) end A end _initialize(::Type{T}, dims::NTuple{N, Int}) where {T<:AbstractArray} where {N} = initialize!(Array{T, N}(undef, dims)) _initialize(::Type{T}, dims::NTuple{N, Int}) where {T<:Number} where {N} = zeros(T, dims) initialize(::Type{T}, dims::NTuple{N, Int}) where {T} where {N} = _initialize(T, dims) initialize(::Type{T}, dims::Vararg{Integer, N}) where {T} where {N} = initialize(T, (map(Int, dims)...,)) function tinitialize!(A::AbstractArray{T, N}) where {T, N} @sync for slc ∈ eachslice(A, dims=N) Threads.@spawn initialize!(slc) end A end tinitialize(::Type{T}, dims::NTuple{N, Int}) where {T<:AbstractArray} where {N} = tinitialize!(Array{T, N}(undef, dims)) tinitialize(::Type{T}, dims::Vararg{Integer, N}) where {T} where {N} = tinitialize(T, (map(Int, dims)...,))
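# Usage sketch for the array-of-array constructors (hypothetical dims):
# initialize(Vector{Int}, 2, 3)    # 2×3 Matrix, each element a fresh (empty) Int[]
# initialize(Float64, 2, 3)        # zeros(Float64, 2, 3)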
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
15084
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # A ∈ 𝔻ᴰ¹ˣᴰ²ˣᴰ³ˣ⋯ ; eltype(A) = Vector{Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # (Iₛ, ω) OR (Iₛ, Σω) # Each sampling routine is identical: unpack the tuple, draw c ~ Categorical(ω) and # obtain the real category as Iₛ[c]. # This enables an encapsulation of all PVG-induced variability, hence, a consistent # interface for the sampler. # Technically, `sample` only needs to know ndims(A), not necessarily the element type. # The appropriate dispatch on element type is necessary for `sample!` # `sample` could instead use # A::AbstractArray{U, N} where {U<:Union{Vector{Tuple{Vector{Int}, Vector{T}}}, Tuple{Vector{Int}, Vector{T}}}} where {T<:AbstractFloat} # Actually, this should ideally support: # array of array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:AbstractArray{S, M}} where {M} where {S<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # (sparse) vector # A::Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # The bare minimum for `sample` interface-- covers all 4 other definitions. # sample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = sample(S, A, n_sim, n_cat, dims) # sample(::Type{S}, A, n_sim; dims=:) where {S} = sample(S, A, n_sim, num_cat(A), dims) # sample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = sample(S, A, n_sim, n_cat, (dims,)) # function sample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} # Dᴬ = size(A) # Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) # B = fill!(similar(A, S, Dᴮ), zero(S)) # sample!(B, A) # end # function sample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} # B = fill!(similar(A, S, (n_sim, n_cat)), zero(S)) # sample!(B, A) # end #### A revised public interface """ sample(::Type{T<:Real}=Int, A::AbstractArray, n_sim::Int; [dims=:], [n_cat=nothing]) Draw `n_sim` samples from the distribution which corresponds to the sum of the independent categorical distributions defined by the probability mass vector(s), `A`, storing the result in an array of `eltype` `T` of sufficient size and dimension as determined by the input, `A`, the number of categories, `n_cat`, and the (potential) reduction dimensions, `dims`. `dims` is an optional keyword argument which specifies the dimensions of `A` over which an in-place sum is performed (applicable if `A` is an array of arrays). `n_cat` may be optionally specified, or inferred from the available data; the validity of indices will be checked regardless. In the simplest case, `A` may be a single probability mass vector, for which compatible types are `AbstractVector{<:Real}`, `SparseVector{<:Real, <:Integer}`, and `Tuple{Vector{Int}, Vector{<:AbstractFloat}}`, the last of which is simply another representation of a sparse vector. In the second case, `A` may be an `AbstractArray{V, N} where {V, N}` in which `V` is any of the types specified above for a single probability mass vector. That is, an array of vectors. In the third case, `A` may be an `AbstractArray{W, M} where {W<:AbstractArray{V, N}, M} where {V, N}` in which `W` is any of the types specified in the second case. In other words, an array of arrays of vectors.
See also: [`tsample`](@ref), [`vsample`](@ref), [`vtsample`](@ref) """ sample(::Type{S}, A, n_sim; dims=:, n_cat=nothing) where {S<:Real} = _sample(S, A, n_sim, n_cat, dims) sample(A, n_sim; dims=:, n_cat=nothing) = _sample(Int, A, n_sim, n_cat, dims) _sample(::Type{S}, A, n_sim, n_cat::Int, dims::Int) where {S<:Real} = _sample(S, A, n_sim, n_cat, (dims,)) _sample(::Type{S}, A, n_sim, ::Nothing, dims) where {S<:Real} = _sample(S, A, n_sim, num_cat(A), dims) function _sample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = fill!(similar(A, S, Dᴮ), zero(S)) sample!(B, A) end function _sample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_sim, n_cat)), zero(S)) sample!(B, A) end ################ # The expected case: vectors of sparse vectors (as their bare components) """ sample!(B::AbstractArray, A::AbstractArray) Draw samples, summing (and potentially reducing) in-place into `B`. The shape of `B` determines the extent of reduction performed. See also: [`tsample!`](@ref), [`vsample!`](@ref), [`vtsample!`](@ref) """ function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end # A simplification: an array of sparse vectors (as bare components) function sample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end B end # The simplest case: a sparse vector (as bare components) _sample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _sample(S, A, n_sim, n_cat, :) _sample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _sample(S, A, n_sim, n_cat, :) function _sample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) sample!(B, A) end function sample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, p = A K, V = sqhist(p) C = generate(K, V, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, 
Iₛ[c]] += one(S) end B end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) generate!(C, U, n) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end # A simplification: an array of sparse vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) generate!(C, U, n) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end B end # # The simplest case: a sparse vector _sample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} = _sample(S, A, n_sim, n_cat, :) _sample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} = _sample(S, A, n_sim, n_cat, :) function _sample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} B = zeros(S, n_sim, n_cat) sample!(B, A) end # Oddly, the fastest sampler is non-allocating -- most likely due to # the elimination of store + access instructions associated with using a temporary array. function sample!(B::AbstractMatrix{S}, Iₛ::Vector{Int}) where {S<:Real} _check_reducedims(B, Iₛ) n = length(Iₛ) C = generate(n, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c]] += one(S) end B end ################ # General case: dense vectors, the linear index of which indicates the category function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for p ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, c, IR] += one(S) end end end B end # A simplification: an array of dense vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, c, IR] += one(S) end end B end # The simplest case: a dense vector _sample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) 
where {S<:Real} where {T<:AbstractFloat} = _sample(S, A, n_sim, n_cat, :) _sample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _sample(S, A, n_sim, n_cat, :) function _sample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) sample!(B, A) end function sample!(B::AbstractMatrix{S}, A::Vector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) K, V = sqhist(A) C = generate(K, V, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, c] += one(S) end B end ################ # General case: sparse vectors, the nzval of which indicates the category function sample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end # A simplification: an array of sparse vectors function sample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end B end # The simplest case: a sparse vector _sample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _sample(S, A, n_sim, n_cat, :) _sample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _sample(S, A, n_sim, n_cat, :) function _sample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) sample!(B, A) end function sample!(B::AbstractMatrix{S}, A::SparseVector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, p = A.nzind, A.nzval K, V = sqhist(p) C = generate(K, V, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c]] += one(S) end B end
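################################################################
# Usage sketch (illustrative only; the inputs are hypothetical and the counts
# are random, so only the shape and the row sums are guaranteed):
# A = [([1, 3], [0.4, 0.6]), ([2, 4], [0.5, 0.5])]   # two sparse PMFs as (Iₛ, p)
# B = sample(Int, A, 10)                # 10×4 matrix of counts; n_cat inferred as 4
# all(==(length(A)), sum(B, dims=2))    # true: each simulation draws once per PMF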
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
14320
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # mirror of sampler.jl; separate file for variants on threading # The bare minimum for `sample` interface-- covers all 4 other definitions. """ tsample(::Type{T<:Real}=Int, A::AbstractArray, n_sim::Int; [dims=:], [n_cat=nothing], [chunksize=5000]) See `sample` for full documentation; identical in behavior except that thread-based parallelism is used to accelerate the computation. The optional `chunksize` keyword argument provides a fixed upper bound for the number of simulations to be passed to each thread. Depending on the number of independent categorical distributions to be sampled and the number of simulations to be performed, a `chunksize` of `5000` is likely too conservative. The user is encouraged to try smaller chunk sizes. See also: [`sample`](@ref), [`vsample`](@ref), [`vtsample`](@ref) """ tsample(::Type{S}, A, n_sim; dims=:, n_cat=nothing, chunksize=5000) where {S<:Real} = _tsample(S, A, n_sim, n_cat, dims, chunksize) tsample(A, n_sim; dims=:, n_cat=nothing, chunksize=5000) = _tsample(Int, A, n_sim, n_cat, dims, chunksize) _tsample(::Type{S}, A, n_sim, n_cat::Int, dims::Int, chunksize) where {S<:Real} = _tsample(S, A, n_sim, n_cat, (dims,), chunksize) _tsample(::Type{S}, A, n_sim, ::Nothing, dims, chunksize) where {S<:Real} = _tsample(S, A, n_sim, num_cat(A), dims, chunksize) function _tsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}, chunksize::Int) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = fill!(similar(A, S, Dᴮ), zero(S)) tsample!(B, A, chunksize) end function _tsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_sim, n_cat)), zero(S)) tsample!(B, A, chunksize) end """ tsample!(B::AbstractArray, A::AbstractArray; [chunksize=5000]) Identical to `sample!` except that thread-based parallelism is used to accelerate the computation.
See also: [`sample!`](@ref), [`vsample!`](@ref), [`vtsample!`](@ref) """ tsample!(B, A; chunksize::Int=5000) = tsample!(B, A, chunksize) function tsample!(B, A, chunksize::Int) _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize) @batch for r in rs _sample_chunk!(B, A, keep, default, r) end return B end ################ # The expected case: vectors of sparse vectors (as their bare components) function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end end return B end # A simplification: an array of sparse vectors function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} L = length(ℐ) C, U = _genstorage_init(Float64, L) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end return B end # The simplest case: a sparse vector _tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} = _tsample(S, A, n_sim, n_cat, :, chunksize) _tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} where {N} = _tsample(S, A, n_sim, n_cat, :, chunksize) function _tsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) tsample!(B, A, chunksize) end function tsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{<:AbstractFloat}}, chunksize::Int) where {S<:Real} _check_reducedims(B, A) rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize) @batch for r in rs _sample_chunk!(B, A, r) end return B end function _sample_chunk!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} Iₛ, p = A K, V = sqhist(p) C = generate(K, V, length(ℐ)) @inbounds for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c]] += one(S) end return B end function _sample_chunk!(B::AbstractMatrix{S}, A::Tuple{AbstractVector{Int}, AbstractVector{T}}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} Iₛ, p = A n = length(Iₛ) Iₛp = (copyto!(Vector{Int}(undef, n), Iₛ), copyto!(Vector{T}(undef, n), p)) _sample_chunk!(B, Iₛp, ℐ) end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function _sample_chunk!(B::AbstractArray{S, N′}, 
A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} C, U = _genstorage_init(Float64, length(ℐ)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) generate!(C, U, n) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end end return B end # A simplification: an array of sparse vectors function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {N} C, U = _genstorage_init(Float64, length(ℐ)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) generate!(C, U, n) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end return B end # The simplest case: a sparse vector _tsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} = _tsample(S, A, n_sim, n_cat, :, chunksize) _tsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {N} = _tsample(S, A, n_sim, n_cat, :, chunksize) function _tsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} B = zeros(S, n_sim, n_cat) tsample!(B, A, chunksize) end function tsample!(B::AbstractMatrix{S}, A::Vector{Int}, chunksize::Int) where {S<:Real} _check_reducedims(B, A) rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize) @batch for r in rs _sample_chunk!(B, A, r) end return B end function _sample_chunk!(B::AbstractMatrix{S}, A::AbstractVector{Int}, ℐ::UnitRange{Int}) where {S<:Real} Iₛ = A n = length(Iₛ) C = generate(n, length(ℐ)) @inbounds for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c]] += one(S) end return B end ################ # General case: dense vectors, the linear index of which indicates the category function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for p ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, c, IR] += one(S) end end end return B end # A simplification: an array of dense vectors function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, c, IR] += one(S) end end return B end # The simplest case: a dense vector _tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} = _tsample(S, A, n_sim, n_cat, :, chunksize) _tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) 
where {S<:Real} where {T<:AbstractFloat} where {N} = _tsample(S, A, n_sim, n_cat, :, chunksize) function _tsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) tsample!(B, A, chunksize) end function tsample!(B::AbstractMatrix{S}, A::Vector{T}, chunksize::Int) where {S<:Real, T<:AbstractFloat} _check_reducedims(B, A) rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize) @batch for r in rs _sample_chunk!(B, A, r) end return B end function _sample_chunk!(B::AbstractMatrix{S}, A::AbstractVector{T}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} p = copyto!(Vector{T}(undef, length(A)), A) K, V = sqhist(p) C = generate(K, V, length(ℐ)) @inbounds for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, c] += one(S) end return B end ################ # General case: sparse vectors, the nzval of which indicates the category function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end end return B end # A simplification: an array of sparse vectors function _sample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} C, U = _genstorage_init(Float64, length(ℐ)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) generate!(C, U, K, V) for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c], IR] += one(S) end end return B end # The simplest case: a sparse vector _tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} = _tsample(S, A, n_sim, n_cat, :, chunksize) _tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} where {N} = _tsample(S, A, n_sim, n_cat, :, chunksize) function _tsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) tsample!(B, A, chunksize) end function tsample!(B::AbstractMatrix{S}, A::SparseVector{<:AbstractFloat}, chunksize::Int) where {S<:Real} _check_reducedims(B, A) rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize) @batch for r in rs _sample_chunk!(B, A, r) end return B end function _sample_chunk!(B::AbstractMatrix{S}, A::SparseVector{T}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat} Iₛ, p = A.nzind, A.nzval K, V = sqhist(p) C = generate(K, V, length(ℐ)) @inbounds for l ∈ eachindex(C, ℐ) c = C[l] i = ℐ[l] B[i, Iₛ[c]] += one(S) end return B end
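################################################################
# Usage sketch (hypothetical inputs): same interface as `sample`, plus the
# `chunksize` keyword which bounds the number of simulations handled per thread:
# p = [0.2, 0.3, 0.5]
# B = tsample(Int, p, 100_000; chunksize=10_000)   # 100_000×3 counts, 10 chunks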
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
7570
# # Date created: 2022-06-13 # Author: aradclif # # ############################################################################################ # Conveniences const _mni64 = typemin(Int) const _mxi64 = typemax(Int) _maximum_maybe(x::AbstractVector{T}) where {T<:Integer} = isempty(x) ? zero(T) : maximum(x) _maximum_maybe((x, y)::Tuple{Vector{Int}, Vector{T}}) where {T<:AbstractFloat} = _maximum_maybe(x) num_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} = isempty(A) ? 0 : maximum(num_cat, A, init=_mni64) num_cat(A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {T<:AbstractFloat, N} = isempty(A) ? 0 : maximum(_maximum_maybe, A, init=_mni64) num_cat(A::Tuple{Vector{Int}, Vector{T}}) where {T<:AbstractFloat} = _maximum_maybe(A) num_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{Vector{Int}, M}, N} where {M} = isempty(A) ? 0 : maximum(num_cat, A, init=_mni64) num_cat(A::AbstractArray{Vector{Int}, N}) where {N} = isempty(A) ? 0 : maximum(_maximum_maybe, A, init=_mni64) num_cat(A::Vector{Int}) = _maximum_maybe(A) num_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} = isempty(A) ? 0 : maximum(num_cat, A, init=_mni64) num_cat(A::AbstractArray{Vector{T}, N}) where {T<:AbstractFloat, N} = isempty(A) ? 0 : maximum(length, A, init=_mni64) num_cat(A::Vector{T}) where {T<:AbstractFloat} = length(A) num_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} = isempty(A) ? 0 : maximum(num_cat, A, init=_mni64) num_cat(A::AbstractArray{SparseVector{Tv, Ti}, N}) where {Tv<:AbstractFloat, Ti<:Integer, N} = isempty(A) ? 0 : maximum(length, A, init=_mni64) num_cat(A::SparseVector{Tv, Ti}) where {Tv<:AbstractFloat, Ti<:Integer} = length(A) _minimum_maybe(x::AbstractVector{T}) where {T<:Integer} = isempty(x) ? one(T) : minimum(x) _minimum_maybe((x, y)::Tuple{Vector{Int}, Vector{T}}) where {T<:AbstractFloat} = _minimum_maybe(x) num_cat_min(A::AbstractArray{R, N}) where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} = isempty(A) ? 1 : minimum(num_cat_min, A, init=_mxi64) num_cat_min(A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {T<:AbstractFloat, N} = isempty(A) ? 1 : minimum(_minimum_maybe, A, init=_mxi64) num_cat_min(A::Tuple{Vector{Int}, Vector{T}}) where {T<:AbstractFloat} = _minimum_maybe(A) num_cat_min(A::AbstractArray{R, N}) where {R<:AbstractArray{Vector{Int}, M}, N} where {M} = isempty(A) ? 1 : minimum(num_cat_min, A, init=_mxi64) num_cat_min(A::AbstractArray{Vector{Int}, N}) where {N} = isempty(A) ? 1 : minimum(_minimum_maybe, A, init=_mxi64) num_cat_min(A::Vector{Int}) = _minimum_maybe(A) num_cat_min(A::AbstractArray{R, N}) where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} = 1 num_cat_min(A::AbstractArray{Vector{T}, N}) where {T<:AbstractFloat, N} = 1 num_cat_min(A::Vector{T}) where {T<:AbstractFloat} = 1 num_cat_min(A::AbstractArray{R, N}) where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} = 1 num_cat_min(A::AbstractArray{SparseVector{Tv, Ti}, N}) where {Tv<:AbstractFloat, Ti<:Integer, N} = 1 num_cat_min(A::SparseVector{Tv, Ti}) where {Tv<:AbstractFloat, Ti<:Integer} = 1 #### # _extrema_maybe(x::AbstractVector{T}) where {T<:Real} = isempty(x) ? 
(zero(T), zero(T)) : extrema(x) # bounds_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} = # (((lb1, ub1), (lb2, ub2)) = extrema(bounds_cat, A, init=((1,1), (0,0))); # extrema((lb1, ub1, lb2, ub2))) # bounds_cat(A::AbstractArray{T, N}) where {T<:Tuple{Vector{Int}, Vector{<:AbstractFloat}}, N} = # (((lb1, ub1), (lb2, ub2)) = extrema(bounds_cat, A, init=((1,1), (0,0))); # extrema((lb1, ub1, lb2, ub2))) # bounds_cat(A::Tuple{Vector{Int}, Vector{T}}) where {T<:AbstractFloat} = _extrema_maybe(A[1]) # bounds_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{Vector{Int}, M}, N} where {M} = # (((lb1, ub1), (lb2, ub2)) = extrema(bounds_cat, A, init=((1,1), (0,0))); # extrema((lb1, ub1, lb2, ub2))) # bounds_cat(A::AbstractArray{Vector{Int}, N}) where {N} = # (((lb1, ub1), (lb2, ub2)) = extrema(_extrema_maybe, A, init=((1,1), (0,0))); # extrema((lb1, ub1, lb2, ub2))) # bounds_cat(A::Vector{Int}) = _extrema_maybe(A) # bounds_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} = (n = num_cat(A); n ≥ 1 ? (1, n) : (0, 0)) # bounds_cat(A::AbstractArray{Vector{T}, N}) where {T<:AbstractFloat, N} = (n = num_cat(A); n ≥ 1 ? (1, n) : (0, 0)) # bounds_cat(A::Vector{T}) where {T<:AbstractFloat} = (n = num_cat(A); n ≥ 1 ? (1, n) : (0, 0)) # bounds_cat(A::AbstractArray{R, N}) where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} = # (n = num_cat(A); n ≥ 1 ? (1, n) : (0, 0)) # bounds_cat(A::AbstractArray{SparseVector{Tv, Ti}, N}) where {Tv<:AbstractFloat, Ti<:Integer, N} = # (n = num_cat(A); n ≥ 1 ? (1, n) : (0, 0)) # bounds_cat(A::SparseVector{T}) where {T<:AbstractFloat} = (n = num_cat(A); n ≥ 1 ? (1, n) : (0, 0)) # NB: the two-argument method must remain live; the generic single-argument method dispatches onto it. bounds_cat(x::Int, y::Int) = (x, y) bounds_cat(A::T) where {T} = bounds_cat(num_cat_min(A), num_cat(A)) ################ function _checkindex_reducedims(ax, lb, ub) (checkindex(Bool, ax, lb) && checkindex(Bool, ax, ub)) || throw(DimensionMismatch("cannot sample from categories on range $(lb:ub) into array with category dimension $(ax)")) true end @noinline function _check_reducedims(B, A) Rdims = axes(B)[3:end] lb, ub = bounds_cat(A) _checkindex_reducedims(axes(B, 2), lb, ub) length(Rdims) ≤ ndims(A) || throw(DimensionMismatch("cannot reduce $(ndims(A))-dimensional array to $(length(Rdims)) trailing dimensions")) for i ∈ eachindex(Rdims) Ri, Ai = Rdims[i], axes(A, i) length(Ri) == 1 || Ri == Ai || throw(DimensionMismatch("reduction on array with indices $(axes(A)) with output with indices $(Rdims)")) end true end for Tₐ ∈ (Tuple{Vector{Int}, Vector{<:AbstractFloat}}, Vector{Int}, Vector{<:AbstractFloat}, SparseVector{<:AbstractFloat}) @eval @noinline function _check_reducedims(B, A::$Tₐ) lb, ub = bounds_cat(A) _checkindex_reducedims(axes(B, 2), lb, ub) true end end ################################################################ """ splitranges(start, stop, chunksize) Divide the range `start:stop` into segments, each of size `chunksize`. The last segment will contain the remainder, `(stop - start + 1) % chunksize`, if it exists. """ function splitranges(start::Int, stop::Int, Lc::Int) L = stop - start + 1 n, r = divrem(L, Lc) ranges = Vector{UnitRange{Int}}(undef, r == 0 ? n : n + 1) l = start @inbounds for i = 1:n l′ = l l += Lc ranges[i] = l′:(l - 1) end if r != 0 @inbounds ranges[n + 1] = (stop - r + 1):stop end return ranges end """ splitranges(ur::UnitRange{Int}, chunksize) Divide the range `ur` into segments, each of size `chunksize`.
""" splitranges(ur::UnitRange{Int}, Lc::Int) = splitranges(ur.start, ur.stop, Lc) ################################################################ # sampler accessories _largesmall_init(n::Int) = Vector{Int}(undef, n), Vector{Int}(undef, n) @inline _genstorage_init(T::Type{<:AbstractFloat}, n::Int) = Vector{Int}(undef, n), Vector{T}(undef, n)
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
14017
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # A ∈ 𝔻ᴰ¹ˣᴰ²ˣᴰ³ˣ⋯ ; eltype(A) = Vector{Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # (Iₛ, ω) OR (Iₛ, Σω) # Each sampling routine is identical: unpack the tuple, draw c ~ Categorical(ω) and # obtain the real category as Iₛ[c]. # This enables an encapsulation of all PVG-induced variability, hence, a consistent # interface for the sampler. # Technically, `sample` only needs to know ndims(A), not necessarily the element type. # The appropriate dispatch on element type is necessary for `sample!` # `sample` could instead use # A::AbstractArray{U, N} where {U<:Union{Vector{Tuple{Vector{Int}, Vector{T}}}, Tuple{Vector{Int}, Vector{T}}}} where {T<:AbstractFloat} # Actually, this should ideally support: # array of array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:AbstractArray{S, M}} where {M} where {S<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # array of (sparse) vector # A::AbstractArray{T, N} where {N} where {T<:Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}}} # (sparse) vector # A::Union{Vector{Int}, Tuple{Vector{Int}, Vector{<:AbstractFloat}}} # The bare minimum for `sample` interface-- covers all 4 other definitions. # vsample(::Type{S}, A, n_sim, n_cat; dims=:) where {S} = vsample(S, A, n_sim, n_cat, dims) # vsample(::Type{S}, A, n_sim; dims=:) where {S} = vsample(S, A, n_sim, num_cat(A), dims) # vsample(::Type{S}, A, n_sim::Int, n_cat::Int, dims::Int) where {S} = vsample(S, A, n_sim, n_cat, (dims,)) # function vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} # Dᴬ = size(A) # Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) # B = fill!(similar(A, S, Dᴮ), zero(S)) # vsample!(B, A) # end # function vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} # B = fill!(similar(A, S, (n_sim, n_cat)), zero(S)) # vsample!(B, A) # end #### A revised public interface """ vsample(::Type{T<:Real}=Int, A::AbstractArray, n_sim::Int; [dims=:], [n_cat=nothing]) See `sample` for full documentation; identical in behavior except that the underlying Marsaglia sampler uses `LoopVectorization`. This sometimes, but not always, confers a speedup. See also: [`vtsample`](@ref), [`sample`](@ref), [`tsample`](@ref) """ vsample(::Type{S}, A, n_sim; dims=:, n_cat=nothing) where {S<:Real} = _vsample(S, A, n_sim, n_cat, dims) vsample(A, n_sim; dims=:, n_cat=nothing) = _vsample(Int, A, n_sim, n_cat, dims) _vsample(::Type{S}, A, n_sim, n_cat::Int, dims::Int) where {S<:Real} = _vsample(S, A, n_sim, n_cat, (dims,)) _vsample(::Type{S}, A, n_sim, ::Nothing, dims) where {S<:Real} = _vsample(S, A, n_sim, num_cat(A), dims) function _vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...)
B = fill!(similar(A, S, Dᴮ), zero(S)) vsample!(B, A) end function _vsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_sim, n_cat)), zero(S)) vsample!(B, A) end ################ # The expected case: vectors of sparse vectors (as their bare components) """ vsample!(B::AbstractArray, A::AbstractArray) Identical to `sample!` except that the underlying Marsaglia sampler uses `LoopVectorization`. See also: [`vtsample!`](@ref), [`sample!`](@ref), [`tsample!`](@ref) """ function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for (Iₛ, p) ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end # A simplification: an array of sparse vectors (as bare components) function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ, p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end B end # The simplest case: a sparse vector (as bare components) _vsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) vsample!(B, A) end function vsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, p = A K, V = sqhist(p) C = vgenerate(K, V, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c]] += one(S) end B end ################ # Specialized method for eltype(A)::Vector{Vector{Int}} # or, in other words, where the probability mass on each element is 1 / length(Iₛ) function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for Iₛ ∈ a n = length(Iₛ) vgenerate!(C, U, n) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end # A simplification:
an array of sparse vectors function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}) where {S<:Real, N′} where {N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) Iₛ = A[IA] n = length(Iₛ) vgenerate!(C, U, n) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end B end # # The simplest case: a sparse vector _vsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} B = zeros(S, n_sim, n_cat) vsample!(B, A) end # Oddly, the fastest vsampler is non-allocating -- most likely due to # the elimination of store + access instructions associated with using a temporary array. function vsample!(B::AbstractMatrix{S}, Iₛ::Vector{Int}) where {S<:Real} _check_reducedims(B, Iₛ) n = length(Iₛ) C = vgenerate(n, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c]] += one(S) end B end ################ # General case: dense vectors, the linear index of which indicates the category function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for p ∈ a n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, c, IR] += one(S) end end end B end # A simplification: an array of dense vectors function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}) where {S<:Real, N′} where {T<:AbstractFloat, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(T, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) p = A[IA] n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, c, IR] += one(S) end end B end # The simplest case: a dense vector _vsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) vsample!(B, A) end function vsample!(B::AbstractMatrix{S}, A::Vector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) K, V = sqhist(A) C = vgenerate(K, V, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, c] += one(S) end B end ################ # 
General case: sparse vectors, the nzval of which indicates the category function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) a = A[IA] for sv ∈ a Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end end B end # A simplification: an array of sparse vectors function vsample!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N} _check_reducedims(B, A) keep, default = Broadcast.shapeindexer(axes(B)[3:end]) C, U = _genstorage_init(Float64, size(B, 1)) K, V, q = _sqhist_init(Tv, 0) large, small = _largesmall_init(0) @inbounds for IA ∈ CartesianIndices(A) IR = Broadcast.newindex(IA, keep, default) sv = A[IA] Iₛ, p = sv.nzind, sv.nzval n = length(p) resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n) sqhist!(K, V, large, small, q, p) vgenerate!(C, U, K, V) for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c], IR] += one(S) end end B end # The simplest case: a sparse vector _vsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int) where {S<:Real} where {T<:AbstractFloat} = _vsample(S, A, n_sim, n_cat, :) _vsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}) where {S<:Real} where {T<:AbstractFloat} where {N} = _vsample(S, A, n_sim, n_cat, :) function _vsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon) where {S<:Real} where {T<:AbstractFloat} B = zeros(S, n_sim, n_cat) vsample!(B, A) end function vsample!(B::AbstractMatrix{S}, A::SparseVector{T}) where {S<:Real} where {T<:AbstractFloat} _check_reducedims(B, A) Iₛ, p = A.nzind, A.nzval K, V = sqhist(p) C = vgenerate(K, V, size(B, 1)) @inbounds for (i′, i) ∈ enumerate(axes(B, 1)) c = C[i′] B[i, Iₛ[c]] += one(S) end B end
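################################################################
# Usage sketch (hypothetical input; assumes SparseArrays is loaded, as elsewhere
# in the package): a drop-in replacement for `sample`:
# sv = SparseVector(5, [1, 4], [0.3, 0.7])
# B = vsample(Int, sv, 1_000)   # 1_000×5 counts; columns 2, 3, 5 remain zero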
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
14522
# # Date created: 2022-06-12 # Author: aradclif # # ############################################################################################ # mirror of sampler.jl; separate file for variants on threading # The bare minimum for `sample` interface-- covers all 4 other definitions. """ vtsample(::Type{T<:Real}=Int, A::AbstractArray, n_sim::Int; [dims=:], [n_cat=nothing], [chunksize=5000]) See `sample` for full documentation; identical in behavior except that thread-based parallelism is used to accelerate the computation; also, the underlying Marsaglia sampler uses `LoopVectorization`. The optional `chunksize` keyword argument provides a fixed upper bound for the number of simulations to be passed to each thread. Depending on the number of independent categorical distributions to be sampled and the number of simulations to be performed, a `chunksize` of `5000` is likely too conservative. The user is encouraged to try smaller chunk sizes. See also: [`vsample`](@ref), [`sample`](@ref), [`tsample`](@ref) """ vtsample(::Type{S}, A, n_sim; dims=:, n_cat=nothing, chunksize=5000) where {S<:Real} = _vtsample(S, A, n_sim, n_cat, dims, chunksize) vtsample(A, n_sim; dims=:, n_cat=nothing, chunksize=5000) = _vtsample(Int, A, n_sim, n_cat, dims, chunksize) _vtsample(::Type{S}, A, n_sim, n_cat::Int, dims::Int, chunksize) where {S<:Real} = _vtsample(S, A, n_sim, n_cat, (dims,), chunksize) _vtsample(::Type{S}, A, n_sim, ::Nothing, dims, chunksize) where {S<:Real} = _vtsample(S, A, n_sim, num_cat(A), dims, chunksize) function _vtsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, dims::NTuple{P, Int}, chunksize::Int) where {S<:Real} where {P} where {T, N} Dᴬ = size(A) Dᴮ = tuple(n_sim, n_cat, ntuple(d -> d ∈ dims ? 1 : Dᴬ[d], Val(N))...) B = fill!(similar(A, S, Dᴮ), zero(S)) vtsample!(B, A, chunksize) end function _vtsample(::Type{S}, A::AbstractArray{T, N}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T, N} B = fill!(similar(A, S, (n_sim, n_cat)), zero(S)) vtsample!(B, A, chunksize) end """ vtsample!(B::AbstractArray, A::AbstractArray; [chunksize=5000]) Identical to `sample!` except that thread-based parallelism is used to accelerate the computation; also, the underlying Marsaglia sampler uses `LoopVectorization`.
See also: [`vsample!`](@ref), [`sample!`](@ref), [`tsample!`](@ref)
"""
vtsample!(B, A; chunksize::Int=5000) = vtsample!(B, A, chunksize)
function vtsample!(B, A, chunksize::Int)
    _check_reducedims(B, A)
    keep, default = Broadcast.shapeindexer(axes(B)[3:end])
    rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize)
    @batch for r in rs
        _vsample_chunk!(B, A, keep, default, r)
    end
    return B
end

################
# The expected case: vectors of sparse vectors (as their bare components)
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Tuple{Vector{Int}, Vector{T}}, M}, N} where {T<:AbstractFloat, M}
    C, U = _genstorage_init(Float64, length(ℐ))
    K, V, q = _sqhist_init(T, 0)
    large, small = _largesmall_init(0)
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        a = A[IA]
        for (Iₛ, p) ∈ a
            n = length(p)
            resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n)
            sqhist!(K, V, large, small, q, p)
            vgenerate!(C, U, K, V)
            for l ∈ eachindex(C, ℐ)
                c = C[l]
                i = ℐ[l]
                B[i, Iₛ[c], IR] += one(S)
            end
        end
    end
    return B
end

# A simplification: an array of sparse vectors
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{Tuple{Vector{Int}, Vector{T}}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N}
    L = length(ℐ)
    C, U = _genstorage_init(Float64, L)
    K, V, q = _sqhist_init(T, 0)
    large, small = _largesmall_init(0)
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        Iₛ, p = A[IA]
        n = length(p)
        resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n)
        sqhist!(K, V, large, small, q, p)
        vgenerate!(C, U, K, V)
        for l ∈ eachindex(C, ℐ)
            c = C[l]
            i = ℐ[l]
            B[i, Iₛ[c], IR] += one(S)
        end
    end
    return B
end

# The simplest case: a sparse vector
_vtsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
_vtsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} where {N} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
function _vtsample(::Type{S}, A::Tuple{Vector{Int}, Vector{T}}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T<:AbstractFloat}
    B = zeros(S, n_sim, n_cat)
    vtsample!(B, A, chunksize)
end

function vtsample!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{<:AbstractFloat}}, chunksize::Int) where {S<:Real}
    _check_reducedims(B, A)
    rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize)
    @batch for r in rs
        _vsample_chunk!(B, A, r)
    end
    return B
end

function _vsample_chunk!(B::AbstractMatrix{S}, A::Tuple{Vector{Int}, Vector{T}}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat}
    Iₛ, p = A
    K, V = sqhist(p)
    C = vgenerate(K, V, length(ℐ))
    @inbounds for l ∈ eachindex(C, ℐ)
        c = C[l]
        i = ℐ[l]
        B[i, Iₛ[c]] += one(S)
    end
    return B
end

function _vsample_chunk!(B::AbstractMatrix{S}, A::Tuple{AbstractVector{Int}, AbstractVector{T}}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat}
    Iₛ, p = A
    n = length(Iₛ)
    Iₛp = (copyto!(Vector{Int}(undef, n), Iₛ), copyto!(Vector{T}(undef, n), p))
    _vsample_chunk!(B, Iₛp, ℐ)
end

################
# Specialized method for eltype(A)::Vector{Vector{Int}}
# or, in other words, where the probability mass on each element is 1 / length(Iₛ)
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{Int}, M}, N} where {M}
    C, U = _genstorage_init(Float64, length(ℐ))
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        a = A[IA]
        for Iₛ ∈ a
            n = length(Iₛ)
            vgenerate!(C, U, n)
            for l ∈ eachindex(C, ℐ)
                c = C[l]
                i = ℐ[l]
                B[i, Iₛ[c], IR] += one(S)
            end
        end
    end
    return B
end

# A simplification: an array of sparse vectors
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{Int}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {N}
    C, U = _genstorage_init(Float64, length(ℐ))
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        Iₛ = A[IA]
        n = length(Iₛ)
        vgenerate!(C, U, n)
        for l ∈ eachindex(C, ℐ)
            c = C[l]
            i = ℐ[l]
            B[i, Iₛ[c], IR] += one(S)
        end
    end
    return B
end

# The simplest case: a sparse vector
_vtsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
_vtsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {N} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
function _vtsample(::Type{S}, A::Vector{Int}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real}
    B = zeros(S, n_sim, n_cat)
    vtsample!(B, A, chunksize)
end

function vtsample!(B::AbstractMatrix{S}, A::Vector{Int}, chunksize::Int) where {S<:Real}
    _check_reducedims(B, A)
    rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize)
    @batch for r in rs
        _vsample_chunk!(B, A, r)
    end
    return B
end

function _vsample_chunk!(B::AbstractMatrix{S}, A::AbstractVector{Int}, ℐ::UnitRange{Int}) where {S<:Real}
    Iₛ = A
    n = length(Iₛ)
    C = vgenerate(n, length(ℐ))
    @inbounds for l ∈ eachindex(C, ℐ)
        c = C[l]
        i = ℐ[l]
        B[i, Iₛ[c]] += one(S)
    end
    return B
end

################
# General case: dense vectors, the linear index of which indicates the category
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{Vector{T}, M}, N} where {T<:AbstractFloat, M}
    C, U = _genstorage_init(Float64, length(ℐ))
    K, V, q = _sqhist_init(T, 0)
    large, small = _largesmall_init(0)
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        a = A[IA]
        for p ∈ a
            n = length(p)
            resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n)
            sqhist!(K, V, large, small, q, p)
            vgenerate!(C, U, K, V)
            for l ∈ eachindex(C, ℐ)
                c = C[l]
                i = ℐ[l]
                B[i, c, IR] += one(S)
            end
        end
    end
    return B
end

# A simplification: an array of dense vectors
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{Vector{T}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {T<:AbstractFloat, N}
    C, U = _genstorage_init(Float64, length(ℐ))
    K, V, q = _sqhist_init(T, 0)
    large, small = _largesmall_init(0)
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        p = A[IA]
        n = length(p)
        resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n)
        sqhist!(K, V, large, small, q, p)
        vgenerate!(C, U, K, V)
        for l ∈ eachindex(C, ℐ)
            c = C[l]
            i = ℐ[l]
            B[i, c, IR] += one(S)
        end
    end
    return B
end

# The simplest case: a dense vector
_vtsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
_vtsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} where {N} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
function _vtsample(::Type{S}, A::Vector{T}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T<:AbstractFloat}
    B = zeros(S, n_sim, n_cat)
    vtsample!(B, A, chunksize)
end

function vtsample!(B::AbstractMatrix{S}, A::Vector{T}, chunksize::Int) where {S<:Real, T<:AbstractFloat}
    _check_reducedims(B, A)
    rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize)
    @batch for r in rs
        _vsample_chunk!(B, A, r)
    end
    return B
end

function _vsample_chunk!(B::AbstractMatrix{S}, A::AbstractVector{T}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat}
    p = copyto!(Vector{T}(undef, length(A)), A)
    K, V = sqhist(p)
    C = vgenerate(K, V, length(ℐ))
    @inbounds for l ∈ eachindex(C, ℐ)
        c = C[l]
        i = ℐ[l]
        B[i, c] += one(S)
    end
    return B
end

################
# General case: sparse vectors, the nzind of which indicates the category
# (the nzval being the corresponding probability mass)
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{R, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {R<:AbstractArray{SparseVector{Tv, Ti}, M}, N} where {Tv<:AbstractFloat, Ti<:Integer, M}
    C, U = _genstorage_init(Float64, length(ℐ))
    K, V, q = _sqhist_init(Tv, 0)
    large, small = _largesmall_init(0)
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        a = A[IA]
        for sv ∈ a
            Iₛ, p = sv.nzind, sv.nzval
            n = length(p)
            resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n)
            sqhist!(K, V, large, small, q, p)
            vgenerate!(C, U, K, V)
            for l ∈ eachindex(C, ℐ)
                c = C[l]
                i = ℐ[l]
                B[i, Iₛ[c], IR] += one(S)
            end
        end
    end
    return B
end

# A simplification: an array of sparse vectors
function _vsample_chunk!(B::AbstractArray{S, N′}, A::AbstractArray{SparseVector{Tv, Ti}, N}, keep, default, ℐ::UnitRange{Int}) where {S<:Real, N′} where {Tv<:AbstractFloat, Ti<:Integer, N}
    C, U = _genstorage_init(Float64, length(ℐ))
    K, V, q = _sqhist_init(Tv, 0)
    large, small = _largesmall_init(0)
    @inbounds for IA ∈ CartesianIndices(A)
        IR = Broadcast.newindex(IA, keep, default)
        sv = A[IA]
        Iₛ, p = sv.nzind, sv.nzval
        n = length(p)
        resize!(K, n); resize!(V, n); resize!(large, n); resize!(small, n); resize!(q, n)
        sqhist!(K, V, large, small, q, p)
        vgenerate!(C, U, K, V)
        for l ∈ eachindex(C, ℐ)
            c = C[l]
            i = ℐ[l]
            B[i, Iₛ[c], IR] += one(S)
        end
    end
    return B
end

# The simplest case: a sparse vector
_vtsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::Int, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
_vtsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, dims::NTuple{N, Int}, chunksize::Int) where {S<:Real} where {T<:AbstractFloat} where {N} =
    _vtsample(S, A, n_sim, n_cat, :, chunksize)
function _vtsample(::Type{S}, A::SparseVector{T}, n_sim::Int, n_cat::Int, ::Colon, chunksize::Int) where {S<:Real} where {T<:AbstractFloat}
    B = zeros(S, n_sim, n_cat)
    vtsample!(B, A, chunksize)
end

function vtsample!(B::AbstractMatrix{S}, A::SparseVector{<:AbstractFloat}, chunksize::Int) where {S<:Real}
    _check_reducedims(B, A)
    rs = splitranges(firstindex(B, 1):lastindex(B, 1), chunksize)
    @batch for r in rs
        _vsample_chunk!(B, A, r)
    end
    return B
end

function _vsample_chunk!(B::AbstractMatrix{S}, A::SparseVector{T}, ℐ::UnitRange{Int}) where {S<:Real} where {T<:AbstractFloat}
    Iₛ, p = A.nzind, A.nzval
    K, V = sqhist(p)
    C = vgenerate(K, V, length(ℐ))
    @inbounds for l ∈ eachindex(C, ℐ)
        c = C[l]
        i = ℐ[l]
        B[i, Iₛ[c]] += one(S)
    end
    return B
end
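################
# Usage sketch (not part of the library; the weights, sizes, and chunk size
# below are made up for illustration). The chunked driver above splits the
# simulation rows into `chunksize`-long ranges and hands each to a task via
# `@batch`; each chunk allocates its own working storage, so chunks are independent.
# A = ([1, 3, 5], [0.2, 0.5, 0.3])    # sparse vector: categories, probabilities
# B = zeros(Int, 10_000, 5)           # n_sim × n_cat count matrix
# vtsample!(B, A; chunksize=2500)     # 4 chunks of rows, sampled in parallel
# @assert all(==(1), sum(B, dims=2))  # exactly one draw per simulation row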
################################################################
# Tests of normalization methods @testset "misc type inference functions" begin @test _typeofinv(1) === Float64 @test _typeofinv(Int) === Float64 @test _typeofinv(Rational{Int64}) === Rational{Int64} # ws = ([1,2,3], fill(1/6, 6), fill(1//10, 9)) @test _typeofprod(ws) === Float64 ws2 = (ws[1], fill(1//6, 6), ws[3]) @test _typeofprod(ws2) === Rational{Int64} for T ∈ (Int8, Int16, Int32) ws3 = (Int8[1,2,3], T[4,5,6], Int8[7,8,9]) @test _typeofprod(ws3) === Int64 end end @testset "_u, _checku01" begin @test _u(1.0) === 0.5 @test _u(1//1) === 1//2 @test _u(0) === 0.0 @test _u(Inf) === 1.0 @test _u(-1) === -Inf @test _u(-Inf) === -NaN for T ∈ (Int64, UInt, Float64, BigInt, BigFloat, Rational{Int}) @test_nowarn _check_u01(zero(T)) @test_nowarn _check_u01(one(T)) @test_throws DomainError _check_u01(T(2)) end @test_nowarn _check_u01(0.5) @test_nowarn _check_u01(1//2) @test_nowarn _check_u01(big(0.5)) end @testset "algorithm 2.1" begin w = [7.3, 10.2, 5.1, 2.7, 2.89] p = [1.0] for i ∈ eachindex(w) @test algorithm2_1([i], w) ≈ p end for I ∈ ([1, 2], [1, 3], [1, 4], [1, 5], [2, 3], [2, 4], [2, 5], [3, 4], [3, 5], [4, 5], [1, 2, 3], [1, 2, 4], [1, 2, 5], [2, 3, 4], [2, 3, 5], [3, 4, 5], [1, 2, 3, 4], [1, 2, 3, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]) @test algorithm2_1(I, w) ≈ w[I] ./ sum(w[I]) end @test algorithm2_1([1, 1, 1], w) ≈ fill(1/3, 3) # non-Float64 Type handling I1 = [1] I2 = [1, 2] for T ∈ (Float16, Float32, Rational{Int16}, Rational{Int32}, Rational{Int64}, Rational{Int128}) 𝑤 = T.(w) @test @inferred algorithm2_1(I1, 𝑤) ≈ T[1] @test @inferred algorithm2_1(I2, 𝑤) ≈ 𝑤[I2] ./ sum(𝑤[I2]) end # Aberrant behavior @testset "weight < 0" begin w = [-5, 4, 3, 2, 1]; I = [1, 5, 2]; @test algorithm2_1(I, w) == [-Inf, Inf, Inf] I = [2, 3] @test algorithm2_1(I, w) == [4/7, 3/7] I = [1, 2, 2] @test algorithm2_1(I, w) == [-prevfloat(5/3), 4/3, 4/3] end # zeros behavior w = zeros(5) I = [1, 2, 3] @test all(algorithm2_1(I, w) .=== [-NaN, -NaN, -NaN]) w[3] = 5 @test algorithm2_1(I, w) == [0.0, 0.0, 1.0] @testset "NaN handling (lack thereof)" begin # things which generate NaNs by propagation w = [2.0, 10.0, 5.0, 1.0, NaN] I = [1, 3, 5] @test all(algorithm2_1(I, w) .=== [NaN, NaN, NaN]) I = [1, 5, 5] @test all(algorithm2_1(I, w) .=== [NaN, NaN, NaN]) I = [5, 5, 5] @test all(algorithm2_1(I, w) .=== [NaN, NaN, NaN]) # propagating NaNs with signbit set w = [2.0, 10.0, 5.0, 1.0, -NaN] I = [1, 3, 5] @test all(algorithm2_1(I, w) .=== [-NaN, -NaN, -NaN]) I = [1, 5, 5] @test all(algorithm2_1(I, w) .=== [-NaN, -NaN, -NaN]) I = [5, 5, 5] @test all(algorithm2_1(I, w) .=== [-NaN, -NaN, -NaN]) end @testset "±Inf handling (lack thereof)" begin w = [2.0, 10.0, 5.0, 1.0, Inf] I = [1, 3, 5] @test all(algorithm2_1(I, w) .=== [0.0, 0.0, -NaN]) w = [2.0, 10.0, 5.0, 1.0, prevfloat(Inf)] I = [1, 3, 5] p = algorithm2_1(I, w) @test all(!isnan, p) @test all(!isinf, p) # integer overflow w = [5, 4, 3, 1, typemax(Int) - 1] I = [1, 3, 5] @test algorithm2_1(I, w) == [-5.421010862427522e-19, -3.2526065174565133e-19, -1.0] I = [4, 5] # on the cusp of overflow @test algorithm2_1(I, w) == [1.0842021724855044e-19, 1.0] end @testset "sweep precision (SIMD rounding)" begin rng = Xoshiro(1234) w = rand(rng, 256) p = Vector{Float64}(undef, 0) for m = -10:10 w .*= 10.0^m for i = 1:8 for j = -1:1 n = (1 << i) + j I = rand(1:256, n) resize!(p, n) algorithm2_1!(p, I, w) @test all(!iszero, p) @test sum(p) ≈ 1.0 end end end end @testset "algorithm 2.1, type handling" begin w1 = [5/9, 1/9, 3/9, 0, 0] w2 = [5//9, 1//9, 3//9, 0, 0] w3 = [5, 1, 3, 0, 0] p1 = 
[prevfloat(5/6), prevfloat(1/6), 0.0] p2 = [5//6, 1//6, 0] p3 = [prevfloat(5/6), 1/6, 0.0] I = [1, 2, 5] @test @inferred algorithm2_1(I, w1) == p1 @test @inferred algorithm2_1(I, w2) == p2 @test @inferred algorithm2_1(I, w3) == p3 p = similar(p1) @test @inferred algorithm2_1!(p, I, w1) ≈ p3 @test @inferred algorithm2_1!(p, I, w2) ≈ p3 @test @inferred algorithm2_1!(p, I, w3) ≈ p3 # for T ∈ (Float16, Float32, Float64, BigFloat, Int8, Int16, Int32, Int64, BigInt, Rational{Int8}, Rational{Int16}, Rational{Int32}, Rational{Int64}, Rational{Int128}, Rational{BigInt}) 𝑤 = T.(w3) p′ = @inferred algorithm2_1(I, 𝑤) @test p′ ≈ p3 end end end # _alg2_2(Is, ws) = (p = (.*)(getindex.(ws, Is)...); p .* inv(sum(p))) @testset "algorithm 2.2" begin ws = ([1,2,3], fill(1/6, 6), fill(1//10, 9)) ws2 = (ws[1], fill(1//6, 6), ws[3]) Is = ([1,2,3], [4,5,6], [7,8,9]) p = [1.0] p2 = [1//1] for i ∈ 1:3 I = [i] @test @inferred algorithm2_2((I, I, I), ws) == p @test @inferred algorithm2_2((I, I, I), ws2) == p2 end p′ = [1/6, 1/3, 1/2] p2′ = [1//6, 1//3, 1//2] @test @inferred algorithm2_2(Is, ws) == p′ @test @inferred algorithm2_2(Is, ws2) == p2′ # Float products I = [1,2,3] w = fill(1/2,6) for n = 1:10 Is = ntuple(_ -> I, n) ws = ntuple(_ -> w, n) p = @inferred algorithm2_2(Is, ws) @test all(p .≈ 1/3) end # Rational products w2 = fill(1//2,6) for n = 1:10 Is = ntuple(_ -> I, n) ws = ntuple(_ -> w2, n) p = @inferred algorithm2_2(Is, ws) @test all(p .== 1//3) end # Mixed products w3 = fill(1,6) for n = 2:2:12 Is = ntuple(_ -> I, n) ws = tuple(ntuple(_ -> w, n >> 1)..., ntuple(_ -> w2, n >> 1)...) p = @inferred algorithm2_2(Is, ws) @test all(p .≈ 1/3) ws = tuple(ntuple(_ -> w, n >> 1)..., ntuple(_ -> w3, n >> 1)...) p = @inferred algorithm2_2(Is, ws) @test all(p .≈ 1/3) Base.setindex(ws, w2, lastindex(ws)) p = @inferred algorithm2_2(Is, ws) @test all(p .≈ 1/3) end # Aberrant behavior @testset "weight < 0" begin w = [-5, 4, 3, 2, 1]; I = [1, 5, 2]; Is = (I, [1,2,3], [1,2,3]) ws = (w, fill(1/6, 6), fill(1//10, 9)) @test algorithm2_2(Is, ws) == [-Inf, Inf, Inf] Is = Base.setindex(Is, [2,3,4], 1) @test algorithm2_2(Is, ws) == nextfloat.([4/9, 3/9, 2/9]) Is = Base.setindex(Is, [1,2,2], 1) @test algorithm2_2(Is, ws) == [-prevfloat(5/3), 4/3, 4/3] end # zeros behavior w = zeros(5) ws = (w, w, w) I = [1, 2, 3] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [-NaN, -NaN, -NaN]) w[3] = 5 @test algorithm2_2(Is, ws) == [0.0, 0.0, 1.0] @testset "NaN handling (lack thereof)" begin # things which generate NaNs by propagation w = [2.0, 10.0, 5.0, 1.0, NaN] ws = (w, w, w) I = [1, 3, 5] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [NaN, NaN, NaN]) I = [1, 5, 5] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [NaN, NaN, NaN]) I = [5, 5, 5] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [NaN, NaN, NaN]) # propagating NaNs with signbit set w = [2.0, 10.0, 5.0, 1.0, -NaN] ws = (w, w, w) I = [1, 3, 5] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [-NaN, -NaN, -NaN]) I = [1, 5, 5] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [-NaN, -NaN, -NaN]) I = [5, 5, 5] Is = (I, I, I) @test all(algorithm2_2(Is, ws) .=== [-NaN, -NaN, -NaN]) end @testset "±Inf handling (lack thereof)" begin I = [1, 3, 5] Is = (I, I, I) w = [2.0, 10.0, 5.0, 1.0, Inf] ws = (w, w, w) @test all(algorithm2_2(Is, ws) .=== [0.0, 0.0, -NaN]) w = [2.0, 10.0, 5.0, 1.0, prevfloat(Inf)] ws = (w, w, w) p = algorithm2_2(Is, ws) @test all(p .=== [0.0, 0.0, -NaN]) @test all(!isinf, p) # integer overflow w = [5, 4, 3, 1, typemax(Int) - 1] ws = (w, w, w) @test 
algorithm2_2(Is, ws) == [0.8680555555555555, 0.1875, -0.05555555555555555] end @testset "sweep precision (SIMD rounding)" begin rng = Xoshiro(0xd123456789abcdef) w = rand(rng, 256) p = Vector{Float64}(undef, 0) for M = 2:5 for m = -10:10 w .*= 10.0^m ws = ntuple(_ -> w, M) for i = 1:8 for j = -1:1 n = (1 << i) + j Is = ntuple(_ -> rand(rng, 1:256, n), M) resize!(p, n) algorithm2_2!(p, Is, ws) @test all(!iszero, p) @test sum(p) ≈ 1.0 end end end end end end @testset "algorithm 3" begin w = [0.0, 10.0, 5.0, 0.0, 2.5] p = [0.25, 0.2857142857142857, 0.14285714285714285, 0.25, 0.07142857142857142] p′ = algorithm3(w, 0.5) @test sum(p′) ≈ 1.0 @test p ≈ p′ @test algorithm3(w, 1.0) == [0.5, 0.0, 0.0, 0.5, 0.0] # u not 0 ≤ u ≤ 1 p = [0.75, -0.2857142857142857, -0.14285714285714285, 0.75, -0.07142857142857142] @test_throws DomainError algorithm3(w, 1.5) @test_throws DomainError algorithm3(w, -0.5) # @test p ≈ p′ # @test sum(p′) ≈ 1.0 # throws where appropriate @test_throws DimensionMismatch algorithm3!(p, zeros(6), 0.5) @test_throws DimensionMismatch algorithm3!(zeros(6), p, 0.5) # zeros behavior p = algorithm3(w, 0.0) @test p == [0.0, 0.5714285714285714, 0.2857142857142857, 0.0, 0.14285714285714285] algorithm3!(p, 1.0) @test p == [0.5, 0.0, 0.0, 0.5, 0.0] algorithm3!(p, 0.0) @test p == [0.5, 0.0, 0.0, 0.5, 0.0] p .= 0 algorithm3!(p, 1.0) @test p == fill(0.2, 5) p .= 0 algorithm3!(p, 0.0) @test all(p .=== -NaN) # w = zeros(5) for u ∈ (nextfloat(0.0), eps(), 0.5, prevfloat(1.0), 1.0) algorithm3!(p, w, u) @test p == fill(0.2, 5) end @testset "NaN handling (lack thereof)" begin # things which generate NaNs w = [0.0, 10.0, 5.0, 0.0, NaN] p = [0.25, NaN, NaN, 0.25, NaN] p′ = algorithm3(w, 0.5) @test isequal(p′, p) @test p′[1] + p′[4] == 0.5 w = [1.0, 2.0, 3.0, NaN] p′ = algorithm3(w, 0.5) @test isequal(p′, fill(NaN, 4)) end @testset "±Inf handling (lack thereof)" begin w = [0.0, 10.0, 5.0, 0.0, Inf] p = [0.25, 0.0, 0.0, 0.25, NaN] p′ = algorithm3(w, 0.5) @test isequal(p′, p) @test sum(p′[1:4]) == 0.5 w = [0.0, 10.0, 5.0, 0.0, -Inf] p = [0.25, -0.0, -0.0, 0.25, -NaN] p′ = algorithm3(w, 0.5) @test all(p′ .=== p) @test sum(p′[1:4]) == 0.5 w = [1.0, 2.0, 3.0, Inf] p = [0.0, 0.0, 0.0, NaN] p′ = algorithm3(w, 0.5) @test isequal(p′, p) # Inf * inv(Inf) -> NaN w = [0.0, 0.0, 0.0, Inf] p′ = algorithm3(w, 0.5) @test sum(p′[1:3]) ≈ 0.5 @test isnan(p′[4]) # usually === -NaN, but maybe not always w = [0.0, 0.0, 0.0, -Inf] p′ = algorithm3(w, 0.5) @test sum(p′[1:3]) ≈ 0.5 @test isnan(p′[4]) # usually === -NaN, but maybe not always end @testset "algorithm 3, sweep precision (SIMD rounding)" begin rng = Xoshiro(1234) w = rand(rng, 256) w[rand(rng, 1:256, 64)] .= 0 x = copyto!(similar(w), w) p = similar(w) for u = 0.000001:.000001:0.999999 w .= x .* u algorithm3!(p, w, u) @test all(!iszero, p) @test sum(p) ≈ 1.0 end # When u = 1.0, all non-zero elements become zero u = 1.0 w .= x .* u algorithm3!(p, w, u) @test count(iszero, p) == 197 # PRNG initialized as above gives 59 non-zeros @test sum(p) ≈ 1.0 # When u = 0.0, all non-zero elements are normalized but zero elements remain same u = 0.0 w .= x algorithm3!(p, w, u) @test count(iszero, p) == 59 @test sum(p) ≈ 1.0 # Large values -- potentially sensitive to very small u's around 1e-6 for u = 0.0001:0.0001:0.9999 for i = 1:20 n = 1 << i w .= x .* n algorithm3!(p, w, u) @test all(!iszero, p) @test sum(p) ≈ 1.0 end end end @testset "algorithm 3, type handling" begin p = [5/9, 1/9, 3/9, 0, 0] p2 = [5//9, 1//9, 3//9, 0, 0] # conversions should work and be type stable throughout 
@inferred algorithm3!(p, 0.5) @inferred algorithm3!(p2, 1) # while Integer <: Real, it clearly cannot be normalized in a meaningful manner p3 = [5, 1, 3, 0, 0] @test_throws InexactError algorithm3!(p3, 0) @test_throws InexactError algorithm3!(p3, 1) @test_throws DomainError algorithm3!(p3, 999) # w = [5/9, 1/9, 3/9, 0, 0] w2 = [5//9, 1//9, 3//9, 0, 0] w3 = [5, 1, 3, 0, 0] p = [5/9, 1/9, 3/9, 0, 0] p2 = [5//9, 1//9, 3//9, 0, 0] p3 = [5, 1, 3, 0, 0] @inferred algorithm3!(p, w, 0.5) @inferred algorithm3!(p, w3, 0.5) @inferred algorithm3!(p2, w2, 0.5) @inferred algorithm3!(p2, w2, 1//2) @test_throws InexactError algorithm3!(p3, w, 1) @test_throws InexactError algorithm3!(p3, w3, 1) @test_throws InexactError algorithm3!(p3, w2, 1) @test_throws InexactError algorithm3!(p3, w2, 1) ps = (algorithm3(w3, 1//2), algorithm3(w3, 0.5), algorithm3(w3, 1), algorithm3(w3, 0)) for T ∈ (Float16, Float32, Float64, BigFloat, Int8, Int16, Int32, Int64, BigInt, Rational{Int8}, Rational{Int16}, Rational{Int32}, Rational{Int64}, Rational{Int128}, Rational{BigInt}) 𝑤 = T.(w3) for (i, u) ∈ enumerate((1//2, 0.5, 1, 0)) p = @inferred algorithm3(𝑤, u) @test p ≈ ps[i] end end end end @testset "normalize1" begin # An aberrant case w = [-1.0, 1.0, 1.0] p = normalize1(w) @test p == w @test sum(p) ≈ 1.0 w = abs.(w) p = normalize1(w) @test p == [1/3, 1/3, 1/3] @test sum(p) ≈ 1.0 # The usual things A = [1.0 2.0; 3.0 4.0] B = normalize1(A) @test sum(B) ≈ 1.0 normalize1!(B) @test sum(B) ≈ 1.0 A[1] = -1.0 normalize1!(B, A) @test sum(B) ≈ 1.0 @test any(≤(0.0), B) @testset "NaN handling (lack thereof)" begin # things which generate NaNs A = zeros(2,2) @test isequal(normalize1(A), fill(NaN, 2,2)) # preexisting NaN(s) A = [0.0 NaN; 0.0 1.0] @test isequal(normalize1(A), fill(NaN, 2,2)) A = fill(NaN, 2,2) @test isequal(normalize1(A), fill(NaN, 2,2)) A = fill(-NaN, 2,2) B = normalize1(A) @test all(A .=== B) @test all(A .=== -NaN) end @testset "±Inf handling (lack thereof)" begin A = [Inf Inf; Inf Inf] @test isequal(normalize1(A), fill(NaN, 2,2)) A = [0.0 Inf; 0.0 1.0] @test isequal(normalize1(A), [0.0 NaN; 0.0 0.0]) A = [0.0 -Inf; 0.0 1.0] B = normalize1(A) @test isequal(B, [-0.0 NaN; -0.0 -0.0]) @test isequal(B, [-0.0 -NaN; -0.0 -0.0]) # is the negative bit on the NaN set? (it should be) @test B[1,2] === -NaN @test B[1,2] !== NaN # propagating negative bit normalize1!(A, B) @test all(A .=== -NaN) end # Sweeps: rounding errors, etc. 
rng = Xoshiro(0x434b089281805289) for i = 1:20 for j = -1:1 n = (1 << i) + j w = rand(rng, n) p = normalize1(w) @test all(!iszero, p) @test sum(p) ≈ 1.0 @test p ≈ normalize1(p) # x = copyto!(similar(w), w) # for u = 0.0001:0.0001:0.9999 # w .= x .* u # normalize1!(p, w) # @test all(!iszero, p) # @test sum(p) ≈ 1.0 # end end end x = rand(rng, 256) x[rand(rng, 1:256, 64)] .= 0 y = normalize1(x) @test count(iszero, x) == count(iszero, y) @test sum(y) ≈ 1.0 w = similar(x) # Large values for i = 1:20 n = 1 << i w .= x .* n normalize1!(y, w) @test sum(y) ≈ 1.0 w[129:256] .*= n >> i normalize1!(y, w) @test sum(y) ≈ 1.0 end # Small values nz = count(iszero, x) for u = 0.000001:0.000001:0.999999 w .= x .* u normalize1!(y, w) @test sum(y) ≈ 1.0 @test count(iszero, y) == nz # mixed with large w[129:256] .*= 10^6 @test sum(y) ≈ 1.0 @test count(iszero, y) == nz end end @testset "normalizations, 0 unchanged component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 5] w₂ = [10, 20, 10, 20, 10] p = algorithm2_1(I′, w) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/7, 2/7, 1/7, 2/7, 1/7] end @testset "normalizations, 1 unchanged component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 5] w₂ = [10, 20, 0, 20, 10] p = algorithm2_1(I′, w) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q[3] == p[3] @test q[1] + q[2] + q[4] + q[5] == p[1] + p[2] + p[4] + p[5] end @testset "normalizations, 2 unchanged components" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 5] w₂ = [10, 20, 0, 0, 10] p = algorithm2_1(I′, w) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q[3] == p[3] && q[4] == p[4] @test q[1] + q[2] + q[5] == p[1] + p[2] + p[5] end @testset "normalizations, all unchanged components" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 5] w₂ = [0, 0, 0, 0, 0] p = algorithm2_1(I′, w) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test all(q .== p) end @testset "normalizations, 0 unchanged component, 0 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 5] u = 1/2 w₂ = [10, 20, 10, 20, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/7, 2/7, 1/7, 2/7, 1/7] end @testset "normalizations, 0 unchanged component, 1 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 0] u = 1/2 w₂ = [10, 20, 10, 20, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/7, 2/7, 1/7, 2/7, 1/7] end @testset "normalizations, 0 unchanged component, 2 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 0, 0] u = 1/2 w₂ = [10, 20, 10, 20, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/7, 2/7, 1/7, 2/7, 1/7] end @testset "normalizations, 0 unchanged component, all unknown component" begin I′ = [1, 2, 3, 4, 5] w = [0, 0, 0, 0, 0] u = 1/2 w₂ = [10, 20, 10, 20, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/7, 2/7, 1/7, 2/7, 1/7] end @testset "normalizations, 1 unchanged component, 1 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 4, 0] u = 1/2 w₂ = [10, 20, 0, 20, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [0.14166666666666666, 0.2833333333333333, 0.15000000000000002, 0.2833333333333333, 0.14166666666666666] w₂ = [10, 20, 10, 20, 0] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q[5] == u end 
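################
# A compact sketch of the pipeline these testsets exercise (the function names
# are exactly those under test; only the composition shown is illustrative):
# algorithm 2.1 normalizes the selected weights, algorithm 3 spreads a total
# mass `u` over the zero ("unknown") components, and algorithm 4 re-weights
# against a second weight vector, holding components with zero new weight fixed.
# I′ = [1, 2, 3, 4, 5]; w = [2, 1, 3, 4, 0]; u = 1/2; w₂ = [10, 20, 10, 20, 10]
# p = algorithm2_1_algorithm3(I′, w, u)  # fused 2.1 + 3; sum(p) ≈ 1
# q = algorithm4(p, w₂)                  # == w₂ ./ sum(w₂) here, as w₂ ∌ 0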
@testset "normalizations, 1 unchanged component, 2 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 0, 0] u = 1/2 w₂ = [10, 20, 0, 20, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/8, 1/4, 1/4, 1/4, 1/8] w₂ = [10, 20, 10, 20, 0] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q[5] == u / 2 @test q == [1/8, 1/4, 1/8, 1/4, 1/4] end @testset "normalizations, 2 unchanged component, 2 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 0, 0] u = 1/2 w₂ = [10, 20, 0, 0, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == [1/8, 1/4, 1/4, 1/4, 1/8] w₂ = [10, 20, 10, 0, 0] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q[5] == u / 2 @test q == [1/8, 1/4, 1/8, 1/4, 1/4] w₂ = [0, 20, 10, 20, 0] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q[5] == u / 2 @test q == [0.16666666666666666, 0.2333333333333333, 0.11666666666666665, 0.2333333333333333, 0.25] end @testset "normalizations, all unchanged component, 2 unknown component" begin I′ = [1, 2, 3, 4, 5] w = [2, 1, 3, 0, 0] u = 1/2 w₂ = [0, 0, 0, 0, 0] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q == p end @testset "normalizations, 2 unchanged component, all unknown component" begin I′ = [1, 2, 3, 4, 5] w = [0, 0, 0, 0, 0] u = 1/2 w₂ = [10, 20, 0, 0, 10] p = algorithm2_1_algorithm3(I′, w, u) @test sum(p) ≈ 1 q = algorithm4(p, w₂) @test sum(q) ≈ 1 @test q ≈ [0.15, 0.3, 0.2, 0.2, 0.15] end # @testset "Monte Carlo, re-weighted: 1 unchanged component" begin # I′ = [1, 2, 3, 4, 5] # w = [2, 1, 3, 4, 5] # w₂ = [10, 20, 0, 20, 10] # A = weightedmcadd1(Int, (I′, w₂), w, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # end # @testset "Monte Carlo, re-weighted: all unchanged component" begin # I′ = [1, 2, 3, 4, 5] # w = [2, 1, 3, 4, 5] # w₂ = [0, 0, 0, 0, 0] # A = weightedmcadd1(Int, (I′, w₂), w, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # end # @testset "Monte Carlo, re-weighted: 1 unchanged component, 0 unknown component" begin # I′ = [1, 2, 3, 4, 5] # w = [2, 1, 3, 4, 5] # w₂ = [10, 20, 0, 20, 10] # u = 1/2 # A = weightedmcadd1(Int, (I′, w₂), w, u, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w, u) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # end # @testset "Monte Carlo, re-weighted: 1 unchanged component, 1 unknown component" begin # I′ = [1, 2, 3, 4, 5] # w = [2, 1, 3, 4, 0] # w₂ = [10, 20, 0, 20, 10] # u = 1/2 # A = weightedmcadd1(Int, (I′, w₂), w, u, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w, u) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # w = [2, 1, 0, 4, 5] # A = weightedmcadd1(Int, (I′, w₂), w, u, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w, u) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # end # @testset "Monte Carlo, re-weighted: 1 unchanged component, 2 unknown component" begin # I′ = [1, 2, 3, 4, 5] # w = [2, 1, 3, 0, 0] # w₂ = [10, 20, 0, 20, 10] # u = 1/2 # A = weightedmcadd1(Int, (I′, w₂), w, u, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w, u) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # w = [2, 1, 0, 
4, 0] # A = weightedmcadd1(Int, (I′, w₂), w, u, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w, u) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # end # @testset "Monte Carlo, re-weighted: 1 unchanged component, all unknown component" begin # I′ = [1, 2, 3, 4, 5] # w = [0, 0, 0, 0, 0] # w₂ = [10, 20, 0, 20, 10] # u = 1/2 # A = weightedmcadd1(Int, (I′, w₂), w, u, 10) # Σ = sum(A, dims=1) # @test all(==(1), Σ) # weightedmcadd1!(A, (I′, w₂), w, u) # Σ = sum(A, dims=1) # @test all(==(2), Σ) # end @testset "algorithm4 behavior" begin @test isequal(algorithm4(zeros(3), zeros(3)), [NaN, NaN, NaN]) @test !isequal(algorithm4(rand(3), zeros(3)), [NaN, NaN, NaN]) @test isequal(algorithm4(zeros(3), rand(3)), [NaN, NaN, NaN]) end @testset "algorithm4, type handling" begin w₁ = [1, 1, 1, 1, 0] w₂ = [2, 1, 3, 0, 5] p = [3/22, 3/44, 9/44, 1/4, 15/44] pᵣ = [3//22, 3//44, 9//44, 1//4, 15//44] @test algorithm4(w₁, w₂) ≈ p for T ∈ (Float32, Float64, BigFloat, Rational{Int16}, Rational{Int32}, Rational{Int64}, Rational{Int128}, Rational{BigInt}) 𝑤₁ = T.(w₁) @test algorithm4(𝑤₁, w₂) ≈ p rtol=√(eps()) 𝑤₂ = T.(w₂) @test algorithm4(w₁, 𝑤₂) ≈ p rtol=√(eps()) 𝑝 = @inferred algorithm4(𝑤₁, 𝑤₂) @test eltype(𝑝) === T @test 𝑝 ≈ p rtol=∛(eps()) end ################ # Checks and corresponding benchmarks. Use @code_warntype and inspect the type of # p[i] = w₂ᵢ == zero(U) ? c₁ * w₁ᵢ : c₂ * w₂ᵢ # _v2! : c₁ = one(S)/ s₁; c₂ = s₁′ * c₁ / s₂ #### 2-arg # 𝑤₁ = Rational.(w₁) # 𝑤₂ = Rational.(w₂) # 𝐰₁ = Float64.(w₁) # 𝐰₂ = Float64.(w₂) # @code_warntype algorithm4!(p, w₁) # @code_warntype algorithm4!(p, 𝑤₁) # @code_warntype algorithm4!(pᵣ, w₁) # # c₁::Rational{Int}, c₂::Float64 # @code_warntype algorithm4!(pᵣ, 𝐰₁) # @code_warntype algorithm4_v2!(p, w₁) # @code_warntype algorithm4_v2!(p, 𝑤₁) # @code_warntype algorithm4_v2!(pᵣ, w₁) # # c₁::Rational{Int}, c₂::Float64 # @code_warntype algorithm4_v2!(pᵣ, 𝐰₁) # @benchmark algorithm4!(p, w₁) # @benchmark algorithm4!(p, 𝑤₁) # @benchmark algorithm4!(pᵣ, w₁) # @benchmark algorithm4!(pᵣ, 𝐰₁) # @benchmark algorithm4_v2!(p, w₁) # @benchmark algorithm4_v2!(p, 𝑤₁) # @benchmark algorithm4_v2!(pᵣ, w₁) # @benchmark algorithm4_v2!(pᵣ, 𝐰₁) # #### 3-arg # @code_warntype algorithm4!(p, w₁, w₂) # @code_warntype algorithm4!(p, 𝑤₁, w₂) # # s₁::Float64, s₂::Rational{Int} # @code_warntype algorithm4!(p, w₁, 𝑤₂) # # # @code_warntype algorithm4!(pᵣ, w₁, w₂) # @code_warntype algorithm4_v2!(pᵣ, w₁, w₂) # # # @code_warntype algorithm4!(pᵣ, 𝑤₁, w₂) # @code_warntype algorithm4_v2!(pᵣ, 𝑤₁, w₂) # # c₁::Float64, c₂::Rational{Int} # @code_warntype algorithm4!(pᵣ, w₁, 𝑤₂) # @code_warntype algorithm4_v2!(pᵣ, w₁, 𝑤₂) # # # @benchmark algorithm4!($pᵣ, $𝑤₁, $w₂) # @benchmark algorithm4_v2!($pᵣ, $𝑤₁, $w₂) # # # @benchmark algorithm4!($pᵣ, $w₁, $𝑤₂) # @benchmark algorithm4_v2!($pᵣ, $w₁, $𝑤₂) # # # @benchmark algorithm4!($pᵣ, $𝑤₁, $𝑤₂) # @benchmark algorithm4_v2!($pᵣ, $𝑤₁, $𝑤₂) # # # @benchmark algorithm4!($p, $𝑤₁, $w₂) # @benchmark algorithm4_v2!($p, $𝑤₁, $w₂) # # # @benchmark algorithm4!($p, $w₁, $𝑤₂) # @benchmark algorithm4_v2!($p, $w₁, $𝑤₂) # # # @benchmark algorithm4!($p, $𝑤₁, $𝑤₂) # @benchmark algorithm4_v2!($p, $𝑤₁, $𝑤₂) # #### # @code_warntype algorithm4!(p, 𝐰₁, 𝑤₂) # # # @code_warntype algorithm4!(pᵣ, 𝐰₁, 𝐰₂) # @code_warntype algorithm4_v2!(pᵣ, 𝐰₁, 𝐰₂) # # # @code_warntype algorithm4!(pᵣ, 𝑤₁, 𝐰₂) # @code_warntype algorithm4_v2!(pᵣ, 𝑤₁, 𝐰₂) # # s₁::Float64, s₂::Rational{Int} # @code_warntype algorithm4!(pᵣ, 𝐰₁, 𝑤₂) # @code_warntype algorithm4_v2!(pᵣ, 𝐰₁, 𝑤₂) # # # @benchmark 
algorithm4!($pᵣ, $𝑤₁, $𝐰₂) # @benchmark algorithm4_v2!($pᵣ, $𝑤₁, $𝐰₂) # # # @benchmark algorithm4!($pᵣ, $𝐰₁, $𝑤₂) # @benchmark algorithm4_v2!($pᵣ, $𝐰₁, $𝑤₂) # # # @benchmark algorithm4!($pᵣ, $𝐰₁, $𝐰₂) # @benchmark algorithm4_v2!($pᵣ, $𝐰₁, $𝐰₂) # # # @benchmark algorithm4!($p, $𝑤₁, $𝐰₂) # @benchmark algorithm4_v2!($p, $𝑤₁, $𝐰₂) # # # @benchmark algorithm4!($p, $𝐰₁, $𝑤₂) # @benchmark algorithm4_v2!($p, $𝐰₁, $𝑤₂) # # # @benchmark algorithm4!($p, $𝐰₁, $𝐰₂) # @benchmark algorithm4_v2!($p, $𝐰₁, $𝐰₂) # # end @testset "algorithm3, algorithm4, application order effects" begin # 3 -> 4, w₁ ∌ 0, w₂ ∋ 0 w₁ = [1., 2, 3, 4, 5] w₂ = [2, 1, 3, 4, 0] u = 0.5 ω₁ = algorithm3(w₁, u) @test ω₁ ≈ w₁ ./ sum(w₁) ω = algorithm4(ω₁, w₂) @test sum(ω) ≈ 1 @test ω[5] == ω₁[5] @test ω ≉ algorithm4(rand(5), w₂) # 3 -> 4, w₁ ∋ 0, w₂ ∌ 0 w₁ = [1., 2, 3, 4, 0] w₂ = [2, 1, 3, 4, 5] u = 0.5 ω₁ = algorithm3(w₁, u) @test sum(ω₁) ≈ 1 @test 0 ∉ ω₁ @test ω₁[5] == u ω = algorithm4(ω₁, w₂) @test sum(ω) ≈ 1 @test ω ≈ algorithm4(rand(5), w₂) # 3 -> 4, w₁ ∌ 0, w₂ ∌ 0 w₁ = [1., 2, 3, 4, 5] w₂ = [2, 1, 3, 4, 1] u = 0.5 ω₁ = algorithm3(w₁, u) @test ω₁ ≈ w₁ ./ sum(w₁) ω = algorithm4(ω₁, w₂) @test sum(ω) ≈ 1 @test ω[5] ≉ ω₁[5] # 3 -> 4, w₁ ∋ 0, w₂ ∋ 0 # sub-case 1: J₁ ∩ I₂′ = ∅ w₁ = [1., 2, 3, 4, 0] w₂ = [2, 1, 3, 4, 0] u = 0.5 ω₁ = algorithm3(w₁, u) @test sum(ω₁) ≈ 1 @test 0 ∉ ω₁ ω = algorithm4(ω₁, w₂) J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test sum(ω) ≈ 1 @test isdisjoint(J₁, I₂′) @test ω[5] == ω₁[5] @test ω ≉ algorithm4(rand(5), w₂) # sub-case 2: J₁ ∩ I₂′ ≠ ∅ w₁ = [1., 2, 3, 4, 0] w₂ = [2, 1, 0, 0, 5] u = 0.5 ω₁ = algorithm3(w₁, u) @test sum(ω₁) ≈ 1 @test 0 ∉ ω₁ ω = algorithm4(ω₁, w₂) J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test sum(ω) ≈ 1 @test !isdisjoint(J₁, I₂′) @test ω[3] == ω₁[3] && ω[4] == ω₁[4] #### # 4 -> 3, w₁ ∌ 0, w₂ ∋ 0 # J₁′ = ∅, J₂ ≠ ∅, thus, some elements reweighted (i.e. ∈ I₂′) w₁ = [1., 2, 3, 4, 5] w₂ = [2, 1, 3, 4, 0] u = 0.5 ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁[5] == w₁[5] / sum(w₁) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω == ω₁ # 4 -> 3, w₁ ∋ 0, w₂ ∌ 0 w₁ = [1., 2, 3, 4, 0] w₂ = [2, 1, 3, 4, 1] u = 0.5 ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁[5] ≉ w₁[5] / sum(w₁) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω == ω₁ # 4 -> 3, w₁ ∌ 0, w₂ ∌ 0 w₁ = [1., 2, 3, 4, 5] w₂ = [2, 1, 3, 4, 1] u = 0.5 ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁ ≉ w₁ ./ sum(w₁) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω == ω₁ # 4 -> 3, w₁ ∋ 0, w₂ ∋ 0 # sub-case 1: J₁ ∩ J₂ ≠ ∅, J₁ ∩ I₂′ = ∅ # elements ∈ J₁ ∩ J₂ are remain zero after application of 4, # no zero elements become non-zero as J₁ ∩ I₂′ = ∅ w₁ = [1., 2, 3, 4, 0] w₂ = [2, 1, 3, 4, 0] u = 0.5 J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test !isdisjoint(J₁, J₂) @test isdisjoint(J₁, I₂′) ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁[5] == 0.0 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁ ≈ w₂ ./ sum(w₂) @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′]) @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′]) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω[5] == u # sub-case 2: J₁ ∩ J₂ = ∅, J₁ ∩ I₂′ ≠ ∅ # no zero elements preserved on application of 4. 
w₁ = [1., 2, 3, 4, 0] w₂ = [2, 1, 3, 0, 5] u = 0.5 J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test isdisjoint(J₁, J₂) @test !isdisjoint(J₁, I₂′) ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁[4] == w₁[4] / sum(w₁) @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁ ≉ w₂ ./ sum(w₂) @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′]) @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′]) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω == ω₁ # sub-case 3: J₁ ∩ J₂ ≠ ∅, |J₁| > |J₁ ∩ J₂|, J₁ ∩ I₂′ ≠ ∅ # elements ∈ J₁ ∩ I₂′ become non-zero # elements J₁ ∖ I₂′ = J₁ ∩ J₂ remain the same on application of 4 w₁ = [1., 2, 3, 0, 0] w₂ = [2, 1, 3, 4, 0] u = 0.5 J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test !isdisjoint(J₁, J₂) @test !isdisjoint(J₁, I₂′) ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁[5] == 0.0 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁ ≈ w₂ ./ sum(w₂) @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′]) @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′]) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω[5] == u # sub-case 4: J₁ ∩ J₂ ≠ ∅, |J₂| > |J₁ ∩ J₂|, J₂ ⊇ J₁, J₁ ∩ I₂′ = ∅ # J₁ ∩ J₂ remain zero w₁ = [1., 2, 0, 4, 0] w₂ = [0, 1, 0, 4, 0] u = 0.5 J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test !isdisjoint(J₁, J₂) @test isdisjoint(J₁, I₂′) @test !isdisjoint(J₂, I₁′) @test J₂ ⊇ J₁ ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁[3] == ω₁[5] == 0.0 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁ ≉ w₂ ./ sum(w₂) @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′]) @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′]) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω[3] == ω[5] == u / 2 # sub-case 5: J₁ ∩ J₂ ≠ ∅, |J₂| > |J₁ ∩ J₂|, J₂ ⊉ J₁, J₁ ∩ I₂′ ≠ ∅ # elements ∈ J₁ ∩ I₂′ become non-zero # J₁ ∩ J₂ remain zero w₁ = [1., 2, 0, 4, 0] w₂ = [0, 0, 2, 4, 0] u = 0.5 J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test !isdisjoint(J₁, J₂) @test !isdisjoint(J₁, I₂′) @test !isdisjoint(J₂, I₁′) @test J₂ ⊉ J₁ ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁[5] == 0.0 @test ω₁[3] != 0.0 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁ ≉ w₂ ./ sum(w₂) @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′]) @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′]) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω[5] == u # sub-case 6: J₁ ∩ J₂ ≠ ∅, |J₂| ≯ |J₁ ∩ J₂|, J₂ ⊉ J₁, J₁ ∩ I₂′ ≠ ∅ # elements ∈ J₁ ∩ I₂′ become non-zero # elements ∈ (J₂ ∩ I₁′) ∪ (J₁ ∩ I₂′) are affected # J₁ ∩ J₂ remain zero # elements ∈ J₂ ∩ I₁′ become ωᵢ = w₁ᵢ / sum(w₁) w₁ = [1., 2, 0, 4, 0] w₂ = [2, 0, 3, 0, 0] u = 0.5 J₁ = findall(iszero, w₁) J₂ = findall(iszero, w₂) I₁′ = findall(!iszero, w₁) I₂′ = findall(!iszero, w₂) @test !isdisjoint(J₁, J₂) @test !isdisjoint(J₁, I₂′) @test !isdisjoint(J₂, I₁′) @test J₂ ⊉ J₁ ω₁ = algorithm4(w₁, w₂) @test sum(ω₁) ≈ 1 @test ω₁[2] == w₁[2] / sum(w₁) @test ω₁[4] == w₁[4] / sum(w₁) @test ω₁[3] != 0.0 @test ω₁[5] == 0.0 @test ω₁ ≉ w₁ ./ sum(w₁) @test ω₁ ≉ w₂ ./ sum(w₂) @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′]) @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′]) ω = algorithm3(ω₁, u) @test sum(ω) ≈ 1 @test ω[5] == u # sub-case 7: J₁ ∩ J₂ ≠ ∅, |J₁| > |J₁ ∩ J₂|, J₁ ⊇ J₁, J₁ ∩ I₂′ ≠ ∅, J₂ ∩ I₁′ = ∅ # Essentially, w₂ overwrites w₁, because it re-weights all of the probability mass # elements ∈ J₁ ∩ I₂′ become non-zero w₁ = [1., 2, 0, 0, 0] w₂ = [5, 1, 3, 0, 0] u = 0.5 J₁ = findall(iszero, w₁) J₂ = 
findall(iszero, w₂)
    I₁′ = findall(!iszero, w₁)
    I₂′ = findall(!iszero, w₂)
    @test !isdisjoint(J₁, J₂)
    @test !isdisjoint(J₁, I₂′)
    @test isdisjoint(J₂, I₁′)
    @test J₁ ⊇ J₂
    ω₁ = algorithm4(w₁, w₂)
    @test sum(ω₁) ≈ 1
    @test ω₁[3] != 0.0
    @test ω₁[4] == 0.0
    @test ω₁[5] == 0.0
    @test ω₁ ≉ w₁ ./ sum(w₁)
    @test ω₁ ≈ w₂ ./ sum(w₂)
    @test sum(ω₁[J₂]) ≈ sum(w₁[J₂]) / sum(w₁[I₁′])
    @test sum(w₁[J₂]) / sum(w₁[I₂′]) ≈ sum(ω₁[J₂]) / sum(ω₁[I₂′])
    ω = algorithm3(ω₁, u)
    @test sum(ω) ≈ 1
    @test ω[4] == u / 2
    @test ω[5] == u / 2
end
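################
# A worked miniature of the order dependence checked above (values lifted from
# sub-case 1 of "3 -> 4, w₁ ∋ 0, w₂ ∋ 0"; nothing new is assumed): applying 3
# before 4 lets `u` land on a zero component and survive re-weighting wherever
# w₂ is also zero.
# w₁ = [1., 2, 3, 4, 0]; w₂ = [2, 1, 3, 4, 0]; u = 0.5
# ω₁ = algorithm3(w₁, u)   # ω₁[5] == u
# ω  = algorithm4(ω₁, w₂)  # ω[5] == ω₁[5] since w₂[5] == 0; sum(ω) ≈ 1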
################################################################
using CategoricalMonteCarlo
using Random
using SparseArrays
using Test

using CategoricalMonteCarlo:
    _check_reducedims, splitranges, _typeofinv, _typeofprod, _u, _check_u01, bounds_cat

const tests = [
    "utils.jl",
    "normalizations.jl",
    "sampler.jl",
    "tsampler_batch.jl",
    "vsampler.jl",
    "vtsampler_batch.jl",
]

for t in tests
    @testset "Test $t" begin
        include(t)
    end
end
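# Running the suite (standard Pkg workflow; nothing is assumed beyond the file
# list above and the package's declared test dependencies):
#   using Pkg; Pkg.test("CategoricalMonteCarlo")
# or, with the test environment active and the test directory as cwd:
#   julia -e 'include("runtests.jl")'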
################################################################
# Tests of sampler functionality @testset "sampler, equal probability mass" begin for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{Vector{Int}} A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) # # A simplification: an array of sparse vectors A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) # # The simplest case: a sparse vector A = [1,2,3,4,5,6] B = sample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=2)) A = [1,2,3,4] sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = [1,2] sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) @test_throws MethodError sample(Complex{T}, A, n_sim) A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) @test_throws MethodError sample(Complex{T}, A, n_sim) A = [1,2,3,4,5,6] B = @inferred sample(T, A, n_sim) @test all(==(1), sum(B, dims=2)) A = [1,2,3,4] sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = [1,2] sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test_throws MethodError sample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = @inferred sample(Rational{T}, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = @inferred sample(Rational{T}, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) A = [1,2,3,4,5,6] B = @inferred sample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=2)) A = [1,2,3,4] sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = [1,2] sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end # Real, AbstractFloat, Integer, Signed, Unsigned. 
work but should be avoided A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] @test_throws InexactError sample(Bool, A, 1000) @test_throws MethodError sample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError sample!(B, A) end end @testset "sparse sampler, unequal probability mass" begin for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{Vector{Int}} A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])]] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) # # A simplification: an array of sparse vectors A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) # # The simplest case: a sparse vector A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) B = sample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=2)) A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = ([1, 2], [0.3, 0.7]) sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])]] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) @test_throws MethodError sample(Complex{T}, A, n_sim) A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) @test_throws MethodError sample(Complex{T}, A, n_sim) A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) B = @inferred sample(T, A, n_sim) @test all(==(1), sum(B, dims=2)) A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = ([1, 2], [0.3, 0.7]) sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test_throws MethodError sample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])]] B = @inferred sample(Rational{T}, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test 
all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] B = @inferred sample(Rational{T}, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) B = @inferred sample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=2)) A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = ([1, 2], [0.3, 0.7]) sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end # Real, AbstractFloat, Integer, Signed, Unsigned. work but should be avoided A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05,0.025, 0.025])]] # slight change to increase probability of Inexact throw @test_throws InexactError sample(Bool, A, 1000) @test_throws MethodError sample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError sample!(B, A) end end @testset "dense sampler, (un)equal probability mass" begin for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{Vector{<:AbstractFloat}} A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]]] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) # # A simplification: an array of dense vectors A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) # # The simplest case: a dense vector A = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] B = sample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=2)) A = [0.2, 0.3, 0.4, 0.1] sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = [0.3, 0.7] sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]]] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) @test_throws MethodError sample(Complex{T}, A, n_sim) A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) @test_throws MethodError sample(Complex{T}, A, n_sim) A = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] B = @inferred sample(T, A, n_sim) @test 
all(==(1), sum(B, dims=2)) A = [0.2, 0.3, 0.4, 0.1] sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = [0.3, 0.7] sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test_throws MethodError sample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]]] B = @inferred sample(Rational{T}, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = @inferred sample(Rational{T}, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) A = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] B = @inferred sample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=2)) A = [0.2, 0.3, 0.4, 0.1] sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = [0.3, 0.7] sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end # Real, AbstractFloat, Integer, Signed, Unsigned. work but should be avoided A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.5, 0.2, 0.2, 0.05,0.025, 0.025]]] # slight change to increase probability of Inexact throw @test_throws InexactError sample(Bool, A, 1000) @test_throws MethodError sample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError sample!(B, A) end end @testset "SparseVector sampler, unequal probability mass" begin sv1 = SparseVector(2, [1,2], [0.3, 0.7]) sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sv3 = SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{SparseVector{<:AbstractFloat}} A = [[sv1, sv2, sv3]] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(3), sum(B, dims=2)) @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1]) # # A simplification: an array of SparseVector A = [sv1, sv2, sv3] B = sample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(3), sum(B, dims=(2,3))) @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1]) # # The simplest case: a SparseVector A = sv3 B = sample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=2)) A = sv2 sample!(B, A) @test all(==(2), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1]) A = sv1 sample!(B, A) @test all(==(3), sum(B, dims=2)) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[sv1, sv2, sv3]] B = @inferred sample(T, A, n_sim) @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1]) @test all(≥(0), minimum(B, dims=1)) 
@test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError sample(Complex{T}, A, n_sim)
            A = [sv1, sv2, sv3]
            B = @inferred sample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError sample(Complex{T}, A, n_sim)
            A = sv3
            B = @inferred sample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            sample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            sample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError sample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[sv1, sv2, sv3]]
            B = @inferred sample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [sv1, sv2, sv3]
            B = @inferred sample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = sv3
            B = @inferred sample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            sample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            sample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[sv1, sv2, SparseVector(6, [1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05,0.025, 0.025])]] # slight change to increase probability of Inexact throw
        @test_throws InexactError sample(Bool, A, 1000)
        @test_throws MethodError sample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError sample!(B, A)
    end
    # extra SparseVector tests
    n_sim = 10
    sv1 = SparseVector(10, [1, 10], [0.3, 0.7])
    sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1])
    sv3 = SparseVector(10, [2, 4, 6, 8, 9, 10], [0.1, 0.1, 0.1, 0.1,0.5, 0.1])
    A = [[sv1, sv2, sv3]]
    B = sample(Int, A, n_sim)
    @test all(maximum(B, dims=1) .≤ [2 2 1 2 0 1 0 1 1 2])
    @test all(≥(0), minimum(B, dims=1))
    @test all(==(3), sum(B, dims=2))
    @test all(sum(B, dims=1) .≤ n_sim .* [2 2 1 2 0 1 0 1 1 2])
    A = [sv1, sv2, sv3]
    B = sample(Int, A, n_sim)
    @test all(maximum(B, dims=1) .≤ [2 2 1 2 0 1 0 1 1 2])
    @test all(≥(0), minimum(B, dims=1))
    @test all(==(3), sum(B, dims=2))
    @test all(sum(B, dims=1) .≤ n_sim .* [2 2 1 2 0 1 0 1 1 2])
    B = sample(Int, sv1, n_sim)
    @test all(==(1), sum(B, dims=2))
    sample!(B, sv2)
    @test all(==(2), sum(B, dims=2))
    sample!(B, sv3)
    @test all(==(3), sum(B, dims=2))
    @test all(maximum(B, dims=1) .≤ [2 2 1 2 0 1 0 1 1 2])
    @test all(≥(0), minimum(B, dims=1))
end

@testset "sampler interface throws" begin
    n_sim = 10
    A = [1,2,3,4,5,6]
    @test_throws MethodError sample(Int, A, n_sim, dims=2:2)
    @test_throws MethodError sample(Int, A, n_sim, dims=[1,2,3])
    @test_throws MethodError sample(Int, A, n_sim, dims=[.1 .2 .3 .4])
    A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])
    @test_throws MethodError sample(Int, A, n_sim, dims=2:2)
    @test_throws MethodError sample(Int, A, n_sim, dims=[1,2,3])
    @test_throws MethodError sample(Int, A, n_sim, dims=[.1 .2 .3 .4])
end
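# For contrast with the MethodError throws above, the `dims` values accepted
# throughout these files (see the `region` sweeps) are an Int, an NTuple of
# Int, or Colon; anything else lacks a method. Illustrative calls:
# A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
# sample(Int, A, 10, dims=2)
# sample(Int, A, 10, dims=(1,2))
# sample(Int, A, 10, dims=:)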
@testset "sampler, equal probability mass" begin n_sim = 100 A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] D = fill(A, 10,5,5) # # Specialized method for eltype(A)::Array{Vector{Int}} # Admittedly, not very meaningful test as Pr = 1/2 * 1/4 * 1/6 lPr = length(D) * log(Pr) # necessary to even view as log probability lPr * log10(ℯ) # or on log10 scale B = sample(Int, D, n_sim, dims=(1,2,3)) @test all(maximum(B, dims=1) .≤ length(D) .* [3 3 2 2 1 1]) @test all(minimum(B, dims=1) .≥ 0) @test all(==(length(A) * length(D)), sum(B, dims=2)) end # struct MyReal2{T<:Real} <: Real # x::T # end # Base.zero(x::MyReal2{T}) where {T} = zero(T) # Base.zero(::Type{MyReal2{T}}) where {T} = zero(T) # Base.one(x::MyReal2{T}) where {T} = one(T) # Base.one(::Type{MyReal2{T}}) where {T} = one(T) # import Base.(+) # +(x::MyReal2{T}, y::Number) where {T} = x.x + y # +(x::Number, y::MyReal2{T}) where {T} = x + y.x # +(x::MyReal2{T}, y::MyReal2{T}) where {T} = x.x + y.x # 1 + MyReal2(1) + 1
# Tests of tsampler functionality
@testset "tsampler, equal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{Int}}
                A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of vectors
                A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a vector
                A = [1,2,3,4,5,6]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = [1,2,3,4]
                tsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = [1,2]
                tsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = [1,2,3,4,5,6]
            B = @inferred tsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [1,2,3,4]
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [1,2]
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = [1,2,3,4,5,6]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [1,2,3,4]
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [1,2]
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
        @test_throws InexactError tsample(Bool, A, 1000)
        @test_throws MethodError tsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError tsample!(B, A)
    end
end

@testset "sparse tsampler, unequal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{Tuple{Vector{Int}, Vector{Float64}}}}
                A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of sparse vectors
                A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a sparse vector
                A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
                B = tsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
                tsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = ([1, 2], [0.3, 0.7])
                tsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
            B = @inferred tsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = ([1, 2], [0.3, 0.7])
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = ([1, 2], [0.3, 0.7])
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025])]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError tsample(Bool, A, 1000)
        @test_throws MethodError tsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError tsample!(B, A)
    end
end

@testset "dense tsampler, (un)equal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{<:AbstractFloat}}
                A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of dense vectors
                A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a dense vector
                A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = [0.2, 0.3, 0.4, 0.1]
                tsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = [0.3, 0.7]
                tsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
            B = @inferred tsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [0.2, 0.3, 0.4, 0.1]
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [0.3, 0.7]
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [0.2, 0.3, 0.4, 0.1]
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [0.3, 0.7]
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025]]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError tsample(Bool, A, 1000)
        @test_throws MethodError tsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError tsample!(B, A)
    end
end

@testset "SparseVector tsampler, unequal probability mass" begin
    sv1 = SparseVector(2, [1,2], [0.3, 0.7])
    sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1])
    sv3 = SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{SparseVector{<:AbstractFloat}}
                A = [[sv1, sv2, sv3]]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of SparseVector
                A = [sv1, sv2, sv3]
                B = tsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a SparseVector
                A = sv3
                B = tsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = sv2
                tsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = sv1
                tsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[sv1, sv2, sv3]]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = [sv1, sv2, sv3]
            B = @inferred tsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
            A = sv3
            B = @inferred tsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError tsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[sv1, sv2, sv3]]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [sv1, sv2, sv3]
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = sv3
            B = @inferred tsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            tsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            tsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[sv1, sv2, SparseVector(6, [1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025])]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError tsample(Bool, A, 1000)
        @test_throws MethodError tsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError tsample!(B, A)
    end
end
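# Sanity sketch of the invariant all of the testsets above rely on (an illustration,
# not itself a test): each of the three categorical components contributes exactly
# one draw per simulation, so every row of the count matrix sums to the number of
# components:
#   A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
#   B = tsample(Int, A, 100)
#   all(==(3), sum(B, dims=2))    # 3 components ⇒ row sums of 3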
# Tests of utilities
@testset "_check_reducedims" begin
    A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
    D = fill(A, 10, 5, 5);
    @test_throws DimensionMismatch _check_reducedims(Matrix{Int}(undef, 2, 0), D)
    @test_throws DimensionMismatch _check_reducedims(Matrix{Int}(undef, 2, 2), D)
    @test_throws DimensionMismatch _check_reducedims(Matrix{Int}(undef, 2, 3), D)
    @test_throws DimensionMismatch _check_reducedims(Matrix{Int}(undef, 2, 4), D)
    @test_throws DimensionMismatch _check_reducedims(Matrix{Int}(undef, 2, 5), D)
    @test _check_reducedims(Matrix{Int}(undef, 2, 6), D)
    @test _check_reducedims(Matrix{Int}(undef, 2, 60), D)
    @test _check_reducedims(Matrix{Int}(undef, 1, 6), D)
    @test _check_reducedims(Matrix{Int}(undef, 1, 60), D)
    @test_throws DimensionMismatch _check_reducedims(Array{Int}(undef, 1, 6, 2), D)
    @test _check_reducedims(Array{Int}(undef, 1, 6, 1), D)
    @test_throws DimensionMismatch _check_reducedims(Array{Int}(undef, 1, 6, 10, 2), D)
    @test _check_reducedims(Array{Int}(undef, 1, 6, 10), D)
    @test_throws DimensionMismatch _check_reducedims(Array{Int}(undef, 1, 6, 10, 2, 5), D)
    @test _check_reducedims(Array{Int}(undef, 1, 6, 10, 1, 5), D)
    @test _check_reducedims(Array{Int}(undef, 1, 6, 10, 5, 5), D)
    #
    B0 = Int[]
    B1 = zeros(Int, 2, 1)
    B2 = zeros(Int, 2, 2)
    for prototype ∈ ([1, 2], ([1, 2], [0.4, 0.6]), [0.4, 0.6], SparseVector([0.0, 1.0]))
        @test_throws DimensionMismatch _check_reducedims(B0, prototype)
        @test_throws DimensionMismatch _check_reducedims(B0, [prototype])
        @test_throws DimensionMismatch _check_reducedims(B0, [[prototype]])
        @test_throws DimensionMismatch _check_reducedims(B1, prototype)
        @test_throws DimensionMismatch _check_reducedims(B1, [prototype])
        @test_throws DimensionMismatch _check_reducedims(B1, [[prototype]])
        @test _check_reducedims(B2, prototype)
        @test _check_reducedims(B2, [prototype])
        @test _check_reducedims(B2, [[prototype]])
    end
end

@testset "num_cat" begin
    A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
    @test num_cat(A) == 6
    A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
    @test num_cat(A) == 6
    A = [1,2,3,4,5,6]
    @test num_cat(A) == 6
    A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
    @test num_cat(A) == 6
    A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
    @test num_cat(A) == 6
    A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
    @test num_cat(A) == 6
    A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
    @test num_cat(A) == 6
    A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
    @test num_cat(A) == 6
    A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
    @test num_cat(A) == 6
    sv1 = SparseVector(2, [1,2], [0.3, 0.7])
    sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1])
    sv3 = SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
    A = [[sv1, sv2, sv3]]
    @test num_cat(A) == 6
    A = [sv1, sv2, sv3]
    @test num_cat(A) == 6
    @test num_cat(sv3) == 6
    # empty cases
    @test num_cat(Vector{Vector{Vector{Int}}}()) == 0
    @test num_cat(Vector{Vector{Int}}()) == 0
    @test num_cat(Vector{Int}()) == 0
    @test num_cat(Vector{Vector{Tuple{Vector{Int}, Vector{Float64}}}}()) == 0
    @test num_cat(Vector{Tuple{Vector{Int}, Vector{Float64}}}()) == 0
    @test num_cat((Vector{Int}(), Vector{Float64}())) == 0
    # partially empty
    A = [[1, 2], Int[], [1, 2, 3, 4, 5, 6]]
    @test num_cat(A) == 6
    A = [[[1, 2], [1, 2, 3, 4], Int[]]]
    @test num_cat(A) == 4
    A = [([1, 2], [0.3, 0.7]), (Int[], Float64[]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
    @test num_cat(A) == 6
    A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), (Int[], Float64[])]]
    @test num_cat(A) == 4
end

@testset "splitranges" begin
    b = 16
    for a = -16:16
        ur = a:b
        for c = 1:b
            rs = splitranges(ur, c)
            @test sum(length, rs) == length(ur)
        end
    end
end

@testset "bounds_cat" begin
    A1 = [[-1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
    A2 = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 7]]
    B = [A1, A2]
    @test bounds_cat(A1) == (-1, 6)
    @test bounds_cat(A2) == (1, 7)
    @test bounds_cat(B) == (-1, 7)
    # empties
    A3 = [Int[], Int[]]
    @test bounds_cat(A3) == (1, 0)
    B3 = [A3, A3]
    @test bounds_cat(B3) == (1, 0)
    B4 = [A3, A1]
    @test bounds_cat(B4) == (-1, 6)
    #
    A1 = [([-1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
    A2 = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,7], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
    B = [A1, A2]
    @test bounds_cat(A1) == (-1, 6)
    @test bounds_cat(A2) == (1, 7)
    @test bounds_cat(B) == (-1, 7)
    # empties
    A3 = [(Int[], Float64[]), (Int[], Float64[])]
    @test bounds_cat(A3) == (1, 0)
    B3 = [A3, A3]
    @test bounds_cat(B3) == (1, 0)
    B4 = [A3, A1]
    @test bounds_cat(B4) == (-1, 6)
    #
    A1 = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
    A2 = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1]]
    B = [A1, A2]
    @test bounds_cat(A1) == (1, 6)
    @test bounds_cat(A2) == (1, 5)
    @test bounds_cat(B) == (1, 6)
    @test bounds_cat(Float64[]) == (1, 0)
    @test bounds_cat([Float64[]]) == (1, 0)
    @test bounds_cat([[Float64[]]]) == (1, 0)
    A3 = [Float64[], Float64[]]
    @test bounds_cat(A3) == (1, 0)
    B3 = [A3, A3]
    @test bounds_cat(B3) == (1, 0)
    B4 = [A3, A1]
    @test bounds_cat(B4) == (1, 6)
    #
    x = SparseVector([0.0, 1.0, 2.0, 0.0, 0.0, 0.0])
    @test bounds_cat(x) == (1, 6)
    A = [x, SparseVector([0.0, 1.0, 2.0, 0.0]), SparseVector([1.0, 0.0])]
    @test bounds_cat(A) == (1, 6)
    @test bounds_cat([A, A]) == (1, 6)
    ####
    # An interesting case
    a1 = ([9999, 1439], [0.8029133268547554, 0.1970866731452445])
    a2 = ([9284, 4370, 2965, 1590], [0.10222319762724291, 0.13054189392858026, 0.43245627176252643, 0.3347786366816504])
    a3 = ([6289, 308, 6378, 7212, 5426, 662], [0.03053777422684849, 0.21452879865837565, 0.6835396454000753, 0.0713937817147005])
    B = [a1, a2, a3]
    A = first.(B)
    @test bounds_cat(B) == (308, 9999)
end
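# For reference, a plausible implementation consistent with the splitranges
# invariant tested above. Only `sum(length, rs) == length(ur)` is actually pinned
# down by the testset; the nearly-even split is an assumption, and `splitranges_ref`
# is a hypothetical name, not the package's function.
# function splitranges_ref(ur::UnitRange{Int}, c::Int)
#     q, r = divrem(length(ur), c)
#     rs = Vector{UnitRange{Int}}()
#     lo = first(ur)
#     for i = 1:c
#         len = q + (i ≤ r)       # distribute the remainder over the first r chunks
#         len == 0 && continue    # c may exceed length(ur)
#         push!(rs, lo:lo+len-1)
#         lo += len
#     end
#     return rs
# end
# sum(length, splitranges_ref(-16:16, 4)) == 33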
# Tests of vsampler functionality
@testset "vsampler, equal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{Int}}
                A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of vectors
                A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a vector
                A = [1,2,3,4,5,6]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = [1,2,3,4]
                vsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = [1,2]
                vsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = [1,2,3,4,5,6]
            B = @inferred vsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [1,2,3,4]
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [1,2]
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = [1,2,3,4,5,6]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [1,2,3,4]
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [1,2]
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
        @test_throws InexactError vsample(Bool, A, 1000)
        @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vsample!(B, A)
    end
end

@testset "sparse vsampler, unequal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{Tuple{Vector{Int}, Vector{Float64}}}}
                A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of sparse vectors
                A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a sparse vector
                A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
                B = vsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
                vsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = ([1, 2], [0.3, 0.7])
                vsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
            B = @inferred vsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = ([1, 2], [0.3, 0.7])
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = ([1, 2], [0.3, 0.7])
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025])]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError vsample(Bool, A, 1000)
        @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vsample!(B, A)
    end
end

@testset "dense vsampler, (un)equal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{<:AbstractFloat}}
                A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of dense vectors
                A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a dense vector
                A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = [0.2, 0.3, 0.4, 0.1]
                vsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = [0.3, 0.7]
                vsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
            B = @inferred vsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [0.2, 0.3, 0.4, 0.1]
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [0.3, 0.7]
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [0.2, 0.3, 0.4, 0.1]
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [0.3, 0.7]
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025]]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError vsample(Bool, A, 1000)
        @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vsample!(B, A)
    end
end

@testset "SparseVector vsampler, unequal probability mass" begin
    sv1 = SparseVector(2, [1,2], [0.3, 0.7])
    sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1])
    sv3 = SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{SparseVector{<:AbstractFloat}}
                A = [[sv1, sv2, sv3]]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of SparseVector
                A = [sv1, sv2, sv3]
                B = vsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a SparseVector
                A = sv3
                B = vsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = sv2
                vsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = sv1
                vsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[sv1, sv2, sv3]]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = [sv1, sv2, sv3]
            B = @inferred vsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
            A = sv3
            B = @inferred vsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[sv1, sv2, sv3]]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [sv1, sv2, sv3]
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = sv3
            B = @inferred vsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            vsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            vsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[sv1, sv2, SparseVector(6, [1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025])]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError vsample(Bool, A, 1000)
        @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vsample!(B, A)
    end
    # extra SparseVector tests
    n_sim = 10
    sv1 = SparseVector(10, [1, 10], [0.3, 0.7])
    sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1])
    sv3 = SparseVector(10, [2, 4, 6, 8, 9, 10], [0.1, 0.1, 0.1, 0.1, 0.5, 0.1])
    A = [[sv1, sv2, sv3]]
    B = vsample(Int, A, n_sim)
    @test all(maximum(B, dims=1) .≤ [2 2 1 2 0 1 0 1 1 2])
    @test all(≥(0), minimum(B, dims=1))
    @test all(==(3), sum(B, dims=2))
    @test all(sum(B, dims=1) .≤ n_sim .* [2 2 1 2 0 1 0 1 1 2])
    A = [sv1, sv2, sv3]
    B = vsample(Int, A, n_sim)
    @test all(maximum(B, dims=1) .≤ [2 2 1 2 0 1 0 1 1 2])
    @test all(≥(0), minimum(B, dims=1))
    @test all(==(3), sum(B, dims=2))
    @test all(sum(B, dims=1) .≤ n_sim .* [2 2 1 2 0 1 0 1 1 2])
    B = vsample(Int, sv1, n_sim)
    @test all(==(1), sum(B, dims=2))
    vsample!(B, sv2)
    @test all(==(2), sum(B, dims=2))
    vsample!(B, sv3)
    @test all(==(3), sum(B, dims=2))
    @test all(maximum(B, dims=1) .≤ [2 2 1 2 0 1 0 1 1 2])
    @test all(≥(0), minimum(B, dims=1))
end

@testset "vsampler interface throws" begin
    n_sim = 10
    A = [1,2,3,4,5,6]
    @test_throws MethodError vsample(Int, A, n_sim, dims=2:2)
    @test_throws MethodError vsample(Int, A, n_sim, dims=[1,2,3])
    @test_throws MethodError vsample(Int, A, n_sim, dims=[.1 .2 .3 .4])
    A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
    @test_throws MethodError vsample(Int, A, n_sim, dims=2:2)
    @test_throws MethodError vsample(Int, A, n_sim, dims=[1,2,3])
    @test_throws MethodError vsample(Int, A, n_sim, dims=[.1 .2 .3 .4])
end

@testset "vsampler, equal probability mass" begin
    n_sim = 100
    A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
    D = fill(A, 10, 5, 5)
    # # Specialized method for eltype(A)::Array{Vector{Int}}
    # Admittedly, not a very meaningful test, as
    Pr = 1/2 * 1/4 * 1/6
    lPr = length(D) * log(Pr) # necessary to even view as log probability
    lPr * log10(ℯ) # or on log10 scale
    B = vsample(Int, D, n_sim, dims=(1,2,3))
    @test all(maximum(B, dims=1) .≤ length(D) .* [3 3 2 2 1 1])
    @test all(minimum(B, dims=1) .≥ 0)
    @test all(==(length(A) * length(D)), sum(B, dims=2))
end

# struct MyReal2{T<:Real} <: Real
#     x::T
# end
# Base.zero(x::MyReal2{T}) where {T} = zero(T)
# Base.zero(::Type{MyReal2{T}}) where {T} = zero(T)
# Base.one(x::MyReal2{T}) where {T} = one(T)
# Base.one(::Type{MyReal2{T}}) where {T} = one(T)
# import Base.(+)
# +(x::MyReal2{T}, y::Number) where {T} = x.x + y
# +(x::Number, y::MyReal2{T}) where {T} = x + y.x
# +(x::MyReal2{T}, y::MyReal2{T}) where {T} = x.x + y.x
# 1 + MyReal2(1) + 1
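# Worked numbers for the log-probability remark in the testset above (a quick
# computation, not a test): Pr = 1/2 * 1/4 * 1/6 = 1/48 per element of D, and
# length(D) = 10 * 5 * 5 = 250, so
#   lPr = 250 * log(1/48) ≈ -967.8     # log scale
#   lPr * log10(ℯ) ≈ -420.3            # log10 scale
# i.e. the joint probability of any specific outcome is far too small to
# represent except as a log -- hence the remark in the testset.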
# Tests of vtsampler functionality
@testset "vtsampler, equal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{Int}}
                A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of vectors
                A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a vector
                A = [1,2,3,4,5,6]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = [1,2,3,4]
                vtsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = [1,2]
                vtsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = [1,2,3,4,5,6]
            B = @inferred vtsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [1,2,3,4]
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [1,2]
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = [1,2,3,4,5,6]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [1,2,3,4]
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [1,2]
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
        @test_throws InexactError vtsample(Bool, A, 1000)
        @test_throws MethodError vtsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vtsample!(B, A)
    end
end

@testset "sparse vtsampler, unequal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{Tuple{Vector{Int}, Vector{Float64}}}}
                A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of sparse vectors
                A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a sparse vector
                A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
                vtsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = ([1, 2], [0.3, 0.7])
                vtsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
            B = @inferred vtsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = ([1, 2], [0.3, 0.7])
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1])
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = ([1, 2], [0.3, 0.7])
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025])]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError vtsample(Bool, A, 1000)
        @test_throws MethodError vtsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vtsample!(B, A)
    end
end

@testset "dense vtsampler, (un)equal probability mass" begin
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{Vector{<:AbstractFloat}}
                A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of dense vectors
                A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a dense vector
                A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = [0.2, 0.3, 0.4, 0.1]
                vtsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = [0.3, 0.7]
                vtsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
            B = @inferred vtsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [0.2, 0.3, 0.4, 0.1]
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [0.3, 0.7]
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = [0.2, 0.3, 0.4, 0.1]
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = [0.3, 0.7]
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025]]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError vtsample(Bool, A, 1000)
        @test_throws MethodError vtsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vtsample!(B, A)
    end
end

@testset "SparseVector vtsampler, unequal probability mass" begin
    sv1 = SparseVector(2, [1,2], [0.3, 0.7])
    sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1])
    sv3 = SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1, 0.1, 0.5])
    for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)]
        for i = 1:15
            for j = -1:1
                n_sim = (1 << i) + j
                # # Specialized method for eltype(A)::Array{SparseVector{<:AbstractFloat}}
                A = [[sv1, sv2, sv3]]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=2))
                @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
                # # A simplification: an array of SparseVector
                A = [sv1, sv2, sv3]
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(minimum(B, dims=1) .≥ 0)
                @test all(==(3), sum(B, dims=(2,3)))
                @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
                # # The simplest case: a SparseVector
                A = sv3
                B = vtsample(Int, A, n_sim, dims=region)
                @test all(==(1), sum(B, dims=2))
                A = sv2
                vtsample!(B, A)
                @test all(==(2), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
                A = sv1
                vtsample!(B, A)
                @test all(==(3), sum(B, dims=2))
                @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
                @test all(≥(0), minimum(B, dims=1))
            end
        end
    end
    @testset "eltypes" begin
        n_sim = 10
        # Types one would normally expect
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational]
            A = [[sv1, sv2, sv3]]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = [sv1, sv2, sv3]
            B = @inferred vtsample(T, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
            A = sv3
            B = @inferred vtsample(T, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test_throws MethodError vtsample(Complex{T}, A, n_sim)
        end
        # Composite numeric types
        for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128]
            A = [[sv1, sv2, sv3]]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=2))
            @test all(sum(B, dims=1) .≤ n_sim .* [3 3 2 2 1 1])
            A = [sv1, sv2, sv3]
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
            @test all(==(3), sum(B, dims=(2,3)))
            @test all(sum(B, dims=(1,3)) .≤ n_sim .* [3 3 2 2 1 1])
            A = sv3
            B = @inferred vtsample(Rational{T}, A, n_sim)
            @test all(==(1), sum(B, dims=2))
            A = sv2
            vtsample!(B, A)
            @test all(==(2), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [2 2 2 2 1 1])
            A = sv1
            vtsample!(B, A)
            @test all(==(3), sum(B, dims=2))
            @test all(maximum(B, dims=1) .≤ [3 3 2 2 1 1])
            @test all(≥(0), minimum(B, dims=1))
        end
        # Real, AbstractFloat, Integer, Signed, Unsigned work, but should be avoided
        A = [[sv1, sv2, SparseVector(6, [1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05, 0.025, 0.025])]] # slight change to increase the probability of an InexactError throw
        @test_throws InexactError vtsample(Bool, A, 1000)
        @test_throws MethodError vtsample(Union{Int16, Int32}, A, n_sim)
        B = Matrix{Union{Int16,Int32}}(undef, 6, 10)
        @test_throws MethodError vtsample!(B, A)
    end
end
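# The four samplers exercised across these files share a single interface; a
# minimal sketch of the common invariant. (The scalar/threaded/SIMD division of
# labor suggested by the t/v/vt prefixes is an assumption, not something these
# tests pin down.)
#   A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]]
#   for f in (sample, tsample, vsample, vtsample)
#       B = f(Int, A, 100)
#       @assert all(==(3), sum(B, dims=2))   # one draw per categorical component
#   end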
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
code
24398
# Tests of vsampler functionality @testset "vsampler, equal probability mass" begin for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{Vector{Int}} A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # A simplification: an array of sparse vectors A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # The simplest case: a sparse vector A = [1,2,3,4,5,6] B = vsample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=1)) A = [1,2,3,4] vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = [1,2] vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = [1,2,3,4,5,6] B = @inferred vsample(T, A, n_sim) @test all(==(1), sum(B, dims=1)) A = [1,2,3,4] vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = [1,2] vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test_throws MethodError vsample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = [1,2,3,4,5,6] B = @inferred vsample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=1)) A = [1,2,3,4] vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = [1,2] vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 
2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end # Real, AbstractFloat, Integer, Signed, Unsigned. work but should be avoided A = [[[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]]] @test_throws InexactError vsample(Bool, A, 1000) @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError vsample!(B, A) end end @testset "sparse vsampler, unequal probability mass" begin for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{Vector{Int}} A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])]] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # A simplification: an array of sparse vectors A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # The simplest case: a sparse vector A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) B = vsample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=1)) A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = ([1, 2], [0.3, 0.7]) vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])]] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) B = @inferred vsample(T, A, n_sim) @test all(==(1), sum(B, dims=1)) A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = ([1, 2], [0.3, 0.7]) vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test_throws MethodError vsample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 
0.5])]] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = [([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5])] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) B = @inferred vsample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=1)) A = ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]) vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = ([1, 2], [0.3, 0.7]) vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end # Real, AbstractFloat, Integer, Signed, Unsigned. work but should be avoided A = [[([1, 2], [0.3, 0.7]), ([1,2,3,4], [0.2, 0.3, 0.4, 0.1]), ([1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05,0.025, 0.025])]] # slight change to increase probability of Inexact throw @test_throws InexactError vsample(Bool, A, 1000) @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError vsample!(B, A) end end @testset "dense vsampler, (un)equal probability mass" begin for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{Vector{<:AbstractFloat}} A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]]] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # A simplification: an array of dense vectors A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # The simplest case: a dense vector A = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] B = vsample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=1)) A = [0.2, 0.3, 0.4, 0.1] vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = [0.3, 0.7] vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]]] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 
1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] B = @inferred vsample(T, A, n_sim) @test all(==(1), sum(B, dims=1)) A = [0.2, 0.3, 0.4, 0.1] vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = [0.3, 0.7] vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test_throws MethodError vsample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]]] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = [[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = [0.1, 0.1, 0.1, 0.1,0.1, 0.5] B = @inferred vsample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=1)) A = [0.2, 0.3, 0.4, 0.1] vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = [0.3, 0.7] vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end # Real, AbstractFloat, Integer, Signed, Unsigned. work but should be avoided A = [[[0.3, 0.7], [0.2, 0.3, 0.4, 0.1], [0.5, 0.2, 0.2, 0.05,0.025, 0.025]]] # slight change to increase probability of Inexact throw @test_throws InexactError vsample(Bool, A, 1000) @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError vsample!(B, A) end end @testset "SparseVector vsampler, unequal probability mass" begin sv1 = SparseVector(2, [1,2], [0.3, 0.7]) sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sv3 = SparseVector(6, [1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) for region ∈ [1, 2, 3, 4, 5, (1,2), (1,3), (1,4), (2,3), (2,4), (3,4), (1,2,3), (1,2,4), (2,3,4), (1,2,3,4), :, (), (1, 5), (2, 5), (1,2,5), (5,6,7)] for i = 1:15 for j = -1:1 n_sim = (1 << i) + j # # Specialized method for eltype(A)::Array{SparseVector{<:AbstractFloat}} A = [[sv1, sv2, sv3]] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # A simplification: an array of SparseVector A = [sv1, sv2, sv3] B = vsample(Int, A, n_sim, dims=region) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) # # The simplest case: a SparseVector A = sv3 B = vsample(Int, A, n_sim, dims=region) @test all(==(1), sum(B, dims=1)) A = sv2 vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = sv1 vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end 
end end @testset "eltypes" begin n_sim = 10 # Types one would normally expect for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128, Float16, Float32, Float64, BigFloat, BigInt, Rational] A = [[sv1, sv2, sv3]] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = [sv1, sv2, sv3] B = @inferred vsample(T, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) @test_throws MethodError vsample(Complex{T}, A, n_sim) A = sv3 B = @inferred vsample(T, A, n_sim) @test all(==(1), sum(B, dims=1)) A = sv2 vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = sv1 vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test_throws MethodError vsample(Complex{T}, A, n_sim) end # Composite numeric types for T ∈ [Int8, Int16, Int32, Int64, Int128, UInt8, UInt16, UInt32, UInt64, UInt128] A = [[sv1, sv2, sv3]] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = [sv1, sv2, sv3] B = @inferred vsample(Rational{T}, A, n_sim) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=(1,3))) @test all(sum(B, dims=(2,3)) .≤ n_sim .* [3; 3; 2; 2; 1; 1]) A = sv3 B = @inferred vsample(Rational{T}, A, n_sim) @test all(==(1), sum(B, dims=1)) A = sv2 vsample!(B, A) @test all(==(2), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2; 2; 2; 2; 1; 1]) A = sv1 vsample!(B, A) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [3; 3; 2; 2; 1; 1]) @test all(≥(0), minimum(B, dims=2)) end # Real, AbstractFloat, Integer, Signed, Unsigned. 
work but should be avoided A = [[sv1, sv2, SparseVector(6, [1,2,3,4,5,6], [0.5, 0.2, 0.2, 0.05,0.025, 0.025])]] # slight change to increase probability of Inexact throw @test_throws InexactError vsample(Bool, A, 1000) @test_throws MethodError vsample(Union{Int16, Int32}, A, n_sim) B = Matrix{Union{Int16,Int32}}(undef, 6, 10) @test_throws MethodError vsample!(B, A) end # extra SparseVector tests n_sim = 10 sv1 = SparseVector(10, [1, 10], [0.3, 0.7]) sv2 = SparseVector(4, [1,2,3,4], [0.2, 0.3, 0.4, 0.1]) sv3 = SparseVector(10, [2, 4, 6, 8, 9, 10], [0.1, 0.1, 0.1, 0.1,0.5, 0.1]) A = [[sv1, sv2, sv3]] B = vsample(Int, A, n_sim) @test all(maximum(B, dims=2) .≤ [2, 2, 1, 2, 0, 1, 0, 1, 1, 2]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [2, 2, 1, 2, 0, 1, 0, 1, 1, 2]) A = [sv1, sv2, sv3] B = vsample(Int, A, n_sim) @test all(maximum(B, dims=2) .≤ [2, 2, 1, 2, 0, 1, 0, 1, 1, 2]) @test all(≥(0), minimum(B, dims=2)) @test all(==(3), sum(B, dims=1)) @test all(sum(B, dims=2) .≤ n_sim .* [2, 2, 1, 2, 0, 1, 0, 1, 1, 2]) B = vsample(Int, sv1, n_sim) @test all(==(1), sum(B, dims=1)) vsample!(B, sv2) @test all(==(2), sum(B, dims=1)) vsample!(B, sv3) @test all(==(3), sum(B, dims=1)) @test all(maximum(B, dims=2) .≤ [2, 2, 1, 2, 0, 1, 0, 1, 1, 2]) @test all(≥(0), minimum(B, dims=2)) end @testset "vsampler inferface throws" begin n_sim = 10 A = [1,2,3,4,5,6] @test_throws MethodError vsample(Int, A, n_sim, dims=1:2) @test_throws MethodError vsample(Int, A, n_sim, dims=[1,2,3]) @test_throws MethodError vsample(Int, A, n_sim, dims=[.1 .2; .3 .4]) A = ([1,2,3,4,5,6], [0.1, 0.1, 0.1, 0.1,0.1, 0.5]) @test_throws MethodError vsample(Int, A, n_sim, dims=1:2) @test_throws MethodError vsample(Int, A, n_sim, dims=[1,2,3]) @test_throws MethodError vsample(Int, A, n_sim, dims=[.1 .2; .3 .4]) end @testset "vsampler, equal probability mass" begin n_sim = 100 A = [[1, 2], [1, 2, 3, 4], [1, 2, 3, 4, 5, 6]] D = fill(A, 10,5,5); # # Specialized method for eltype(A)::Array{Vector{Int}} # Admittedly, not very meaningful test as Pr = 1/2 * 1/4 * 1/6 lPr = length(D) * log(Pr) # necessary to even view as log probability lPr * log10(ℯ) # or on log10 scale B = vsample(Int, D, n_sim, dims=(1,2,3)) @test all(maximum(B, dims=2) .≤ length(D) .* [3; 3; 2; 2; 1; 1]) @test all(minimum(B, dims=2) .≥ 0) @test all(==(length(A) * length(D)), sum(B, dims=1)) end # struct MyReal2{T<:Real} <: Real # x::T # end # Base.zero(x::MyReal2{T}) where {T} = zero(T) # Base.zero(::Type{MyReal2{T}}) where {T} = zero(T) # Base.one(x::MyReal2{T}) where {T} = one(T) # Base.one(::Type{MyReal2{T}}) where {T} = one(T) # import Base.(+) # +(x::MyReal2{T}, y::Number) where {T} = x.x + y # +(x::Number, y::MyReal2{T}) where {T} = x + y.x # +(x::MyReal2{T}, y::MyReal2{T}) where {T} = x.x + y.x # 1 + MyReal2(1) + 1
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
0c4d9ab6df609e991ea71111b6d4525f5d2494e1
docs
3426
# CategoricalMonteCarlo

## Installation

```julia
using Pkg
Pkg.add("CategoricalMonteCarlo")
```

## Description

Given `i=1,…,n` independent categorical distributions, each with a unique probability mass vector, `pᵢ`, what is the distribution of the sum of the joint distribution formed from the product of the marginals? Assume that some categories are shared across the marginals, such that by re-indexing and constructing a sparse representation based on the original probability mass vectors, we may unify the categories themselves. However, the sum of the joint distribution would not be multinomial unless we have the trivial case of each distribution being identical.

While there is no closed-form expression for the sum of the joint distribution, Monte Carlo simulation provides a general mechanism for computation. This package provides the facilities for such Monte Carlo simulation, based on collections of probability mass vectors, each of which corresponds to a (possibly, but not necessarily) independent categorical distribution.

Several advanced strategies are utilized to maximize the performance of such computations, including fastest-in-Julia categorical sampling (`MarsagliaDiscreteSamplers`) -- in comparison to publicly visible packages -- in addition to partitioning strategies which favor memory locality and cache performance despite the random-access nature of the writes inherent to Monte Carlo simulation. These same partitioning strategies are utilized to enable thread-based parallelism across the iteration space of arbitrary-dimensional input arrays. Furthermore, reduction-in-place is supported via the interface familiar to Julia users -- the `dims::Vararg{<:Integer, N} where N` keyword; this enables additional increases in efficiency: while the user may wish to simulate a distribution bearing the indices of the input array, it may be known that some of these dimensions will always be summed over.

## Usage

It may help to demonstrate with an example. Consider an equally-weighted coin with sides labeled 1 and 2; an equally-weighted four-sided die with sides labeled 1, 2, 3 and 4; and an equally-weighted six-sided die with sides labeled 1, 2, 3, 4, 5 and 6. If one were to consider a scenario in which one flips the coin, rolls the four-sided die, and rolls the six-sided die, what is the distribution of counts on labels 1,...,6?

```julia
julia> using CategoricalMonteCarlo

julia> coin = [1/2, 1/2];

julia> die4 = [1/4, 1/4, 1/4, 1/4];

julia> die6 = [1/6, 1/6, 1/6, 1/6, 1/6, 1/6];

julia> sample([coin, die4, die6], 10)
10×6 Matrix{Int64}:
 0  1  0  1  0  1
 1  1  1  0  0  0
 1  1  0  0  1  0
 0  1  1  1  0  0
 1  0  0  1  0  1
 0  2  0  0  0  1
 1  1  1  0  0  0
 1  0  0  1  0  1
 3  0  0  0  0  0
 1  1  0  1  0  0

julia> using MonteCarloSummary

julia> ["mean" "mcse" "std" "2.5th" "50th" "97.5th";
        mcsummary(sample([coin, die4, die6], 10^6), (0.025, 0.5, 0.975))]
7×6 Matrix{Any}:
  "mean"     "mcse"        "std"      "2.5th"  "50th"  "97.5th"
 0.918372   0.000759618   0.759618   0.0      1.0     2.0
 0.915886   0.000758815   0.758815   0.0      1.0     2.0
 0.41628    0.000571198   0.571198   0.0      0.0     2.0
 0.416611   0.000570929   0.570929   0.0      0.0     2.0
 0.166285   0.000372336   0.372336   0.0      0.0     1.0
 0.166566   0.000372588   0.372588   0.0      0.0     1.0
```

## Future Work

- Description of normalizers (weight vector -> probability vector)
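As a concrete illustration of the `dims` keyword, here is a minimal sketch patterned on the package's test suite, which exercises `vsample` and its thread-parallel counterpart `vtsample`; the array shape and simulation count are arbitrary, and the reduction semantics shown (summing over the named dimensions of the input during simulation) are inferred from the tests:

```julia
using CategoricalMonteCarlo

coin = [1/2, 1/2]; die4 = fill(1/4, 4); die6 = fill(1/6, 6);

# A 10×5 array whose every element is the same collection of three
# categorical distributions; each element contributes three draws.
D = fill([coin, die4, die6], 10, 5);

# Sum over both dimensions of D during the simulation itself, rather
# than materializing the full array and reducing afterwards.
B = vsample(Int, D, 1000, dims=(1, 2));

# Each simulation (column of B) now counts 3 * length(D) total draws.
all(==(3 * length(D)), sum(B, dims=1))   # true

# Thread-parallel counterpart with the same interface:
Bt = vtsample(Int, D, 1000, dims=(1, 2));
```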
CategoricalMonteCarlo
https://github.com/andrewjradcliffe/CategoricalMonteCarlo.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
79
using Documenter, TruncatedDistributions makedocs(sitename="My Documentation")
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
1124
module TruncatedDistributions using Distributions using HCubature using LinearAlgebra using PDMats import Distributions: insupport, pdf, moment import Base: size, length, show, rand import Statistics: mean, cov export compute_tp, compute_mean, compute_cov, compute_moment, TruncationRegion, TruncatedMvDistribution, BoxTruncatedMvNormalRecursiveMoments,#QQQQ BoxTruncationRegion, intruncationregion, insupport, BoxTruncatedMvNormal, moment, moments, compute_moments, alpha, pdf, raw_moment, mean, cov, rand, tp, TruncatedMvDistributionSecondOrderState, BasicBoxTruncatedMvNormal, RecursiveMomentsBoxTruncatedMvNormal, EllipticalTruncationRegion include("commonTypes.jl") include("regions.jl") include("commonOperations.jl") include("commonCompute.jl") include("univariate/distributionsPackageExtensions.jl") include("multivariate/boxTruncatedMvNormalRecursiveMomentsState.jl") include("multivariate/normal.jl") include("multivariate/otherThanNormal.jl") include("parameterMatching/dynamicUnivariateFit.jl") end #module
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
2100
function compute_tp(d::TruncatedMvDistribution{D,R};
                    tol::Float64 = 10e-4, tol_step::Int = 10^5, alg::Symbol = :hc) where {D,R}
    if alg == :mc
        i, n = 0, 0
        err = 1.0
        tp = 0.0
        while err ≥ tol
            candidate = rand(d.untruncated)
            i += intruncationregion(d.region, candidate)
            n += 1
            tp = i/n
            if n % tol_step == 0
                err = 3*√(tp*(1-tp)/n)
            end
        end
        d.state.tp = tp
    elseif alg == :hc
        d.state.tp, d.state.tp_err = hcubature((x)->pdf(d.untruncated,x), d.region.a, d.region.b)
    else
        error("Unknown algorithm $(alg)")
    end
    nothing
end

function compute_mean(d::TruncatedMvDistribution{D,R};
                      tol::Float64 = 10e-4, tol_step::Int = 10^5, alg::Symbol = :hc) where {D,R}
    if alg == :mc
        @error("still not implemented")
    elseif alg == :hc
        d.state.μ, d.state.μ_err = hcubature((x)->pdf(d,x)*x, d.region.a, d.region.b)
    else
        error("Unknown algorithm $(alg)")
    end
    nothing
end

function compute_cov(d::TruncatedMvDistribution{D,R};
                     tol::Float64 = 10e-4, tol_step::Int = 10^5, alg::Symbol = :hc) where {D,R}
    if alg == :mc
        @error("still not implemented")
    elseif alg == :hc
        μ = mean(d)
        d.state.Σ, d.state.Σ_err = hcubature((x)->pdf(d,x)*(x-μ)*(x-μ)', d.region.a, d.region.b)
    else
        error("Unknown algorithm $(alg)")
    end
    nothing
end

function compute_moment(d::TruncatedMvDistribution{D,R}, k::Vector{Int};
                        tol::Float64 = 10e-4, tol_step::Int = 10^5, alg::Symbol = :hc) where {D,R}
    if alg == :mc
        @error("still not implemented")
    elseif alg == :hc
        return hcubature((x)->pdf(d,x)*prod(x.^k), d.region.a, d.region.b)[1]
    else
        error("Unknown algorithm $(alg)")
    end
    nothing
end
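The `:mc` branch stops once a three-standard-error bound on the binomial acceptance fraction, `3*√(tp*(1-tp)/n)`, drops below `tol`, checking only every `tol_step` draws; note that it fills in `d.state.tp` only, whereas `:hc` also records the cubature error estimate in `d.state.tp_err`. A minimal usage sketch, using the `BasicBoxTruncatedMvNormal` constructor defined elsewhere in this package (the distribution parameters are hypothetical, chosen for illustration):

```julia
using Distributions, PDMats

# Standard bivariate normal truncated to the unit box.
d = BasicBoxTruncatedMvNormal([0.0, 0.0], PDMat([1.0 0.0; 0.0 1.0]),
                              [-1.0, -1.0], [1.0, 1.0])

compute_tp(d; alg = :hc)              # cubature: sets d.state.tp and d.state.tp_err
compute_tp(d; alg = :mc, tol = 1e-3)  # Monte Carlo: sets d.state.tp only
tp_estimate = d.state.tp
```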
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
2324
function insupport(d::TruncatedMvDistribution{D,R,S}, x::AbstractArray) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} insupport(d.untruncated,x) && intruncationregion(d.region,x) end function rand(d::TruncatedMvDistribution{D,R,S}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} rand_naive(d) end function rand_naive(d::TruncatedMvDistribution{D,R,S}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} while true candidate = rand(d.untruncated) intruncationregion(d.region,candidate) && return candidate end end function length(d::TruncatedMvDistribution{D,R,S}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} length(d.untruncated) end function size(d::TruncatedMvDistribution{D,R,S}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} size(d.untruncated) end function pdf(d::TruncatedMvDistribution{D,R,S},x::AbstractArray; worst_tol = 1e-3) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} d.state.tp_err < worst_tol || compute_tp(d) if intruncationregion(d.region,x) pdf(d.untruncated, x) / d.state.tp else zeros(length(d)) end end function mean(d::TruncatedMvDistribution{D,R,S}; worst_tol = 1e-3 ) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} d.state.μ_err < worst_tol || compute_mean(d) return d.state.μ end function cov(d::TruncatedMvDistribution{D,R,S}; worst_tol = 1e-3 ) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} d.state.Σ_err < worst_tol || compute_cov(d) return d.state.Σ end function moment(d::TruncatedMvDistribution{D,R,S}, k::Vector{Int} ; worst_tol = 1e-3 ) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} compute_moment(d,k) end function tp(d::TruncatedMvDistribution{D,R,S}; worst_tol = 1e-3) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} d.state.tp_err < worst_tol || compute_tp(d) d.state.tp end
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
4122
" A truncation region defines a subset of space to which the distribution is truncated. The basic operation supported is `intruncationregion()`, which returns true if a vector is inside the truncation region. " abstract type TruncationRegion end " A state abstract object representing computed quantities of a truncated multivariate distribution. Every concrete subtype should expose at least the following two fields. - `n::Int` The length of the distribution - `tp::Float64` The probability of falling in the truncation region for the non-truncated case. - `tp_err::Float64` An estimate of the absolute error of the probability `tp`. Other subtypes may expose. - `μ::Vector{Float64}` The mean vector. - `μ_err::Float64` An estimate of the relative error for the mean vector. - `Σ::PDMat` The covariance matrix. - `Σ_err::Float64` An estimate of the relative error for the covariance matrix. Further subtypes may expose. - `moment_dict::Dict{Vector{Int},Float64}` A dictionary mapping multivariate moment vectors to estimate quantities. - `prob_dict` A dictionary mapping probability vectors to estimated quantities. " abstract type TruncatedMvDistributionState end "A truncated multi-variate distribution composed of a Multivariate Distribution, Truncation Region and a State object implementing computable state." struct TruncatedMvDistribution{D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} untruncated::D region::R state::S end function TruncatedMvDistribution{D,R,S}(d::D,r::R) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} TruncatedMvDistribution(d,r,S(d)) end mutable struct TruncatedMvDistributionSecondOrderState <: TruncatedMvDistributionState n::Int # dimension tp::Float64 # Truncation probability (probability under the truncated region) μ::Vector{Float64} # Mean vector Σ::PDMat # Covariance matrix tp_err::Float64 # Estimate of the error of truncation probability μ_err::Float64 # Estimate of the error of the mean vector Σ_err::Float64 # Estimate of the error fo the covariance matrix TruncatedMvDistributionSecondOrderState(d::MultivariateDistribution) = new( length(d), NaN, Vector{Float64}(undef,0), PDMat(Array{Float64,2}(I,length(d),length(d))), Inf, Inf, Inf) end # function tp(d::TruncatedMvDistribution{D,R,TruncatedMvDistributionSecondOrderState}) # where {D <: MultivariateDistribution, R <: TruncationRegion} # (d.state.tp, d.state.tp_err) # end # function mean(d::TruncatedMvDistribution{D,R,S}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} # (d.state.μ, d.state.μ_err) # end # function cov(d::TruncatedMvDistribution{D,R,S}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} # (d.s) # end # function moment(d::TruncatedMvDistribution{D,R,S}, k::Vector{Int}) where {D <: MultivariateDistribution, R <: TruncationRegion, S <: TruncatedMvDistributionState} # function Base.show(io::IO, d::BoxTruncatedMvNormalRecursiveMomentsState) # println(io, "Box Truncated MvNormal") # println(io, "n = $(d.n)") # println(io, "μₑ = $(d.μₑ)" ) # println(io, "Σₑ = $(d.Σₑ)" ) # println(io, "α = $(alpha(d))") # println("Limits:") # for i in 1:d.n # println(io, "$i:\t ",(d.a[i],d.b[i])) # end # if d.momentsComputed # println("Moments:") # for k in keys(d.momentDict) # println(io, k, "\t", moment(d,k)) # end # println(io,"mean:", mean(d)) # println(io,"cov:", cov(d)) # else # println("Moments not computed") # end # end
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
554
""" Truncation in a box between a and b. """ struct BoxTruncationRegion <: TruncationRegion a::Vector{Float64} b::Vector{Float64} end intruncationregion(r::BoxTruncationRegion, x::AbstractArray) = all(r.a .<= x) && all(x .<= r.b) """ An Elliptical Truncation region. """ struct EllipticalTruncationRegion <: TruncationRegion H::PDMat h::Vector{Float64} c::Float64 end intruncationregion(r::EllipticalTruncationRegion, x::AbstractArray) = (x-r.h)'*r.H*(x-r.h) <= r.c abstract type PolytopeTruncationRegion <: TruncationRegion end
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
6953
# const max_moment_levels = 2 #Just for mean and covariance matrix const Children = Union{Vector{Vector{Int}},Nothing} mutable struct BoxTruncatedMvNormalRecursiveMomentsState <: TruncatedMvDistributionState d::MvNormal r::BoxTruncationRegion n::Int #dimension max_moment_levels::Int # # #Children of type 'a' or 'b' are lower dimensional distributions used to for recursive computation children_a::Vector{BoxTruncatedMvNormalRecursiveMomentsState} children_b::Vector{BoxTruncatedMvNormalRecursiveMomentsState} # # #for each moment vector e.g. [0,1,0,1] or [0,0,2,0] has the tuple which is the computed (non-normalized) moment integral # # #of that vector and a list of children vectors rawMomentDict::Dict{Vector{Int},Float64} #note that the values are non-normalized moment integrals treeDict::Dict{Vector{Int},Children} rawMomentsComputed::Bool tp::Float64 μ::Vector{Float64} Σ::PDMat tp_err::Float64 μ_err::Float64 Σ_err::Float64 function BoxTruncatedMvNormalRecursiveMomentsState(d::MvNormal, r::BoxTruncationRegion, max_moment_levels::Int) μₑ, Σₑ = d.μ, d.Σ a, b = r.a, r.b n = length(d) length(a) != n && error("The length of a does not match the length") length(b) != n && error("The length of b does not match the length") a > b && error("The a vector must be less than the b vector") if n ≥ 2 μᵃ = [μₑ[setdiff(1:n,j)] + Σₑ[setdiff(1:n,j),j] * (a[j]-μₑ[j])/Σₑ[j,j] for j in 1:n] μᵇ = [μₑ[setdiff(1:n,j)] + Σₑ[setdiff(1:n,j),j] * (b[j]-μₑ[j])/Σₑ[j,j] for j in 1:n] Σ̃ = [Σₑ[setdiff(1:n,j),setdiff(1:n,j)] - (1/Σₑ[j,j])*Σₑ[setdiff(1:n,j),j]*Σₑ[j,setdiff(1:n,j)]' for j in 1:n] children_a = [BoxTruncatedMvNormalRecursiveMomentsState( MvNormal(μᵃ[j],Σ̃[j]), BoxTruncationRegion(a[setdiff(1:n,j)],b[setdiff(1:n,j)]), max_moment_levels) for j in 1:n] children_b = [BoxTruncatedMvNormalRecursiveMomentsState( MvNormal(μᵇ[j],Σ̃[j]), BoxTruncationRegion(a[setdiff(1:n,j)],b[setdiff(1:n,j)]), max_moment_levels) for j in 1:n] else #n==1 children_a = Array{BoxTruncatedMvNormalRecursiveMomentsState,1}[] #no children children_b = Array{BoxTruncatedMvNormalRecursiveMomentsState,1}[] #no children end rawMomentDict, treeDict = init_dicts(n,max_moment_levels) new(d,r,n,max_moment_levels,children_a,children_b,rawMomentDict,treeDict,false, NaN, Vector{Float64}(undef,0), PDMat(Array{Float64,2}(I,n,n)), Inf, Inf, Inf) end end function init_dicts(n::Int,max_moment_levels::Int) function addToBaseKey( baseKey::Vector{Int}, n::Int, md::Dict{Vector{Int},Float64}, td::Dict{Vector{Int},Children}) keys = Vector{Vector{Int}}(undef,n) for i in 1:n key = copy(baseKey) key[i] += 1 md[key] = NaN td[key] = nothing keys[i] = key end md[baseKey] = NaN td[baseKey] = keys keys end md = Dict{Vector{Int},Float64}() #rawMomentDict td = Dict{Vector{Int},Children}() #treeDict rootKey = zeros(Int,n) key_vals = [rootKey] for _ = 1:max_moment_levels levelKeys = Vector{Int}[] for key in key_vals newKeys = addToBaseKey(key,n,md,td) append!(levelKeys,newKeys) end key_vals = levelKeys end md,td end function compute_moments(d::BoxTruncatedMvNormalRecursiveMomentsState) function compute_children_moments(d::BoxTruncatedMvNormalRecursiveMomentsState,baseKey::Vector{Int}) d.treeDict[baseKey] == nothing && return #recursion stopping criteria c = c_vector(d,baseKey) for k in d.treeDict[baseKey] i = findfirst((x)->x==1,k-baseKey) d.rawMomentDict[k] = d.d.μ[i]*d.rawMomentDict[baseKey] + (d.d.Σ*c)[i] compute_children_moments(d,k) end end function c_vector(d::BoxTruncatedMvNormalRecursiveMomentsState,k::Vector{Int}) c = Vector{Float64}(undef,d.n) for j in 1:d.n kMinus = 
copy(k) kMinus[j] = kMinus[j]-1 F0 = (sum(kMinus .>= 0) == d.n) ? d.rawMomentDict[kMinus] : 0.0 F1 = length(d.children_a) >= 1 ? raw_moment(d.children_a[j],k[setdiff(1:d.n,j)]) : 0.0 F2 = length(d.children_b) >= 1 ? raw_moment(d.children_b[j],k[setdiff(1:d.n,j)]) : 0.0 ϕ = pdf.(Normal(d.d.μ[j],sqrt(d.d.Σ[j,j])),[d.r.a[j],d.r.b[j]] ) c[j] = k[j]*F0 + d.r.a[j]^k[j]*ϕ[1]*F1 - d.r.b[j]^k[j]*ϕ[2]*F2 end c end if d.n > 1 baseKey = zeros(Int,d.n) #[0,0,....,0] d.rawMomentDict[baseKey] = LL(d) compute_children_moments(d,baseKey) #start recursion else #n==1 @assert d.n == 1 distTruncated = TruncatedNormal(d.d.μ[1],sqrt(d.d.Σ[1]),d.r.a[1],d.r.b[1]) d.rawMomentDict[[0]] = distTruncated.tp m = moments(distTruncated, d.max_moment_levels) for i in 1:d.max_moment_levels d.rawMomentDict[[i]] = m[i]*distTruncated.tp end end d.rawMomentsComputed = true end function raw_moment(d::BoxTruncatedMvNormalRecursiveMomentsState,k::Vector{Int}) !d.rawMomentsComputed && compute_moments(d) return d.rawMomentDict[k] end # moment(d::BoxTruncatedMvNormalRecursiveMomentsState,k::Vector{Int}) = raw_moment(d,k) / alpha(d) # alpha(d::BoxTruncatedMvNormalRecursiveMomentsState) = raw_moment(d,zeros(Int,d.n)) # function mean(d::BoxTruncatedMvNormalRecursiveMomentsState) # μ = Vector{Float64}(undef,d.n) # for i in 1:d.n # ee = zeros(Int,d.n) # ee[i] = 1 # μ[i] = moment(d,ee) # end # μ # end # function cov(d::BoxTruncatedMvNormalRecursiveMomentsState) # Σ = zeros(Float64,d.n,d.n) # for i in 1:d.n, j in 1:d.n # ee = zeros(Int,d.n) # if i == j # ee[i] = 2 # else # ee[i], ee[j] = 1, 1 # end # Σ[i,j] = moment(d,ee) # end # μ = mean(d) # Σ-μ*μ' # end # function rand(d::BoxTruncatedMvNormalRecursiveMomentsState) # rand(MvNormal(d.μₑ,d.Σₑ)) #TODO QQQQ # end # function pdf_nontruncated(d::BoxTruncatedMvNormalRecursiveMomentsState,x) # d_nontruncated = MvNormal(d.μₑ,d.Σₑ) # pdf(d_nontruncated,x) # end function LL(d::BoxTruncatedMvNormalRecursiveMomentsState) @info "doing base numerical integral on dimension $(d.n)." hcubature((x)->pdf(d.d,x),d.r.a,d.r.b,maxevals = 10^6)[1] end
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
1699
" A Box Truncated normal distribution with a naive implementation and representation in the state of the mean and covariance. Works well for very low dimensions (e.g. 2,3,4). " const BasicBoxTruncatedMvNormal = TruncatedMvDistribution{MvNormal,BoxTruncationRegion,TruncatedMvDistributionSecondOrderState} function BasicBoxTruncatedMvNormal( μₑ::Vector{Float64}, Σₑ::PDMat, a::Vector{Float64}, b::Vector{Float64}) d = MvNormal(μₑ,Σₑ) r = BoxTruncationRegion(a,b) TruncatedMvDistribution{MvNormal,BoxTruncationRegion,TruncatedMvDistributionSecondOrderState}(d,r) end " A Box Truncated normal distribution with a recursive moment computation implementation. " const RecursiveMomentsBoxTruncatedMvNormal = TruncatedMvDistribution{MvNormal,BoxTruncationRegion,BoxTruncatedMvNormalRecursiveMomentsState} function RecursiveMomentsBoxTruncatedMvNormal( μₑ::Vector{Float64}, Σₑ::PDMat, a::Vector{Float64}, b::Vector{Float64}; max_moment_levels::Int = 2) d = MvNormal(μₑ,Σₑ) r = BoxTruncationRegion(a,b) s = BoxTruncatedMvNormalRecursiveMomentsState(d,r,max_moment_levels) TruncatedMvDistribution{MvNormal,BoxTruncationRegion,BoxTruncatedMvNormalRecursiveMomentsState}(d,r,s) end # const EllipsoidTruncatedMvNormal = TruncatedMvDistribution{MvNormal,EllipticalTruncationRegion} # const PolytopeTruncatedMvNormal = TruncatedMvDistribution{MvNormal,PolytopeTruncationRegion}
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
649
# #MvLogNormal truncations # const BoxTruncatedMvLogNormal = TruncatedMvDistribution{MvLogNormal,BoxTruncationRegion} # const EllipsoidTruncatedMvLogNormal = TruncatedMvDistribution{MvLogNormal,EllipticalTruncationRegion} # const PolytopeTruncatedMvLogNormal = TruncatedMvDistribution{MvLogNormal,PolytopeTruncationRegion} # #MvTDist (multi-variate T-distribution) truncations # const BoxTruncatedMvTDist = TruncatedMvDistribution{MvTDist,BoxTruncationRegion} # const EllipsoidTruncatedMvTDist = TruncatedMvDistribution{MvTDist,EllipticalTruncationRegion} # const PolytopeTruncatedMvTDist = TruncatedMvDistribution{MvTDist,PolytopeTruncationRegion}
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
1023
" Return the k'th moment of a truncated normal distribution. " function moment(d::Truncated{Normal{T},Continuous}, k::Int) where T k == 0 ? one(T) : last(moments(d,k)) end " Compute the 1'st to k'th moment of a truncated normal distribution. Uses a recursive formula. " function moments(d::Truncated{Normal{T},Continuous}, k::Int) where T k == 0 && return 1 m = Array{T}(undef,k+2) #Array of moments with index i being the i-2's moment #(treating the -1's moment as 0 and 0'ths moment as 1) m[1], m[2] = 0, 1 pars = params(d) μ, σ, σ² =pars[1], pars[2], pars[2]^2 L, U = pars[3], pars[4] zL, zU = (L - μ)/σ, (U - μ)/σ ϕL, ϕU = pdf.(Normal(),(zL, zU)) ΦUL = cdf(Normal(),zU) - cdf(Normal(),zL) for i in 3:(k+2) kk = i-2 #recursive formula for kk'th moment as a function of the previous two moments (if kk=-1 it uses 0) m[i] = (kk-1)*σ²*m[i-2] + μ*m[i-1] - σ*(U^(kk-1)*ϕU - L^(kk-1)*ϕL)/ΦUL end return m[3:(k+2)] end
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
3134
function normal2a() μₑ = [2.5, 3.5] Σₑ = [2.0 -0.5; -0.5 5.0] a = [-2.3,-20.] b = [4.,12.3] properties = Dict{String,Any}() properties["length"] = 2 properties["tp"] = 0.8551938607791414 properties["mean"] = [2.126229598541412, 3.5930224468784577] properties["covariance"] = [1.0 0; 0 1.0] #QQQQ properties["moment123"] = 2.3 #QQQQ (MvNormal(μₑ,Σₑ), BoxTruncationRegion(a,b),properties) end function normal2b() μₑ = [2.5, 3.5] Σₑ = [ 3.3 0.5; 0.5 5.0] a = [-5.4,-20.] b = [2.4,6.3] properties = Dict{String,Any}() properties["length"] = 2 properties["tp"] = 0.43660920327458974 properties["mean"] = [0.9734400003512856, 2.886032492774952] properties["covariance"] = [1.0 0; 0 1.0] #QQQQ properties["moment123"] = 2.3 #QQQQ (MvNormal(μₑ,Σₑ), BoxTruncationRegion(a,b),properties) end function normal2c() a = [-1.5, -1.4] b = [1.7, 2.4] μ₁ = 0.45; μ₂ = -0.2 σ₁ = 1.2; σ₂ = 0.8 ρ = 0.6 μₑ = [μ₁, μ₂] Σₑ = [σ₁^2 ρ*σ₁*σ₂ ;ρ*σ₁*σ₂ σ₂^2]; properties = Dict{String,Any}() properties["length"] = 2 properties["tp"] = 0.7517763397386328 properties["mean"] = [0.308275927584875, -0.18541888959459515] properties["covariance"] = [1.0 0; 0 1.0] #QQQQ properties["moment123"] = 2.3 #QQQQ (MvNormal(μₑ,Σₑ), BoxTruncationRegion(a,b),properties) end function normal3a() μₑ = [3.5,2,3.5] Σₑ = [ 7. 1 0 ; 1 3.3 2 ; 0 2 3.8 ] a = [-2. ,-1 ,2] b = [4. ,4. , 5] properties = Dict{String,Any}() properties["length"] = 3 properties["tp"] = 0.9 #QQQQ properties["mean"] = [1.0,1.0] #QQQQ properties["covariance"] = [1.0 0; 0 1.0] #QQQQ properties["moment123"] = 2.3 #QQQQ (MvNormal(μₑ,Σₑ), BoxTruncationRegion(a,b),properties) end function normal4a() μₑ = [3.5,2,3.5,3.5] Σₑ = [ 7. 1 0 1 ; 1 3.3 2 0 ; 0 2 3.4 0 ; 1 0 0 4 ] a = [-20. ,-20, -20 ,-20] b = [20. ,20. ,20, 20] properties = Dict{String,Any}() properties["length"] = 4 properties["tp"] = 0.9 #QQQQ properties["mean"] = [1.0,1.0] #QQQQ properties["covariance"] = [1.0 0; 0 1.0] #QQQQ properties["moment123"] = 2.3 #QQQQ (MvNormal(μₑ,Σₑ), BoxTruncationRegion(a,b),properties) end function normal5a() μₑ = [3.5,2,3.5,3.5,5.3] Σₑ = [ 7. 1 0 0 1 ; 1 3.3 2 0 0 ; 0 2 3.4 0 0 ; 0 0 0 4 0; 1 0 0 0 2] a = [-20.,-20,-20,-20,-20] b = [20.,20.,20,20,20] properties = Dict{String,Any}() properties["length"] = 5 properties["tp"] = 0.9 #QQQQ properties["mean"] = [1.0,1.0] #QQQQ properties["covariance"] = [1.0 0; 0 1.0] #QQQQ properties["moment123"] = 2.3 #QQQQ (MvNormal(μₑ,Σₑ), BoxTruncationRegion(a,b),properties) end distribution_generators2 = [normal2a,normal2b,normal2c] distribution_generators3 = [normal3a] distribution_generators4 = [normal4a] distribution_generators5 = [normal5a] distribution_generators = vcat(distribution_generators2,distribution_generators3,distribution_generators4,distribution_generators5)
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
code
1810
cd(@__DIR__) using Pkg; Pkg.activate("..") using Revise # Pkg.precompile() using TruncatedDistributions using Test using Distributions using HCubature using PDMats using Plots include("exampleDists.jl") function test_distributions(dist_gen) for dg in dist_gen d, r, properties = dg() d_basic = BasicBoxTruncatedMvNormal(d.μ, d.Σ, r.a, r.b) @test length(d_basic) == properties["length"] @test tp(d_basic) ≈ properties["tp"] @test mean(d_basic) ≈ properties["mean"] d_recursive = RecursiveMomentsBoxTruncatedMvNormal(d.μ, d.Σ, r.a, r.b) @test length(d_recursive) == properties["length"] @test tp(d_recursive) ≈ properties["tp"] @test mean(d_recursive) ≈ properties["mean"] end end test_distributions(distribution_generators2) # test_distributions(distribution_generators3) # test_distributions(distribution_generators4) # test_distributions(distribution_generators5) # dg = distribution_generators2[1] # d, r, _ = dg() # dt = RecursiveMomentsBoxTruncatedMvNormal(d.μ, d.Σ, r.a, r.b) # mean(dt) # H = [1 -0.5; # -0.5 1.] # h = [.0,-0.5] # c = 5.0 # r = EllipticalTruncationRegion(PDMat(H),h,c) # x = [0.1,0.3] # intruncationregion(r,x) # points = [rand(Uniform(-5,5),2) for _ in 1:10^5] # in_points = filter((x)->intruncationregion(r,x),points) # dist = TruncatedMvDistribution{MvNormal,EllipticalTruncationRegion,TruncatedMvDistributionSecondOrderState}( # MvNormal([8.5,0],[1 0; 0 5]), # EllipticalTruncationRegion(PDMat(H),h,c)) # rand_points = [rand(dist) for _ in 1:10^3] # scatter(first.(in_points),last.(in_points),legend=false,xlim=(-5,5),ylim=(-5,5)) # scatter!(first.(rand_points),last.(rand_points))
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
docs
988
# TruncatedDistributions

This package provides support for univariate and multivariate truncated distributions in Julia. The functionality provided here extends the [univariate truncated distributions support](https://juliastats.org/Distributions.jl/latest/truncate/) that is in [Distributions.jl](https://github.com/JuliaStats/Distributions.jl), Julia's main distributions package.

Key functionality is support for box truncated multivariate Normal (Gaussian) distributions. Further functionality is provided for both univariate and multivariate distributions: beyond basic distribution operations, the package provides functions for fitting parameters to desired moments.

At the moment the only supported multivariate distribution is the Multivariate Normal with box truncation (truncation to an axis-aligned box region). For univariate distributions, essentially any distribution can be truncated using the truncation mechanism from Distributions.jl, and the functions of this package can then be applied.
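A minimal quickstart, using the `BasicBoxTruncatedMvNormal` constructor exported by the package (the parameter values are illustrative, not from any shipped example):

```julia
using TruncatedDistributions, Distributions, PDMats

# Standard bivariate normal truncated to the unit box.
d = BasicBoxTruncatedMvNormal([0.0, 0.0], PDMat([1.0 0.0; 0.0 1.0]),
                              [-1.0, -1.0], [1.0, 1.0])

tp(d)     # probability the untruncated distribution assigns to the box
mean(d)   # mean of the truncated distribution (computed via cubature)
cov(d)    # covariance of the truncated distribution
rand(d)   # a draw, via rejection against the untruncated distribution
```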
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "MIT" ]
0.1.0
1dd436c8c1725f4820f7ad578491be74a2587597
docs
192
# TruncatedDistributions.jl Documentation

```@contents
```

## Functions

```@docs
TruncatedMvDistribution{D <: MultivariateDistribution, R <: TruncationRegion}
```

## Index

```@index
```
TruncatedDistributions
https://github.com/yoninazarathy/TruncatedDistributions.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
692
module AMDocs using Documenter, AcousticMetrics using AcousticMetrics: AcousticMetrics function doit() IN_CI = get(ENV, "CI", nothing)=="true" makedocs(sitename="AcousticMetrics.jl", modules=[AcousticMetrics], doctest=false, format=Documenter.HTML(prettyurls=IN_CI), pages=["Introduction"=>"index.md", "API"=>"api.md", "Theory"=>"theory.md", "Software Quality Assurance"=>"sqa.md", "Developer Notes"=>"dev.md"]) if IN_CI deploydocs(repo="github.com/OpenMDAO/AcousticMetrics.jl.git", devbranch="main") end end if !isinteractive() doit() end end # module
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
1288
module AcousticMetrics using Base.Iterators: Iterators using Base.Order: ord, Forward using FFTW: r2r!, R2HC, HC2R, rfftfreq using FLOWMath: abs_cs_safe using ForwardDiff: ForwardDiff include("constants.jl") include("fourier_transforms.jl") include("narrowband.jl") export AbstractPressureTimeHistory, PressureTimeHistory export AbstractNarrowbandSpectrum export PressureSpectrumAmplitude, PressureSpectrumPhase, MSPSpectrumAmplitude, MSPSpectrumPhase, PowerSpectralDensityAmplitude, PowerSpectralDensityPhase include("integrated.jl") export OASPL include("proportional_bands.jl") export AbstractProportionalBands export ExactProportionalBands export ExactOctaveCenterBands, ExactOctaveLowerBands, ExactOctaveUpperBands export ExactThirdOctaveCenterBands, ExactThirdOctaveLowerBands, ExactThirdOctaveUpperBands export ApproximateOctaveBands, ApproximateOctaveCenterBands, ApproximateOctaveLowerBands, ApproximateOctaveUpperBands export ApproximateThirdOctaveBands, ApproximateThirdOctaveCenterBands, ApproximateThirdOctaveLowerBands, ApproximateThirdOctaveUpperBands export AbstractProportionalBandSpectrum, LazyNBProportionalBandSpectrum, ProportionalBandSpectrum, ProportionalBandSpectrumWithTime, LazyPBSProportionalBandSpectrum include("weighting.jl") export W_A end # module
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
74
# Usual reference pressure for SPL, etc., in Pascals.
const p_ref = 20e-6
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
4155
struct RFFTCache{TVal,TJac}
    val::TVal
    jac::TJac
end

function RFFTCache(::Type{V}, M, N) where {V}
    val = Vector{V}(undef, M)
    jac = Matrix{V}(undef, M, N)
    return RFFTCache(val, jac)
end

function RFFTCache(d)
    M = length(d)
    T = eltype(d)
    N = ForwardDiff.npartials(T)
    V = ForwardDiff.valtype(T)
    return RFFTCache(V, M, N)
end

"""
    rfft!(y, x, cache=nothing)

Calculate the real-input FFT of `x` and store the result in half-complex format in `y`.

Just a wrapper of `FFTW.r2r!(y, FFTW.R2HC)`. The `cache` argument is optional and not used, and is included to keep the function signature the same as the method that takes `Vector`s of `Dual`s.
"""
function rfft!(y, x, cache=nothing)
    y .= x
    r2r!(y, R2HC)
    return nothing
end

function rfft!(dout::AbstractVector{ForwardDiff.Dual{T,V,N}}, d::AbstractVector{ForwardDiff.Dual{T,V,N}}, cache=RFFTCache(V,length(d),N)) where {T,V,N}
    # N is the number of parameters we're taking the derivative wrt.
    # M is the number of inputs to (and outputs of) the FFT.
    M = length(d)
    # I should check that dout and d are the same size.
    ldout = length(dout)
    M == ldout || throw(DimensionMismatch("dout and d should have the same length, but have $ldout and $M, resp."))
    # But now I'd need to be able to pass that to the FFTW library. Will that
    # work? No, because it's a dual number. Bummer. So I'll just have to have a
    # working array, I guess.
    cache.val .= ForwardDiff.value.(d)
    r2r!(cache.val, R2HC)
    # Now I want to do the Jacobian.
    for i in 1:N
        for j in 1:M
            cache.jac[j, i] = ForwardDiff.partials(d[j], i)
        end
    end
    r2r!(cache.jac, R2HC, 1)
    # Now I should be able to set dout.
    for j in 1:M
        dout[j] = ForwardDiff.Dual{T}(cache.val[j], ForwardDiff.Partials(NTuple{N,V}(cache.jac[j,:])))
    end
    return nothing
end

function rfft(x)
    y = similar(x)
    rfft!(y, x)
    return y
end

"""
    irfft!(y, x, cache=nothing)

Calculate the inverse FFT of `x` and store the result in `y`, where `x` is in the half-complex format.

Just a wrapper of `FFTW.r2r!(y, FFTW.HC2R)`. The `cache` argument is optional and not used, and is included to keep the function signature the same as the method that takes `Vector`s of `Dual`s.
"""
function irfft!(y, x, cache=nothing)
    y .= x
    r2r!(y, HC2R)
    return nothing
end

function irfft!(dout::AbstractVector{ForwardDiff.Dual{T,V,N}}, d::AbstractVector{ForwardDiff.Dual{T,V,N}}, cache=RFFTCache(V,length(d),N)) where {T,V,N}
    # N is the number of parameters we're taking the derivative wrt.
    # M is the number of inputs to (and outputs of) the FFT.
    M = length(d)
    # I should check that dout and d are the same size.
    ldout = length(dout)
    M == ldout || throw(DimensionMismatch("dout and d should have the same length, but have $ldout and $M, resp."))
    # But now I'd need to be able to pass that to the FFTW library. Will that
    # work? No, because it's a dual number. Bummer. So I'll just have to have a
    # working array, I guess.
    cache.val .= ForwardDiff.value.(d)
    r2r!(cache.val, HC2R)
    # Now I want to do the Jacobian.
    for i in 1:N
        for j in 1:M
            cache.jac[j, i] = ForwardDiff.partials(d[j], i)
        end
    end
    r2r!(cache.jac, HC2R, 1)
    # Now I should be able to set dout.
    for j in 1:M
        dout[j] = ForwardDiff.Dual{T}(cache.val[j], ForwardDiff.Partials(NTuple{N,V}(cache.jac[j,:])))
    end
    return nothing
end

function irfft(x)
    y = similar(x)
    irfft!(y, x)
    return y
end

# Wish this was implemented in FFTW.jl. Actually, I think it may be. Ah, but
# only for the real-input FFT that returns a complex array, which I don't want.
# Sad. Oh, wait: but are the frequencies at fftfreq? I'll need to look at that
# again. Need to write that up once I figure it out.
function r2rfftfreq(n, d=1.0)
    # http://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html
    freq = vcat(0:floor(Int, n/2), floor(Int, (n+1)/2)-1:-1:1)
    # Get the period.
    T = n*d
    return freq./T
end
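A small sketch of the half-complex round trip; as noted elsewhere in the package, FFTW's `r2r!` transforms are unnormalized, so recovering the input requires dividing by `n`. These helpers are internal (not exported), so outside the module they would need qualification with the module name:

```julia
n = 8
x = randn(n)
y = similar(x)
rfft!(y, x)          # y holds the DFT of x in half-complex format
z = irfft(y) ./ n    # HC2R of R2HC yields n .* x, hence the division
z ≈ x                # true

r2rfftfreq(n, 1/n)   # bin frequencies: [0.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0]
```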
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
581
""" OASPL(ap::AbstractPressureTimeHistory) Return the overall sound pressure level of a pressure time history. """ function OASPL(ap::AbstractPressureTimeHistory) p = pressure(ap) n = inputlength(ap) p_mean = sum(p)/n msp = sum((p .- p_mean).^2)/n return 10*log10(msp/p_ref^2) end """ OASPL(ap::AbstractNarrowbandSpectrum) Return the overall sound pressure level of a narrowband spectrum. """ function OASPL(sp::AbstractNarrowbandSpectrum) amp = MSPSpectrumAmplitude(sp) msp = sum(@view amp[begin+1:end]) return 10*log10(msp/p_ref^2) end
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
20925
""" AbstractPressureTimeHistory{IsEven} Supertype for a pressure time history, i.e., pressure as a function of time defined on evenly-spaced time samples. The `IsEven` parameter is a `Bool` indicating if the length of the pressure time history is even or not. """ abstract type AbstractPressureTimeHistory{IsEven} end """ PressureTimeHistory{IsEven} <: AbstractPressureTimeHistory{IsEven} Pressure as a function of time defined on evenly-spaced time samples. The `IsEven` parameter is a `Bool` indicating if the length of the pressure time history is even or not. """ struct PressureTimeHistory{IsEven,Tp,Tdt,Tt0} <: AbstractPressureTimeHistory{IsEven} p::Tp dt::Tdt t0::Tt0 function PressureTimeHistory{IsEven}(p, dt, t0) where {IsEven} n = length(p) iseven(n) == IsEven || throw(ArgumentError("IsEven = $(IsEven) is not consistent with length(p) = $n")) return new{IsEven, typeof(p), typeof(dt), typeof(t0)}(p, dt, t0) end end """ PressureTimeHistory(p, dt, t0=zero(dt)) Construct a `PressureTimeHistory` from a vector of pressures `p`, time spacing `dt`, and initial time `t0`. """ function PressureTimeHistory(p, dt, t0=zero(dt)) # TODO: it would be nice to have a constructor that allows for a default value of t0 and explicitly set the value of the `IsEven` parameter. n = length(p) return PressureTimeHistory{iseven(n)}(p, dt, t0) end """ pressure(pth::AbstractPressureTimeHistory) Return a vector of pressures associated with a pressure time history. """ @inline pressure(pth::AbstractPressureTimeHistory) = pth.p """ inputlength(pth::AbstractPressureTimeHistory) Return a number of pressure samples associated with a pressure time history. """ @inline inputlength(pth::AbstractPressureTimeHistory) = length(pressure(pth)) """ timestep(pth::AbstractPressureTimeHistory) Return the time step size `dt` associated with a pressure time history. """ @inline timestep(pth::AbstractPressureTimeHistory) = pth.dt """ starttime(pth::AbstractPressureTimeHistory) Return the initial time `t0` associated with a pressure time history. """ @inline starttime(pth::AbstractPressureTimeHistory) = pth.t0 """ time(pth::AbstractPressureTimeHistory) Return a vector of times associated with a pressure time history. """ @inline function time(pth::AbstractPressureTimeHistory) n = inputlength(pth) return starttime(pth) .+ (0:n-1) .* timestep(pth) end """ AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} <: AbstractVector{Tel} Supertype for a generic narrowband acoustic metric which will behave as an immutable `AbstractVector` of element type `Tel`. The `IsEven` parameter is a `Bool` indicating if the length of the spectrum is even or not, affecting how the Nyquist frequency is calculated. `IsTonal` indicates how the acoustic energy is distributed through the frequency bands: * `IsTonal == false` means the acoustic energy is assumed to be evenly distributed thoughout each band * `IsTonal == true` means the acoustic energy is assumed to be concentrated at each band center """ abstract type AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} <: AbstractVector{Tel} end """ halfcomplex(sm::AbstractNarrowbandSpectrum) Return a vector of the discrete Fourier transform of the pressure time history in half-complex format. See the FFTW docs for the definition of the [halfcomplex format](https://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html). """ @inline halfcomplex(sm::AbstractNarrowbandSpectrum) = sm.hc """ timestep(sm::AbstractNarrowbandSpectrum) Return the time step size `dt` associated with a narrowband spectrum. 
""" @inline timestep(sm::AbstractNarrowbandSpectrum) = sm.dt """ starttime(sm::AbstractNarrowbandSpectrum) Return the initial time `t0` associated with a pressure time history. """ @inline starttime(sm::AbstractNarrowbandSpectrum) = sm.t0 """ inputlength(sm::AbstractNarrowbandSpectrum) Return a number of pressure time samples associated with a narrowband spectrum. This is also the length of the discrete Fourier transform associated with the spectrum in [half-complex format](https://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html). """ @inline inputlength(sm::AbstractNarrowbandSpectrum) = length(halfcomplex(sm)) """ samplerate(sm::AbstractNarrowbandSpectrum) Return the sample rate (aka the inverse of the time step size) associated with a narrowband spectrum. """ @inline samplerate(sm::AbstractNarrowbandSpectrum) = 1/timestep(sm) """ frequency(sm::AbstractNarrowbandSpectrum) Return a vector of frequencies associated with the narrowband spectrum. The frequencies are calculated using the `rfftfreq` function in the FFTW.jl package. """ @inline frequency(sm::AbstractNarrowbandSpectrum) = rfftfreq(inputlength(sm), samplerate(sm)) """ frequencystep(sm::AbstractNarrowbandSpectrum) Return the frequency step size `Δf` associated with the narrowband spectrum. """ @inline function frequencystep(sm::AbstractNarrowbandSpectrum) m = inputlength(sm) df = 1/(timestep(sm)*m) return df end """ istonal(sm::AbstractNarrowbandSpectrum) Return `true` if the spectrum is tonal, `false` otherwise. """ @inline istonal(sm::AbstractNarrowbandSpectrum{IsEven,IsTonal}) where {IsEven,IsTonal} = IsTonal """ PressureTimeHistory(sm::AbstractNarrowbandSpectrum, p=similar(halfcomplex(sm))) Construct a pressure time history from a narrowband spectrum `sm`. The optional `p` argument will be used to store the pressure vector of the pressure time history, and should have length `inputlength(sm)`. """ function PressureTimeHistory(sm::AbstractNarrowbandSpectrum, p=similar(halfcomplex(sm))) hc = halfcomplex(sm) # Get the inverse FFT of the pressure spectrum. irfft!(p, hc) # Need to divide by the input length since FFTW computes an "unnormalized" FFT. p ./= inputlength(sm) return PressureTimeHistory(p, timestep(sm), starttime(sm)) end @inline function Base.size(sm::AbstractNarrowbandSpectrum) # So, what's the maximum and minimum index? # Minimum is 1, aka 0 + 1. # Max is n/2 (rounded down) + 1 n = inputlength(sm) return (n>>1 + 1,) end """ PressureSpectrumAmplitude{IsEven,IsTonal,Tel} <: AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} Representation of acoustic pressure amplitude as a function of narrowband frequency. The `IsEven` parameter is a `Bool` indicating if the length of the spectrum is even or not, affecting how the Nyquist frequency is calculated. The `IsTonal` `Bool` parameter, if `true`, indicates the pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. 
""" struct PressureSpectrumAmplitude{IsEven,IsTonal,Tel,Thc,Tdt,Tt0} <: AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} hc::Thc dt::Tdt t0::Tt0 function PressureSpectrumAmplitude{IsEven,IsTonal}(hc, dt, t0) where {IsEven,IsTonal} n = length(hc) iseven(n) == IsEven || throw(ArgumentError("IsEven = $(IsEven) is not consistent with length(hc) = $n")) typeof(IsTonal) === Bool || throw(ArgumentError("typeof(IsTonal) should be Bool")) return new{IsEven, IsTonal, eltype(hc), typeof(hc), typeof(dt), typeof(t0)}(hc, dt, t0) end end """ PressureSpectrumAmplitude(hc, dt, t0=zero(dt), istonal::Bool=false) Construct a narrowband spectrum of the pressure amplitude from the discrete Fourier transform in half-complex format `hc`, time step size `dt`, and initial time `t0`. The `istonal` `Bool` argument, if `true`, indicates the pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. """ function PressureSpectrumAmplitude(hc, dt, t0=zero(dt), istonal::Bool=false) n = length(hc) return PressureSpectrumAmplitude{iseven(n),istonal}(hc, dt, t0) end """ PressureSpectrumAmplitude(sm::AbstractNarrowbandSpectrum) Construct a narrowband spectrum of the pressure amplitude from another narrowband spectrum. """ PressureSpectrumAmplitude(sm::AbstractNarrowbandSpectrum{IsEven,IsTonal}) where {IsEven,IsTonal} = PressureSpectrumAmplitude{IsEven,IsTonal}(halfcomplex(sm), timestep(sm), starttime(sm)) """ PressureSpectrumAmplitude(pth::AbstractPressureTimeHistory, istonal::Bool=false, hc=similar(pressure(pth))) Construct a narrowband spectrum of the pressure amplitude from a pressure time history. The optional argument `hc` will be used to store the discrete Fourier transform of the pressure time history, and should have length of `inputlength(pth)`. The `istonal` `Bool` argument, if `true`, indicates the pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. """ function PressureSpectrumAmplitude(pth::AbstractPressureTimeHistory, istonal::Bool=false, hc=similar(pressure(pth))) p = pressure(pth) # Get the FFT of the acoustic pressure. rfft!(hc, p) return PressureSpectrumAmplitude(hc, timestep(pth), starttime(pth), istonal) end @inline function Base.getindex(psa::PressureSpectrumAmplitude{false}, i::Int) @boundscheck checkbounds(psa, i) m = inputlength(psa) if i == 1 @inbounds hc_real = psa.hc[i]/m return abs(hc_real) else @inbounds hc_real = psa.hc[i]/m @inbounds hc_imag = psa.hc[m-i+2]/m return 2*sqrt(hc_real^2 + hc_imag^2) end end @inline function Base.getindex(psa::PressureSpectrumAmplitude{true}, i::Int) @boundscheck checkbounds(psa, i) m = inputlength(psa) if i == 1 || i == length(psa) @inbounds hc_real = psa.hc[i]/m return abs(hc_real) else @inbounds hc_real = psa.hc[i]/m @inbounds hc_imag = psa.hc[m-i+2]/m return 2*sqrt(hc_real^2 + hc_imag^2) end end """ PressureSpectrumPhase{IsEven,IsTonal,Tel} <: AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} Representation of acoustic pressure phase as a function of narrowband frequency. The `IsEven` parameter is a `Bool` indicating if the length of the spectrum is even or not, affecting how the Nyquist frequency is calculated. The `IsTonal` `Bool` parameter, if `true`, indicates the phase spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. 
""" struct PressureSpectrumPhase{IsEven,IsTonal,Tel,Thc,Tdt,Tt0} <: AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} hc::Thc dt::Tdt t0::Tt0 function PressureSpectrumPhase{IsEven,IsTonal}(hc, dt, t0) where {IsEven,IsTonal} n = length(hc) iseven(n) == IsEven || throw(ArgumentError("IsEven = $(IsEven) is not consistent with length(hc) = $n")) typeof(IsTonal) === Bool || throw(ArgumentError("typeof(IsTonal) should be Bool")) return new{IsEven, IsTonal, eltype(hc), typeof(hc), typeof(dt), typeof(t0)}(hc, dt, t0) end end """ PressureSpectrumPhase(hc, dt, t0=zero(dt), istonal::Bool=false) Construct a narrowband spectrum of the pressure phase from the discrete Fourier transform in half-complex format `hc`, time step size `dt`, and initial time `t0`. """ function PressureSpectrumPhase(hc, dt, t0=zero(dt), istonal::Bool=false) n = length(hc) return PressureSpectrumPhase{iseven(n),istonal}(hc, dt, t0) end """ PressureSpectrumPhase(sm::AbstractNarrowbandSpectrum) Construct a narrowband spectrum of the pressure phase from another narrowband spectrum. """ PressureSpectrumPhase(sm::AbstractNarrowbandSpectrum{IsEven,IsTonal}) where {IsEven,IsTonal} = PressureSpectrumPhase{IsEven,IsTonal}(halfcomplex(sm), timestep(sm), starttime(sm)) """ PressureSpectrumPhase(pth::AbstractPressureTimeHistory, istonal::Bool=false, hc=similar(pressure(pth))) Construct a narrowband spectrum of the pressure phase from a pressure time history. The optional argument `hc` will be used to store the discrete Fourier transform of the pressure time history, and should have length of `inputlength(pth)`. The `istonal` `Bool` argument, if `true`, indicates the pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. """ function PressureSpectrumPhase(pth::AbstractPressureTimeHistory, istonal::Bool=false, hc=similar(pressure(pth))) p = pressure(pth) # Get the FFT of the acoustic pressure. rfft!(hc, p) return PressureSpectrumPhase(hc, timestep(pth), starttime(pth), istonal) end @inline function Base.getindex(psp::PressureSpectrumPhase{false}, i::Int) @boundscheck checkbounds(psp, i) m = inputlength(psp) if i == 1 @inbounds hc_real = psp.hc[i]/m hc_imag = zero(eltype(halfcomplex(psp))) phase_t0 = atan(hc_imag, hc_real) else @inbounds hc_real = psp.hc[i]/m @inbounds hc_imag = psp.hc[m-i+2]/m phase_t0 = atan(hc_imag, hc_real) end return rem2pi(phase_t0 - 2*pi*frequency(psp)[i]*starttime(psp), RoundNearest) end @inline function Base.getindex(psp::PressureSpectrumPhase{true}, i::Int) @boundscheck checkbounds(psp, i) m = inputlength(psp) if i == 1 || i == length(psp) @inbounds hc_real = psp.hc[i]/m hc_imag = zero(eltype(halfcomplex(psp))) phase_t0 = atan(hc_imag, hc_real) else @inbounds hc_real = psp.hc[i]/m @inbounds hc_imag = psp.hc[m-i+2]/m phase_t0 = atan(hc_imag, hc_real) end return rem2pi(phase_t0 - 2*pi*frequency(psp)[i]*starttime(psp), RoundNearest) end """ MSPSpectrumAmplitude{IsEven,IsTonal,Tel} <: AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} Representation of mean-squared pressure amplitude as a function of narrowband frequency. The `IsEven` parameter is a `Bool` indicating if the length of the spectrum is even or not, affecting how the Nyquist frequency is calculated. The `IsTonal` `Bool` parameter, if `true`, indicates the mean-squared pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the pressure spectrum is assumed to be constant over each frequency band. 
""" struct MSPSpectrumAmplitude{IsEven,IsTonal,Tel,Thc,Tdt,Tt0} <: AbstractNarrowbandSpectrum{IsEven,IsTonal,Tel} hc::Thc dt::Tdt t0::Tt0 function MSPSpectrumAmplitude{IsEven,IsTonal}(hc, dt, t0) where {IsEven,IsTonal} n = length(hc) iseven(n) == IsEven || throw(ArgumentError("IsEven = $(IsEven) is not consistent with length(hc) = $n")) typeof(IsTonal) === Bool || throw(ArgumentError("typeof(IsTonal) should be Bool")) return new{IsEven, IsTonal, eltype(hc), typeof(hc), typeof(dt), typeof(t0)}(hc, dt, t0) end end """ MSPSpectrumAmplitude(hc, dt, t0=zero(dt), istonal::Bool=false) Construct a narrowband spectrum of the mean-squared pressure amplitude from the discrete Fourier transform in half-complex format `hc`, time step size `dt`, and initial time `t0`. The `istonal` `Bool` argument, if `true`, indicates the pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. """ function MSPSpectrumAmplitude(hc, dt, t0=zero(dt), istonal::Bool=false) n = length(hc) return MSPSpectrumAmplitude{iseven(n),istonal}(hc, dt, t0) end """ MSPSpectrumAmplitude(sm::AbstractNarrowbandSpectrum) Construct a narrowband spectrum of the mean-squared pressure amplitude from another narrowband spectrum. """ MSPSpectrumAmplitude(sm::AbstractNarrowbandSpectrum{IsEven,IsTonal}) where {IsEven,IsTonal} = MSPSpectrumAmplitude{IsEven,IsTonal}(halfcomplex(sm), timestep(sm), starttime(sm)) """ MSPSpectrumAmplitude(pth::AbstractPressureTimeHistory, istonal::Bool=false, hc=similar(pressure(pth))) Construct a narrowband spectrum of the mean-squared pressure amplitude from a pressure time history. The optional argument `hc` will be used to store the discrete Fourier transform of the pressure time history, and should have length of `inputlength(pth)`. The `istonal` `Bool` argument, if `true`, indicates the pressure spectrum is tonal and thus concentrated at discrete frequencies. If `false`, the spectrum is assumed to be constant over each frequency band. """ function MSPSpectrumAmplitude(pth::AbstractPressureTimeHistory, istonal::Bool=false, hc=similar(pressure(pth))) p = pressure(pth) # Get the FFT of the acoustic pressure. rfft!(hc, p) return MSPSpectrumAmplitude(hc, timestep(pth), starttime(pth), istonal) end @inline function Base.getindex(psa::MSPSpectrumAmplitude{false}, i::Int) @boundscheck checkbounds(psa, i) m = inputlength(psa) if i == 1 @inbounds hc_real = psa.hc[i]/m return hc_real^2 else @inbounds hc_real = psa.hc[i]/m @inbounds hc_imag = psa.hc[m-i+2]/m return 2*(hc_real^2 + hc_imag^2) end end @inline function Base.getindex(psa::MSPSpectrumAmplitude{true}, i::Int) @boundscheck checkbounds(psa, i) m = inputlength(psa) if i == 1 || i == length(psa) @inbounds hc_real = psa.hc[i]/m return hc_real^2 else @inbounds hc_real = psa.hc[i]/m @inbounds hc_imag = psa.hc[m-i+2]/m return 2*(hc_real^2 + hc_imag^2) end end """ MSPSpectrumPhase Alias for `PressureSpectrumPhase`. """ const MSPSpectrumPhase = PressureSpectrumPhase """ PowerSpectralDensityAmplitude{IsEven,Tel} <: AbstractNarrowbandSpectrum{IsEven,false,Tel} Representation of acoustic power spectral density amplitude as a function of narrowband frequency. The `IsEven` parameter is a `Bool` indicating if the length of the spectrum is even or not, affecting how the Nyquist frequency is calculated. As the power spectral density is not well-defined for tones, the `IsTonal` parameter is always `false`. 
""" struct PowerSpectralDensityAmplitude{IsEven,Tel,Thc,Tdt,Tt0} <: AbstractNarrowbandSpectrum{IsEven,false,Tel} hc::Thc dt::Tdt t0::Tt0 function PowerSpectralDensityAmplitude{IsEven}(hc, dt, t0) where {IsEven} n = length(hc) iseven(n) == IsEven || throw(ArgumentError("IsEven = $(IsEven) is not consistent with length(hc) = $n")) return new{IsEven, eltype(hc), typeof(hc), typeof(dt), typeof(t0)}(hc, dt, t0) end end """ PowerSpectralDensityAmplitude(hc, dt, t0=zero(dt)) Construct a narrowband spectrum of the power spectral density amplitude from the discrete Fourier transform in half-complex format `hc`, time step size `dt`, and initial time `t0`. """ function PowerSpectralDensityAmplitude(hc, dt, t0=zero(dt)) n = length(hc) return PowerSpectralDensityAmplitude{iseven(n)}(hc, dt, t0) end """ PowerSpectralDensityAmplitude(sm::AbstractNarrowbandSpectrum) Construct a narrowband spectrum of the power spectral density amplitude from another narrowband spectrum. """ PowerSpectralDensityAmplitude(sm::AbstractNarrowbandSpectrum{IsEven,false}) where {IsEven} = PowerSpectralDensityAmplitude(halfcomplex(sm), timestep(sm), starttime(sm)) PowerSpectralDensityAmplitude(sm::AbstractNarrowbandSpectrum{IsEven,true}) where {IsEven} = throw(ArgumentError("IsTonal == true parameter cannot be used with PowerSpectralDensityAmplitude type")) """ PowerSpectralDensityAmplitude(pth::AbstractPressureTimeHistory, hc=similar(pressure(pth))) Construct a narrowband spectrum of the power spectral density amplitude from a pressure time history. The optional argument `hc` will be used to store the discrete Fourier transform of the pressure time history, and should have length of `inputlength(pth)`. """ function PowerSpectralDensityAmplitude(pth::AbstractPressureTimeHistory, hc=similar(pressure(pth))) p = pressure(pth) # Get the FFT of the acoustic pressure. rfft!(hc, p) return PowerSpectralDensityAmplitude(hc, timestep(pth), starttime(pth)) end @inline function Base.getindex(psa::PowerSpectralDensityAmplitude{false}, i::Int) @boundscheck checkbounds(psa, i) m = inputlength(psa) df = frequencystep(psa) if i == 1 @inbounds hc_real = psa.hc[i]/m return hc_real^2/df else @inbounds hc_real = psa.hc[i]/m @inbounds hc_imag = psa.hc[m-i+2]/m return 2*(hc_real^2 + hc_imag^2)/df end end @inline function Base.getindex(psa::PowerSpectralDensityAmplitude{true}, i::Int) @boundscheck checkbounds(psa, i) m = inputlength(psa) df = frequencystep(psa) if i == 1 || i == length(psa) @inbounds hc_real = psa.hc[i]/m return hc_real^2/df else @inbounds hc_real = psa.hc[i]/m @inbounds hc_imag = psa.hc[m-i+2]/m return 2*(hc_real^2 + hc_imag^2)/df end end """ PowerSpectralDensityPhase Alias for `PressureSpectrumPhase`. """ const PowerSpectralDensityPhase = PressureSpectrumPhase
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
57028
""" AbstractProportionalBands{NO,LCU,TF} <: AbstractVector{TF} Abstract type representing the exact proportional frequency bands with band fraction `NO` and `eltype` `TF`. The `LCU` parameter can take one of three values: * `:lower`: The `struct` returns the lower edges of each frequency band. * `:center`: The `struct` returns the center of each frequency band. * `:upper`: The `struct` returns the upper edges of each frequency band. """ abstract type AbstractProportionalBands{NO,LCU,TF} <: AbstractVector{TF} end """ octave_fraction(bands::AbstractProportionalBands{NO}) where {NO} Return `NO`, the "octave fraction," e.g. `1` for octave bands, `3` for third-octave, `12` for twelfth-octave. """ octave_fraction(::AbstractProportionalBands{NO}) where {NO} = NO octave_fraction(::Type{<:AbstractProportionalBands{NO}}) where {NO} = NO """ lower_center_upper(bands::AbstractProportionalBands{NO,LCU,TF}) where {NO,LCU,TF} Return `LCU`, which can be either `:lower`, `:center`, `:upper`, indicating if `bands` represents the lower edges, centers, or upper edges of proportional bands, respectively. """ lower_center_upper(bands::AbstractProportionalBands{NO,LCU,TF}) where {NO,LCU,TF} = lower_center_upper(typeof(bands)) lower_center_upper(::Type{<:AbstractProportionalBands{NO,LCU,TF}}) where {NO,LCU,TF} = LCU """ freq_scaler(bands::AbstractProportionalBands) Return the factor each "standard" frequency band is scaled by. For example, the approximate octave center bands include 1000 Hz, 2000 Hz, and 4000 Hz. If `freq_scaler(bands) == 1.0`, then these frequencies would be unchanged. If `freq_scaler(bands) == 1.5`, then `bands` would include 1500 Hz, 3000 Hz, and 6000 Hz instead. If `freq_scaler(bands) == 0.5`, then `bands` would include 500 Hz, 1000 Hz, and 2000 Hz in place of 1000 Hz, 2000 Hz, and 4000 Hz. """ @inline freq_scaler(bands::AbstractProportionalBands) = bands.scaler """ band_start(bands::AbstractProportionalBands) Return the standard band index number for the first band in `bands`. For example, it happens that the approximate octave center bands includes 1000 Hz, and that particular band is numbered `10`. So if the first band contained in `bands` happens to be 1000 Hz (and `freq_scaler(bands) == 1.0`), then `band_start(bands) == 10`. Not particularly useful to a user. """ @inline band_start(bands::AbstractProportionalBands) = bands.bstart """ band_end(bands::AbstractProportionalBands) Return the standard band index number for the last band in `bands`. For example, it happens that the approximate octave center bands includes 1000 Hz, and that particular band is numbered `10`. So if the last band contained in `bands` happens to be 1000 Hz (and `freq_scaler(bands) == 1.0`), then `band_end(bands) == 10`. Not particularly useful to a user. """ @inline band_end(bands::AbstractProportionalBands) = bands.bend @inline function Base.size(bands::AbstractProportionalBands) return (band_end(bands) - band_start(bands) + 1,) end """ lower_bands(TBands::Type{<:AbstractProportionalBands{NO}}, fstart::TF, fend::TF, scaler=1) where {NO,TF} Construct and return the lower edges of the proportional bands `TBands`, scaled by `scaler`, that would fully encompass a frequency range beginning with `fstart` and ending with `fend`. 
""" function lower_bands(TBands::Type{<:AbstractProportionalBands{NO}}, fstart::TF, fend::TF, scaler=1) where {NO,TF} return TBands{:lower}(fstart, fend, scaler) end """ upper_bands(TBands::Type{<:AbstractProportionalBands{NO}}, fstart::TF, fend::TF, scaler=1) where {NO,TF} Construct and return the upper edges of the proportional bands `TBands`, scaled by `scaler`, that would fully encompass a frequency range beginning with `fstart` and ending with `fend`. """ function upper_bands(TBands::Type{<:AbstractProportionalBands{NO}}, fstart::TF, fend::TF, scaler=1) where {NO,TF} return TBands{:upper}(fstart, fend, scaler) end """ center_bands(TBands::Type{<:AbstractProportionalBands{NO}}, fstart::TF, fend::TF, scaler=1) where {NO,TF} Construct and return the centers of the proportional bands `TBands`, scaled by `scaler`, that would fully encompass a frequency range beginning with `fstart` and ending with `fend`. """ function center_bands(TBands::Type{<:AbstractProportionalBands{NO}}, fstart::TF, fend::TF, scaler=1) where {NO,TF} return TBands{:center}(fstart, fend, scaler) end """ cband_number(bands::AbstractProportionalBands, fc) Return the standard band index number of the band with center frequency `fc` for proportional bands `bands`. For example, if `bands` is a subtype of `ApproximateOctaveBands` and `freq_scaler(bands) == 1.0`, then `cband_number(bands, 1000.0) == 10`. """ cband_number(bands::AbstractProportionalBands, fc) = cband_number(typeof(bands), fc, freq_scaler(bands)) const f0_exact = 1000 const fmin_exact = 1 """ ExactProportionalBands{NO,LCU,TF} <: AbstractProportionalBands{NO,LCU,TF} Representation of the exact proportional frequency bands with band fraction `NO` and `eltype` `TF`. The `LCU` parameter can take one of three values: * `:lower`: The `struct` returns the lower edges of each frequency band. * `:center`: The `struct` returns the center of each frequency band. * `:upper`: The `struct` returns the upper edges of each frequency band. """ ExactProportionalBands struct ExactProportionalBands{NO,LCU,TF} <: AbstractProportionalBands{NO,LCU,TF} bstart::Int bend::Int f0::TF scaler::TF function ExactProportionalBands{NO,LCU,TF}(bstart::Int, bend::Int, scaler=1) where {NO,LCU,TF} NO > 0 || throw(ArgumentError("Octave band fraction NO must be greater than 0")) LCU in (:lower, :center, :upper) || throw(ArgumentError("LCU must be one of :lower, :center, :upper")) bend >= bstart || throw(ArgumentError("bend should be greater than or equal to bstart")) scaler > 0 || throw(ArgumentError("non-positive scaler argument not supported")) return new{NO,LCU,TF}(bstart, bend, TF(f0_exact), TF(scaler)) end end """ ExactProportionalBands{NO,LCU}(TF=Float64, bstart::Int, bend::Int, scaler=1) Construct an `ExactProportionalBands` with `eltype` `TF` encomposing band index numbers from `bstart` to `bend`. The "standard" band frequencies will be scaled by `scaler`, e.g. if `scaler = 0.5` then what would normally be the `1000 Hz` frequency will be `500 Hz`, etc.. 
""" function ExactProportionalBands{NO,LCU}(TF::Type, bstart::Int, bend::Int, scalar=1) where {NO,LCU} return ExactProportionalBands{NO,LCU,TF}(bstart, bend, scalar) end function ExactProportionalBands{NO,LCU}(bstart::Int, bend::Int, scaler=1) where {NO,LCU} return ExactProportionalBands{NO,LCU}(Float64, bstart, bend, scaler) end @inline band_exact_lower_limit(NO, fl, scaler) = floor(Int, 1/2 + NO*log2(fl/(f0_exact*scaler)) + 10*NO) @inline band_exact_upper_limit(NO, fu, scaler) = ceil(Int, -1/2 + NO*log2(fu/(f0_exact*scaler)) + 10*NO) function _cband_exact(NO, fc, scaler) # f = 2^((b - 10*NO)/NO)*f0 # f/f0 = 2^((b - 10*NO)/NO) # log2(f/f0) = log2(2^((b - 10*NO)/NO)) # log2(f/f0) = ((b - 10*NO)/NO) # log2(f/f0)*NO = b - 10*NO # log2(f/f0)*NO + 10*NO = b # b = log2(f/f0)*NO + 10*NO # Get the band number from a center band frequency `fc`. log2_fc_over_f0_exact_NO = log2(fc/(f0_exact*scaler))*NO # Check that the result will be very close to an integer. rounded = round(Int, log2_fc_over_f0_exact_NO) tol = 10*eps(fc) abs_cs_safe(log2_fc_over_f0_exact_NO - rounded) < tol || throw(ArgumentError("fc does not correspond to a center-band frequency")) b = rounded + 10*NO return b end function cband_number(::Type{<:ExactProportionalBands{NO}}, fc, scaler) where {NO} return _cband_exact(NO, fc, scaler) end """ ExactProportionalBands{NO,LCU}(fstart::TF, fend::TF, scaler) Construct an `ExactProportionalBands` with `eltype` `TF`, scaled by `scaler`, encomposing the bands needed to completely extend over minimum frequency `fstart` and maximum frequency `fend`. """ ExactProportionalBands{NO,LCU}(fstart::TF, fend::TF, scaler=1) where {NO,LCU,TF} = ExactProportionalBands{NO,LCU,TF}(fstart, fend, scaler) ExactProportionalBands{NO,LCU,TF}(fstart::TF, fend::TF, scaler=1) where {NO,LCU,TF} = ExactProportionalBands{NO,LCU,TF}(band_exact_lower_limit(NO, fstart, scaler), band_exact_upper_limit(NO, fend, scaler), scaler) @inline function Base.getindex(bands::ExactProportionalBands{NO,:center}, i::Int) where {NO} @boundscheck checkbounds(bands, i) # Now, how do I get the band? # This is the band number: b = bands.bstart + (i - 1) # So then the center frequency f_c that I want is defined by the function # # b = NO*log2(f_c/f_0) + 10*NO # # where f_0 is the reference frequency, 1000 Hz. # OK, so. 
# 2^((b - 10*NO)/NO)*f_c return 2^((b - 10*NO)/NO)*(bands.f0*freq_scaler(bands)) end @inline function Base.getindex(bands::ExactProportionalBands{NO,:lower}, i::Int) where {NO} @boundscheck checkbounds(bands, i) b = bands.bstart + (i - 1) # return 2^((b - 10*NO)/NO)*(2^(-1/(2*NO)))*bands.f0 # return 2^(2*(b - 10*NO)/(2*NO))*(2^(-1/(2*NO)))*bands.f0 return 2^((2*(b - 10*NO) - 1)/(2*NO))*(bands.f0*freq_scaler(bands)) end @inline function Base.getindex(bands::ExactProportionalBands{NO,:upper}, i::Int) where {NO} @boundscheck checkbounds(bands, i) b = bands.bstart + (i - 1) # return 2^((b - 10*NO)/NO)*(2^(1/(2*NO)))*bands.f0 # return 2^(2*(b - 10*NO)/(2*NO))*(2^(1/(2*NO)))*bands.f0 return 2^((2*(b - 10*NO) + 1)/(2*NO))*(bands.f0*freq_scaler(bands)) end """ ExactOctaveCenterBands{TF} Alias for `ExactProportionalBands{1,:center,TF}` """ const ExactOctaveCenterBands{TF} = ExactProportionalBands{1,:center,TF} """ ExactThirdOctaveCenterBands{TF} Alias for `ExactProportionalBands{3,:center,TF}` """ const ExactThirdOctaveCenterBands{TF} = ExactProportionalBands{3,:center,TF} """ ExactOctaveLowerBands{TF} Alias for `ExactProportionalBands{1,:lower,TF}` """ const ExactOctaveLowerBands{TF} = ExactProportionalBands{1,:lower,TF} """ ExactThirdOctaveLowerBands{TF} Alias for `ExactProportionalBands{3,:lower,TF}` """ const ExactThirdOctaveLowerBands{TF} = ExactProportionalBands{3,:lower,TF} """ ExactOctaveUpperBands{TF} Alias for `ExactProportionalBands{1,:upper,TF}` """ const ExactOctaveUpperBands{TF} = ExactProportionalBands{1,:upper,TF} """ ExactThirdOctaveUpperBands{TF} Alias for `ExactProportionalBands{3,:upper,TF}` """ const ExactThirdOctaveUpperBands{TF} = ExactProportionalBands{3,:upper,TF} """ lower_bands(bands::ExactProportionalBands{NO,LCU,TF}, scaler=freq_scaler(bands)) where {NO,TF} Construct and return the lower edges of the proportional bands `bands` scaled by `scaler`. """ lower_bands(bands::ExactProportionalBands{NO,LCU,TF}, scaler=freq_scaler(bands)) where {NO,LCU,TF} = ExactProportionalBands{NO,:lower,TF}(band_start(bands), band_end(bands), scaler) """ center_bands(bands::ExactProportionalBands{NO,LCU,TF}, scaler=freq_scaler(bands)) where {NO,TF} Construct and return the centers of the proportional bands `bands` scaled by `scaler`. """ center_bands(bands::ExactProportionalBands{NO,LCU,TF}, scaler=freq_scaler(bands)) where {NO,LCU,TF} = ExactProportionalBands{NO,:center,TF}(band_start(bands), band_end(bands), scaler) """ upper_bands(bands::ExactProportionalBands{NO,LCU,TF}, scaler=freq_scaler(bands)) where {NO,TF} Construct and return the upper edges of the proportional bands `bands` scaled by `scaler`. """ upper_bands(bands::ExactProportionalBands{NO,LCU,TF}, scaler=freq_scaler(bands)) where {NO,LCU,TF} = ExactProportionalBands{NO,:upper,TF}(band_start(bands), band_end(bands), scaler) const approx_3rd_octave_cbands_pattern = [1.0, 1.25, 1.6, 2.0, 2.5, 3.15, 4.0, 5.0, 6.3, 8.0] const approx_3rd_octave_lbands_pattern = [0.9, 1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1] const approx_3rd_octave_ubands_pattern = [1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1, 9.0] """ ApproximateThirdOctaveBands{LCU,TF} <: AbstractProportionalBands{3,LCU,TF} Representation of the approximate third-octave proportional frequency bands with `eltype` `TF`. The `LCU` parameter can take one of three values: * `:lower`: The `struct` returns the lower edges of each frequency band. * `:center`: The `struct` returns the center of each frequency band. 
const approx_3rd_octave_cbands_pattern = [1.0, 1.25, 1.6, 2.0, 2.5, 3.15, 4.0, 5.0, 6.3, 8.0]
const approx_3rd_octave_lbands_pattern = [0.9, 1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1]
const approx_3rd_octave_ubands_pattern = [1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1, 9.0]

"""
    ApproximateThirdOctaveBands{LCU,TF} <: AbstractProportionalBands{3,LCU,TF}

Representation of the approximate third-octave proportional frequency bands with `eltype` `TF`.

The `LCU` parameter can take one of three values:

* `:lower`: The `struct` returns the lower edges of each frequency band.
* `:center`: The `struct` returns the center of each frequency band.
* `:upper`: The `struct` returns the upper edges of each frequency band.
"""
struct ApproximateThirdOctaveBands{LCU,TF} <: AbstractProportionalBands{3,LCU,TF}
    bstart::Int
    bend::Int
    scaler::TF

    function ApproximateThirdOctaveBands{LCU,TF}(bstart::Int, bend::Int, scaler=1) where {LCU, TF}
        LCU in (:lower, :center, :upper) || throw(ArgumentError("LCU must be one of :lower, :center, :upper"))
        bend >= bstart || throw(ArgumentError("bend should be greater than or equal to bstart"))
        scaler > 0 || throw(ArgumentError("non-positive scaler argument not supported"))
        return new{LCU,TF}(bstart, bend, TF(scaler))
    end
end

"""
    ApproximateThirdOctaveBands{LCU}(TF=Float64, bstart::Int, bend::Int, scaler=1)

Construct an `ApproximateThirdOctaveBands` with `eltype` `TF` encompassing band index numbers from `bstart` to `bend`.

The "standard" band frequencies will be scaled by `scaler`, e.g. if `scaler = 0.5` then what would normally be the `1000 Hz` frequency will be `500 Hz`, etc..
"""
function ApproximateThirdOctaveBands{LCU}(TF::Type, bstart::Int, bend::Int, scaler=1) where {LCU}
    return ApproximateThirdOctaveBands{LCU,TF}(bstart, bend, scaler)
end
function ApproximateThirdOctaveBands{LCU}(bstart::Int, bend::Int, scaler=1) where {LCU}
    return ApproximateThirdOctaveBands{LCU}(Float64, bstart, bend, scaler)
end

@inline function Base.getindex(bands::ApproximateThirdOctaveBands{:center,TF}, i::Int) where {TF}
    @boundscheck checkbounds(bands, i)
    j = bands.bstart + i - 1
    factor10, b0 = divrem(j, 10, RoundDown)
    b = b0 + 1
    return freq_scaler(bands)*approx_3rd_octave_cbands_pattern[b]*TF(10)^factor10
end

@inline function Base.getindex(bands::ApproximateThirdOctaveBands{:lower,TF}, i::Int) where {TF}
    @boundscheck checkbounds(bands, i)
    j = bands.bstart + i - 1
    factor10, b0 = divrem(j, 10, RoundDown)
    b = b0 + 1
    return freq_scaler(bands)*approx_3rd_octave_lbands_pattern[b]*TF(10)^factor10
end

@inline function Base.getindex(bands::ApproximateThirdOctaveBands{:upper,TF}, i::Int) where {TF}
    @boundscheck checkbounds(bands, i)
    j = bands.bstart + i - 1
    factor10, b0 = divrem(j, 10, RoundDown)
    b = b0 + 1
    return freq_scaler(bands)*approx_3rd_octave_ubands_pattern[b]*TF(10)^factor10
end
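# A worked sketch of the approximate third-octave pattern above (illustrative
# only): the ten-entry pattern repeats every decade, with ten band numbers per
# decade, so band number 30 is the 1000 Hz center band (using the
# `cband_number` method defined below):
# tob = ApproximateThirdOctaveBands{:center}(30, 33)
# collect(tob) ≈ [1000.0, 1250.0, 1600.0, 2000.0]
# cband_number(ApproximateThirdOctaveBands, 1000.0, 1) == 30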
@inline function band_approx_3rd_octave_lower_limit(fl::TF, scaler) where {TF}
    # For the `scaler`, I've been thinking about always leaving the input frequency (here `fl`) alone and modifying the standard bands (here `approx_3rd_octave_lbands_pattern`).
    # But then that would involve multiplying all of `approx_3rd_octave_lbands_pattern`...
    # Or maybe not.
    factor10 = floor(Int, log10(fl/(scaler*approx_3rd_octave_lbands_pattern[1])))
    i = searchsortedfirst(approx_3rd_octave_lbands_pattern, fl; lt=(lband, f)->isless(scaler*lband*TF(10)^factor10, f))
    # - 2 because
    #
    #   * -1 for searchsortedfirst giving us the first index in approx_3rd_octave_lbands_pattern that is greater than fl, and we want the band before that
    #   * -1 because the array approx_3rd_octave_lbands_pattern is 1-based, but the third-octave band pattern band numbers are 0-based (centerband 1.0 Hz is band number 0, etc..)
    return (i - 2) + factor10*10
end

@inline function band_approx_3rd_octave_upper_limit(fu::TF, scaler) where {TF}
    factor10 = floor(Int, log10(fu/(scaler*approx_3rd_octave_lbands_pattern[1])))
    i = searchsortedfirst(approx_3rd_octave_ubands_pattern, fu; lt=(uband, f)->isless(scaler*uband*TF(10)^factor10, f))
    # - 1 because
    #
    #   * -1 because the array approx_3rd_octave_lbands_pattern is 1-based, but the third-octave band pattern band numbers are 0-based (centerband 1.0 Hz is band number 0, etc..)
    return (i - 1) + factor10*10
end

function cband_approx_3rd_octave(fc, scaler)
    fc_scaled = fc/scaler
    frac, factor10 = modf(log10(fc_scaled))
    # if (frac < -eps(frac))
    #     frac += 1
    #     factor10 -= 1
    # end
    adj = ifelse(frac < -eps(frac), 1, 0)
    frac += adj
    factor10 -= adj
    cband_pattern_entry = 10^frac
    tol_shift = 0.001
    b = searchsortedfirst(approx_3rd_octave_cbands_pattern, cband_pattern_entry-tol_shift)
    tol_compare = 100*eps(approx_3rd_octave_cbands_pattern[b])
    abs_cs_safe(approx_3rd_octave_cbands_pattern[b] - cband_pattern_entry) < tol_compare || throw(ArgumentError("frequency fc does not correspond to an approximate 3rd-octave center band"))
    b0 = b - 1
    j = 10*Int(factor10) + b0
    return j
end

function cband_number(::Type{<:ApproximateThirdOctaveBands}, fc, scaler)
    return cband_approx_3rd_octave(fc, scaler)
end

"""
    ApproximateThirdOctaveBands{LCU}(fstart::TF, fend::TF, scaler=1)

Construct an `ApproximateThirdOctaveBands` with `eltype` `TF`, scaled by `scaler`, encompassing the bands needed to completely extend over minimum frequency `fstart` and maximum frequency `fend`.
"""
ApproximateThirdOctaveBands{LCU}(fstart::TF, fend::TF, scaler=1) where {LCU,TF} = ApproximateThirdOctaveBands{LCU,TF}(fstart, fend, scaler)
ApproximateThirdOctaveBands{LCU,TF}(fstart::TF, fend::TF, scaler=1) where {LCU,TF} = ApproximateThirdOctaveBands{LCU,TF}(band_approx_3rd_octave_lower_limit(fstart, scaler), band_approx_3rd_octave_upper_limit(fend, scaler), scaler)

"""
    ApproximateThirdOctaveCenterBands{TF}

Alias for `ApproximateThirdOctaveBands{:center,TF}`
"""
const ApproximateThirdOctaveCenterBands{TF} = ApproximateThirdOctaveBands{:center,TF}

"""
    ApproximateThirdOctaveLowerBands{TF}

Alias for `ApproximateThirdOctaveBands{:lower,TF}`
"""
const ApproximateThirdOctaveLowerBands{TF} = ApproximateThirdOctaveBands{:lower,TF}

"""
    ApproximateThirdOctaveUpperBands{TF}

Alias for `ApproximateThirdOctaveBands{:upper,TF}`
"""
const ApproximateThirdOctaveUpperBands{TF} = ApproximateThirdOctaveBands{:upper,TF}

"""
    lower_bands(bands::ApproximateThirdOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF}

Construct and return the lower edges of the proportional bands `bands` scaled by `scaler`.
"""
lower_bands(bands::ApproximateThirdOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF} = ApproximateThirdOctaveBands{:lower,TF}(band_start(bands), band_end(bands), scaler)

"""
    center_bands(bands::ApproximateThirdOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF}

Construct and return the centers of the proportional bands `bands` scaled by `scaler`.
"""
center_bands(bands::ApproximateThirdOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF} = ApproximateThirdOctaveBands{:center,TF}(band_start(bands), band_end(bands), scaler)
""" upper_bands(bands::ApproximateThirdOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF} = ApproximateThirdOctaveBands{:upper,TF}(band_start(bands), band_end(bands), scaler) const approx_octave_cbands_pattern = [1.0, 2.0, 4.0, 8.0, 16.0, 31.5, 63.0, 125.0, 250.0, 500.0] const approx_octave_lbands_pattern = [0.71, 1.42, 2.84, 5.68, 11.0, 22.0, 44.0, 88.0, 177.0, 355.0] const approx_octave_ubands_pattern = [1.42, 2.84, 5.68, 11.0, 22.0, 44.0, 88.0, 177.0, 355.0, 710.0] """ ApproximateOctaveBands{LCU,TF} <: AbstractProportionalBands{1,LCU,TF} Representation of the approximate octave proportional frequency bands with `eltype` `TF`. The `LCU` parameter can take one of three values: * `:lower`: The `struct` returns the lower edges of each frequency band. * `:center`: The `struct` returns the center of each frequency band. * `:upper`: The `struct` returns the upper edges of each frequency band. """ struct ApproximateOctaveBands{LCU,TF} <: AbstractProportionalBands{1,LCU,TF} bstart::Int bend::Int scaler::TF function ApproximateOctaveBands{LCU,TF}(bstart::Int, bend::Int, scaler=1) where {LCU, TF} LCU in (:lower, :center, :upper) || throw(ArgumentError("LCU must be one of :lower, :center, :upper")) bend >= bstart || throw(ArgumentError("bend should be greater than or equal to bstart")) scaler > 0 || throw(ArgumentError("non-positive scaler argument not supported")) return new{LCU,TF}(bstart, bend, TF(scaler)) end end """ ApproximateOctaveBands{LCU,TF}(bstart::Int, bend::Int) Construct an `ApproximateOctaveBands` with `eltype` `TF` encomposing band index numbers from `bstart` to `bend`. The "standard" band frequencies will be scaled by `scaler`, e.g. if `scaler = 0.5` then what would normally be the `1000 Hz` frequency will be `500 Hz`, etc.. """ function ApproximateOctaveBands{LCU}(TF::Type, bstart::Int, bend::Int, scaler=1) where {LCU} return ApproximateOctaveBands{LCU,TF}(bstart, bend, scaler) end function ApproximateOctaveBands{LCU}(bstart::Int, bend::Int, scaler=1) where {LCU} return ApproximateOctaveBands{LCU}(Float64, bstart, bend, scaler) end @inline function Base.getindex(bands::ApproximateOctaveBands{:center,TF}, i::Int) where {TF} @boundscheck checkbounds(bands, i) j = bands.bstart + i - 1 factor1000, b0 = divrem(j, 10, RoundDown) b = b0 + 1 return freq_scaler(bands)*approx_octave_cbands_pattern[b]*TF(1000)^factor1000 end @inline function Base.getindex(bands::ApproximateOctaveBands{:lower,TF}, i::Int) where {TF} @boundscheck checkbounds(bands, i) j = bands.bstart + i - 1 factor1000, b0 = divrem(j, 10, RoundDown) b = b0 + 1 return freq_scaler(bands)*approx_octave_lbands_pattern[b]*TF(1000)^factor1000 end @inline function Base.getindex(bands::ApproximateOctaveBands{:upper,TF}, i::Int) where {TF} @boundscheck checkbounds(bands, i) j = bands.bstart + i - 1 factor1000, b0 = divrem(j, 10, RoundDown) b = b0 + 1 return freq_scaler(bands)*approx_octave_ubands_pattern[b]*TF(1000)^factor1000 end @inline function band_approx_octave_lower_limit(fl::TF, scaler) where {TF} factor1000 = floor(Int, log10(fl/(scaler*approx_octave_lbands_pattern[1]))/3) i = searchsortedfirst(approx_octave_lbands_pattern, fl; lt=(lband, f)->isless(scaler*lband*TF(10)^(3*factor1000), f)) # - 2 because # # * -1 for searchsortedfirst giving us the first index in approx_octave_lbands_pattern that is greater than fl, and we want the band before that # * -1 because the array approx_octave_lbands_pattern is 1-based, but the octave band pattern band numbers are 0-based (centerband 1.0 Hz is band number 0, etc..) 
@inline function band_approx_octave_lower_limit(fl::TF, scaler) where {TF}
    factor1000 = floor(Int, log10(fl/(scaler*approx_octave_lbands_pattern[1]))/3)
    i = searchsortedfirst(approx_octave_lbands_pattern, fl; lt=(lband, f)->isless(scaler*lband*TF(10)^(3*factor1000), f))
    # - 2 because
    #
    #   * -1 for searchsortedfirst giving us the first index in approx_octave_lbands_pattern that is greater than fl, and we want the band before that
    #   * -1 because the array approx_octave_lbands_pattern is 1-based, but the octave band pattern band numbers are 0-based (centerband 1.0 Hz is band number 0, etc..)
    return (i - 2) + factor1000*10
end

@inline function band_approx_octave_upper_limit(fu::TF, scaler) where {TF}
    factor1000 = floor(Int, log10(fu/(scaler*approx_octave_lbands_pattern[1]))/3)
    i = searchsortedfirst(approx_octave_ubands_pattern, fu; lt=(lband, f)->isless(scaler*lband*TF(10)^(3*factor1000), f))
    # - 1 because
    #
    #   * -1 because the array approx_octave_lbands_pattern is 1-based, but the octave band pattern band numbers are 0-based (centerband 1.0 Hz is band number 0, etc..)
    return (i - 1) + factor1000*10
end

function cband_approx_octave(fc, scaler)
    fc_scaled = fc/scaler
    frac, factor1000 = modf(log10(fc_scaled)/log10(1000))
    # if (frac < -eps(frac))
    #     frac += 1
    #     factor1000 -= 1
    # end
    adj = ifelse(frac < -eps(frac), 1, 0)
    frac += adj
    factor1000 -= adj
    cband_pattern_entry = 1000^frac
    tol_shift = 0.001
    b = searchsortedfirst(approx_octave_cbands_pattern, cband_pattern_entry-tol_shift)
    tol_compare = 100*eps(approx_octave_cbands_pattern[b])
    abs_cs_safe(approx_octave_cbands_pattern[b] - cband_pattern_entry) < tol_compare || throw(ArgumentError("frequency f does not correspond to an approximate octave center band"))
    b0 = b - 1
    j = 10*Int(factor1000) + b0
    return j
end

function cband_number(::Type{<:ApproximateOctaveBands}, fc, scaler)
    return cband_approx_octave(fc, scaler)
end

"""
    ApproximateOctaveBands{LCU}(fstart::TF, fend::TF, scaler=1)

Construct an `ApproximateOctaveBands` with `eltype` `TF`, scaled by `scaler`, encompassing the bands needed to completely extend over minimum frequency `fstart` and maximum frequency `fend`.
"""
ApproximateOctaveBands{LCU}(fstart::TF, fend::TF, scaler=1) where {LCU,TF} = ApproximateOctaveBands{LCU,TF}(fstart, fend, scaler)
ApproximateOctaveBands{LCU,TF}(fstart::TF, fend::TF, scaler=1) where {LCU,TF} = ApproximateOctaveBands{LCU,TF}(band_approx_octave_lower_limit(fstart, scaler), band_approx_octave_upper_limit(fend, scaler), scaler)

"""
    ApproximateOctaveCenterBands{TF}

Alias for `ApproximateOctaveBands{:center,TF}`
"""
const ApproximateOctaveCenterBands{TF} = ApproximateOctaveBands{:center,TF}

"""
    ApproximateOctaveLowerBands{TF}

Alias for `ApproximateOctaveBands{:lower,TF}`
"""
const ApproximateOctaveLowerBands{TF} = ApproximateOctaveBands{:lower,TF}

"""
    ApproximateOctaveUpperBands{TF}

Alias for `ApproximateOctaveBands{:upper,TF}`
"""
const ApproximateOctaveUpperBands{TF} = ApproximateOctaveBands{:upper,TF}

"""
    lower_bands(bands::ApproximateOctaveBands{LCU,TF}, scaler=freq_scaler(bands))

Construct and return the lower edges of the proportional bands `bands` scaled by `scaler`.
"""
lower_bands(bands::ApproximateOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF} = ApproximateOctaveBands{:lower,TF}(band_start(bands), band_end(bands), scaler)

"""
    center_bands(bands::ApproximateOctaveBands{LCU,TF}, scaler=freq_scaler(bands))

Construct and return the centers of the proportional bands `bands` scaled by `scaler`.
"""
center_bands(bands::ApproximateOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF} = ApproximateOctaveBands{:center,TF}(band_start(bands), band_end(bands), scaler)
""" upper_bands(bands::ApproximateOctaveBands{LCU,TF}, scaler=freq_scaler(bands)) where {LCU,TF} = ApproximateOctaveBands{:upper,TF}(band_start(bands), band_end(bands), scaler) """ AbstractProportionalBandSpectrum{NO,TF} <: AbstractVector{TF} Abstract type representing a proportional band spectrum with band fraction `NO` and `eltype` `TF`. """ abstract type AbstractProportionalBandSpectrum{NO,TF} <: AbstractVector{TF} end """ octave_fraction(pbs::AbstractProportionalBandSpectrum{NO}) where {NO} Return `NO`, the "octave fraction," e.g. `1` for octave bands, `3` for third-octave, `12` for twelfth-octave. """ octave_fraction(::AbstractProportionalBandSpectrum{NO}) where {NO} = NO octave_fraction(::Type{<:AbstractProportionalBandSpectrum{NO}}) where {NO} = NO """ lower_bands(pbs::AbstractProportionalBandSpectrum) Return the lower edges of the proportional bands associated with the proportional band spectrum `pbs`. """ @inline lower_bands(pbs::AbstractProportionalBandSpectrum) = lower_bands(pbs.cbands) """ center_bands(pbs::AbstractProportionalBandSpectrum) Return the centers of the proportional bands associated with the proportional band spectrum `pbs`. """ @inline center_bands(pbs::AbstractProportionalBandSpectrum) = pbs.cbands """ upper_bands(pbs::AbstractProportionalBandSpectrum) Return the upper edges of the proportional bands associated with the proportional band spectrum `pbs`. """ @inline upper_bands(pbs::AbstractProportionalBandSpectrum) = upper_bands(pbs.cbands) """ freq_scaler(pbs::AbstractProportionalBandSpectrum) Return the factor each "standard" frequency band associated with the proportional band spectrum `pbs` is scaled by. For example, the approximate octave center bands include 1000 Hz, 2000 Hz, and 4000 Hz. If `freq_scaler(pbs) == 1.0`, then these frequencies would be unchanged. If `freq_scaler(pbs) == 1.5`, then `bands` would include 1500 Hz, 3000 Hz, and 6000 Hz instead. If `freq_scaler(pbs) == 0.5`, then `bands` would include 500 Hz, 1000 Hz, and 2000 Hz in place of 1000 Hz, 2000 Hz, and 4000 Hz. """ @inline freq_scaler(pbs::AbstractProportionalBandSpectrum) = freq_scaler(center_bands(pbs)) """ has_observer_time(pbs::AbstractProportionalBandSpectrum) Return `true` if the proportional band spectrum is defined to exist over a limited time, `false` otherwise. """ @inline has_observer_time(pbs::AbstractProportionalBandSpectrum) = false """ observer_time(pbs::AbstractProportionalBandSpectrum) Return the observer time at which the proportional band spectrum is defined to exist. """ @inline observer_time(pbs::AbstractProportionalBandSpectrum{NO,TF}) where {NO,TF} = zero(TF) """ timestep(pbs::AbstractProportionalBandSpectrum) Return the time range over which the proportional band spectrum is defined to exist. """ @inline timestep(pbs::AbstractProportionalBandSpectrum) = Inf*one(eltype(pbs)) """ amplitude(pbs::AbstractProportionalBandSpectrum) Return the underlying `Vector` containing the proportional band spectrum amplitudes contained in `pbs`. """ @inline amplitude(pbs::AbstractProportionalBandSpectrum) = pbs.pbs """ time_period(pbs::AbstractArray{<:AbstractProportionalBandSpectrum}) Find the period of time over which the collection of proportional band spectrum `pbs` exists. 
""" function time_period(pbs::AbstractArray{<:AbstractProportionalBandSpectrum}) tmin, tmax = extrema(observer_time, Iterators.filter(has_observer_time, pbs); init=(Inf, -Inf)) return tmax - tmin end """ time_scaler(pbs::AbstractProportionalBandSpectrum{NO,TF}, period) Find the scaling factor appropriate to multiply the proportional band spectrum `pbs` by that accounts for the duration of time the spectrum exists. This is used when combining multiple proportional band spectra with the [combine](@ref) function. """ time_scaler(pbs::AbstractProportionalBandSpectrum{NO,TF}, period) where {NO,TF} = one(TF) @inline Base.size(pbs::AbstractProportionalBandSpectrum) = size(center_bands(pbs)) @inline function Base.getindex(pbs::AbstractProportionalBandSpectrum, i::Int) @boundscheck checkbounds(pbs, i) return @inbounds amplitude(pbs)[i] end """ LazyNBProportionalBandSpectrum{NO,IsTonal,TF,TAmp,TBandsC} Lazy representation of a proportional band spectrum with octave fraction `NO` and `eltype` `TF` constructed from a narrowband (`NB`) spectrum. `IsTonal` indicates how the acoustic energy is distributed through the narrow frequency bands: * `IsTonal == false` means the acoustic energy is assumed to be evenly distributed thoughout each band * `IsTonal == true` means the acoustic energy is assumed to be concentrated at each band center """ struct LazyNBProportionalBandSpectrum{NO,IsTonal,TF,TAmp<:AbstractVector{TF},TBandsC<:AbstractProportionalBands{NO,:center}} <: AbstractProportionalBandSpectrum{NO,TF} f1_nb::TF df_nb::TF msp_amp::TAmp cbands::TBandsC function LazyNBProportionalBandSpectrum{NO,IsTonal,TF,TAmp}(f1_nb::TF, df_nb::TF, msp_amp::TAmp, cbands::AbstractProportionalBands{NO,:center}) where {NO,IsTonal,TF,TAmp<:AbstractVector{TF}} f1_nb > zero(f1_nb) || throw(ArgumentError("f1_nb must be > 0")) df_nb > zero(df_nb) || throw(ArgumentError("df_nb must be > 0")) return new{NO,IsTonal,TF,TAmp,typeof(cbands)}(f1_nb, df_nb, msp_amp, cbands) end end """ LazyNBProportionalBandSpectrum{NO,IsTonal}(f1_nb, df_nb, msp_amp, cbands::AbstractProportionalBands{NO,:center}) Construct a lazy representation of a proportional band spectrum with proportional center bands `cbands` from a narrowband spectrum. The narrowband frequencies are defined by the first narrowband frequency `f1_nb` and the narrowband frequency spacing `df_nb`. `msp_amp` is the spectrum of narrowband mean squared pressure amplitude. `IsTonal` indicates how the acoustic energy is distributed through the narrow frequency bands: * `IsTonal == false` means the acoustic energy is assumed to be evenly distributed thoughout each band * `IsTonal == true` means the acoustic energy is assumed to be concentrated at each band center """ function LazyNBProportionalBandSpectrum{NO,IsTonal}(f1_nb, df_nb, msp_amp, cbands::AbstractProportionalBands{NO,:center}) where {NO,IsTonal} TF = eltype(msp_amp) TAmp = typeof(msp_amp) return LazyNBProportionalBandSpectrum{NO,IsTonal,TF,TAmp}(TF(f1_nb), TF(df_nb), msp_amp, cbands) end """ LazyNBProportionalBandSpectrum(f1_nb, df_nb, msp_amp, cbands::AbstractProportionalBands{NO,:center}, istonal=false) Construct a lazy representation of a proportional band spectrum with proportional center bands `cbands` from a narrowband spectrum. The narrowband frequencies are defined by the first narrowband frequency `f1_nb` and the narrowband frequency spacing `df_nb`. `msp_amp` is the spectrum of narrowband mean squared pressure amplitude. 
"""
    LazyNBProportionalBandSpectrum(f1_nb, df_nb, msp_amp, cbands::AbstractProportionalBands{NO,:center}, istonal=false)

Construct a lazy representation of a proportional band spectrum with proportional center bands `cbands` from a narrowband spectrum.

The narrowband frequencies are defined by the first narrowband frequency `f1_nb` and the narrowband frequency spacing `df_nb`.
`msp_amp` is the spectrum of narrowband mean squared pressure amplitude.
`istonal` indicates how the acoustic energy is distributed through the narrow frequency bands:

* `istonal == false` means the acoustic energy is assumed to be evenly distributed throughout each band
* `istonal == true` means the acoustic energy is assumed to be concentrated at each band center
"""
function LazyNBProportionalBandSpectrum(f1_nb, df_nb, msp_amp, cbands::AbstractProportionalBands{NO,:center}, istonal::Bool=false) where {NO}
    return LazyNBProportionalBandSpectrum{NO,istonal}(f1_nb, df_nb, msp_amp, cbands)
end

"""
    LazyNBProportionalBandSpectrum{NO,IsTonal}(TBands::Type{<:AbstractProportionalBands{NO}}, f1_nb, df_nb, msp_amp, scaler=1)

Construct a lazy representation of a proportional band spectrum with proportional band type `TBands` from a narrowband spectrum.

The narrowband frequencies are defined by the first narrowband frequency `f1_nb` and the narrowband frequency spacing `df_nb`.
`msp_amp` is the spectrum of narrowband mean squared pressure amplitude.
The proportional band frequencies will be scaled by `scaler`.
`IsTonal` is a `Bool` indicating how the acoustic energy is distributed through the narrow frequency bands:

* `IsTonal == false` means the acoustic energy is assumed to be evenly distributed throughout each band
* `IsTonal == true` means the acoustic energy is assumed to be concentrated at each band center
"""
function LazyNBProportionalBandSpectrum{NO,false}(TBands::Type{<:AbstractProportionalBands{NO}}, f1_nb, df_nb, msp_amp, scaler=1) where {NO}
    TF = eltype(msp_amp)
    TAmp = typeof(msp_amp)
    # We're thinking of each non-zero frequency as being a bin with center frequency `f` and width `df_nb`.
    # So to get the lowest non-zero frequency we'll subtract 0.5*df_nb from the lowest non-zero frequency center:
    fstart = max(f1_nb - 0.5*df_nb, TF(fmin_exact))
    fend = f1_nb + (length(msp_amp)-1)*df_nb + 0.5*df_nb
    cbands = TBands{:center}(fstart, fend, scaler)
    return LazyNBProportionalBandSpectrum{NO,false,TF,TAmp}(TF(f1_nb), TF(df_nb), msp_amp, cbands)
end
function LazyNBProportionalBandSpectrum{NO,true}(TBands::Type{<:AbstractProportionalBands{NO}}, f1_nb, df_nb, msp_amp, scaler=1) where {NO}
    TF = eltype(msp_amp)
    TAmp = typeof(msp_amp)
    # We're thinking of each non-zero frequency as being an infinitely thin "bin" with center frequency `f` and spacing `df_nb`.
    # So the lowest non-zero frequency is f1_nb, and the highest is f1_nb + (length(msp_amp)-1)*df_nb.
    fstart = f1_nb
    fend = f1_nb + (length(msp_amp)-1)*df_nb
    cbands = TBands{:center}(fstart, fend, scaler)
    return LazyNBProportionalBandSpectrum{NO,true,TF,TAmp}(TF(f1_nb), TF(df_nb), msp_amp, cbands)
end
""" function LazyNBProportionalBandSpectrum(TBands::Type{<:AbstractProportionalBands{NO}}, f1_nb, df_nb, msp_amp, scaler=1, istonal::Bool=false) where {NO} return LazyNBProportionalBandSpectrum{NO,istonal}(TBands, f1_nb, df_nb, msp_amp, scaler) end """ LazyNBProportionalBandSpectrum(TBands::Type{<:AbstractProportionalBands}, sm::AbstractNarrowbandSpectrum, scaler=1) Construct a `LazyNBProportionalBandSpectrum` using a proportional band `TBands` and narrowband spectrum `sm`, and optional frequency scaler `scaler`. The proportional band frequencies will be scaled by `scaler`. """ function LazyNBProportionalBandSpectrum(TBands::Type{<:AbstractProportionalBands{NO}}, sm::AbstractNarrowbandSpectrum{IsEven,IsTonal}, scaler=1) where {NO,IsEven,IsTonal} msp = MSPSpectrumAmplitude(sm) freq = frequency(msp) f1_nb = freq[begin+1] df_nb = step(freq) # Skip the zero frequency. msp_amp = @view msp[begin+1:end] return LazyNBProportionalBandSpectrum{NO,IsTonal}(TBands, f1_nb, df_nb, msp_amp, scaler) end """ LazyNBProportionalBandSpectrum(sm::AbstractNarrowbandSpectrum, cbands::AbstractProportionalBands{NO,:center}) Construct a `LazyNBProportionalBandSpectrum` using proportional centerbands `cbands` and narrowband spectrum `sm`. The proportional band frequencies will be scaled by `scaler`. """ function LazyNBProportionalBandSpectrum(sm::AbstractNarrowbandSpectrum{IsEven,IsTonal}, cbands::AbstractProportionalBands{NO,:center}) where {NO,IsEven,IsTonal} msp = MSPSpectrumAmplitude(sm) TF = eltype(msp) freq = frequency(msp) f1_nb = TF(freq[begin+1]) df_nb = TF(step(freq)) # Skip the zero frequency. msp_amp = @view msp[begin+1:end] TAmp = typeof(msp_amp) return LazyNBProportionalBandSpectrum{NO,IsTonal,TF,TAmp}(f1_nb, df_nb, msp_amp, cbands) end """ frequency_nb(pbs::LazyNBProportionalBandSpectrum) Return the narrowband frequencies associated with the underlying narrowband spectrum contained in `pbs`. """ frequency_nb(pbs::LazyNBProportionalBandSpectrum) = pbs.f1_nb .+ (0:length(pbs.msp_amp)-1).*pbs.df_nb """ lazy_pbs(pbs, cbands::AbstractProportionalBands{NO,:center}) Construct a lazy proportional band spectrum on proportional center bands `cbands` using the proportional band spectrum `pbs`. """ lazy_pbs function lazy_pbs(pbs::LazyNBProportionalBandSpectrum{NOIn,IsTonal}, cbands::AbstractProportionalBands{NO,:center}) where {NOIn,IsTonal,NO} return LazyNBProportionalBandSpectrum{NO,IsTonal}(pbs.f1_nb, pbs.df_nb, pbs.msp_amp, cbands) end @inline function Base.getindex(pbs::LazyNBProportionalBandSpectrum{NO,false}, i::Int) where {NO} @boundscheck checkbounds(pbs, i) # This is where the fun begins. # So, first I want the lower and upper bands of this band. fl = lower_bands(pbs)[i] fu = upper_bands(pbs)[i] # Now I need to find the starting and ending indices that are included in this frequency band. # Need the narrowband frequencies. # This will not include the zero frequency. f_nb = frequency_nb(pbs) # This is the narrowband frequency spacing. Δf = pbs.df_nb # So, what is the first index we want? # It's the one that has f_nb[i] + 0.5*Δf >= fl. # So that means f_nb[i] >= fl - 0.5*Δf istart = searchsortedfirst(f_nb, fl - 0.5*Δf) # `searchsortedfirst` will return `length(f_nb)+1` it doesn't find anything. # What does that mean? # That means that all the frequencies in the narrowband spectrum are lower # than the band we're looking at. So return 0. if istart == length(f_nb) + 1 return zero(eltype(pbs)) end # What is the last index we want? 
@inline function Base.getindex(pbs::LazyNBProportionalBandSpectrum{NO,false}, i::Int) where {NO}
    @boundscheck checkbounds(pbs, i)
    # This is where the fun begins.
    # So, first I want the lower and upper bands of this band.
    fl = lower_bands(pbs)[i]
    fu = upper_bands(pbs)[i]
    # Now I need to find the starting and ending indices that are included in this frequency band.

    # Need the narrowband frequencies.
    # This will not include the zero frequency.
    f_nb = frequency_nb(pbs)

    # This is the narrowband frequency spacing.
    Δf = pbs.df_nb

    # So, what is the first index we want?
    # It's the one that has f_nb[i] + 0.5*Δf >= fl.
    # So that means f_nb[i] >= fl - 0.5*Δf
    istart = searchsortedfirst(f_nb, fl - 0.5*Δf)
    # `searchsortedfirst` will return `length(f_nb)+1` if it doesn't find anything.
    # What does that mean?
    # That means that all the frequencies in the narrowband spectrum are lower
    # than the band we're looking at. So return 0.
    if istart == length(f_nb) + 1
        return zero(eltype(pbs))
    end

    # What is the last index we want?
    # It's the last one that has f_nb[i] - 0.5*Δf <= fu
    # Or f_nb[i] <= fu + 0.5*Δf
    iend = searchsortedlast(f_nb, fu + 0.5*Δf)
    if iend == 0
        # All the frequencies are greater than the band we're looking at.
        return zero(eltype(pbs))
    end

    # Need the msp amplitude relevant for this band.
    # First, get all of the msp amplitudes.
    msp_amp = pbs.msp_amp

    # Now get the amplitudes we actually want.
    msp_amp_v = @view msp_amp[istart:iend]
    f_nb_v = @view f_nb[istart:iend]

    # Get the contribution of the first band, which might not be a full band.
    # So the band will start at fl, the lower edge of the proportional band, and
    # end at the narrowband center frequency + 0.5*Δf.
    # This isn't right if the "narrowband" is actually wider than the
    # proportional bands. If that's the case, then we need to clip it to the proportional band width.
    band_lhs = max(f_nb_v[1] - 0.5*Δf, fl)
    band_rhs = min(f_nb_v[1] + 0.5*Δf, fu)
    res_first_band = msp_amp_v[1]/Δf*(band_rhs - band_lhs)

    # Get the contribution of the last band, which might not be a full band.
    if length(msp_amp_v) > 1
        band_lhs = max(f_nb_v[end] - 0.5*Δf, fl)
        band_rhs = min(f_nb_v[end] + 0.5*Δf, fu)
        res_last_band = msp_amp_v[end]/Δf*(band_rhs - band_lhs)
    else
        res_last_band = zero(eltype(pbs))
    end

    # Get all the others and return them.
    msp_amp_v2 = @view msp_amp_v[2:end-1]
    return res_first_band + sum(msp_amp_v2) + res_last_band
end
msp_amp_v = @view msp_amp[istart:iend] # Since we're thinking of the narrowband frequency bins as being infinitely thin, they can't partially extend beyond the lower or upper limits of the relevant proportional band. # So we just need to add them up here: return sum(msp_amp_v) end """ ProportionalBandSpectrum{NO,TF,TPBS,TBandsC} Representation of a proportional band spectrum with octave fraction `NO` and `eltype` `TF`. """ struct ProportionalBandSpectrum{NO,TF,TPBS<:AbstractVector{TF},TBandsC<:AbstractProportionalBands{NO,:center}} <: AbstractProportionalBandSpectrum{NO,TF} pbs::TPBS cbands::TBandsC function ProportionalBandSpectrum(pbs, cbands::AbstractProportionalBands{NO,:center}) where {NO} length(pbs) == length(cbands) || throw(ArgumentError("length(pbs) must match length(cbands)")) return new{NO,eltype(pbs),typeof(pbs),typeof(cbands)}(pbs, cbands) end end function lazy_pbs(pbs::ProportionalBandSpectrum, cbands::AbstractProportionalBands{NO,:center}) where {NO} return LazyPBSProportionalBandSpectrum(pbs, cbands) end """ ProportionalBandSpectrum(TBandsC, cfreq_start, pbs, scaler=1) Construct a `ProportionalBandSpectrum` from an array of proportional band amplitudes and proportional band type `TBandsC`. `cfreq_start` is the centerband frequency corresponding to the first entry of `pbs`. The proportional band frequencies indicated by `TBandsC` are multiplied by `scaler`. """ function ProportionalBandSpectrum(TBandsC::Type{<:AbstractProportionalBands{NO,:center}}, cfreq_start, pbs, scaler=1) where {NO} bstart = cband_number(TBandsC, cfreq_start, scaler) bend = bstart + length(pbs) - 1 cbands = TBandsC(bstart, bend, scaler) return ProportionalBandSpectrum(pbs, cbands) end """ ProportionalBandSpectrumWithTime{NO,TF,TPBS,TBandsC,TDTime,TTime} Representation of a proportional band spectrum with octave fraction `NO` and `eltype` `TF`, but with an observer time. """ struct ProportionalBandSpectrumWithTime{NO,TF,TPBS<:AbstractVector{TF},TBandsC<:AbstractProportionalBands{NO,:center},TDTime,TTime} <: AbstractProportionalBandSpectrum{NO,TF} pbs::TPBS cbands::TBandsC dt::TDTime t::TTime @doc """ ProportionalBandSpectrumWithTime(pbs, cbands::AbstractProportionalBands{NO,:center}, dt, t) Construct a proportional band spectrum from mean-squared pressure amplitudes `pbs` and centerband frequencies `cbands`, defined to exist over time range `dt` and at observer time `t`. """ function ProportionalBandSpectrumWithTime(pbs, cbands::AbstractProportionalBands{NO,:center}, dt, t) where {NO} length(pbs) == length(cbands) || throw(ArgumentError("length(pbs) must match length(cbands)")) dt > zero(dt) || throw(ArgumentError("dt must be positive")) return new{NO,eltype(pbs),typeof(pbs),typeof(cbands),typeof(dt),typeof(t)}(pbs, cbands, dt, t) end end @inline has_observer_time(pbs::ProportionalBandSpectrumWithTime) = true @inline observer_time(pbs::ProportionalBandSpectrumWithTime) = pbs.t @inline timestep(pbs::ProportionalBandSpectrumWithTime{NO,TF}) where {NO,TF} = pbs.dt @inline time_scaler(pbs::ProportionalBandSpectrumWithTime, period) = timestep(pbs)/period function lazy_pbs(pbs::ProportionalBandSpectrumWithTime, cbands::AbstractProportionalBands{NO,:center}) where {NO} return LazyPBSProportionalBandSpectrum(pbs, cbands) end """ LazyPBSProportionalBandSpectrum{NO,TF} <: AbstractProportionalBandSpectrum{NO,TF} Lazy representation of a proportional band spectrum with octave fraction `NO` and `eltype` `TF` constructed from a different proportional band spectrum.
""" struct LazyPBSProportionalBandSpectrum{NO,TF,TPBS<:AbstractProportionalBandSpectrum,TBandsC<:AbstractProportionalBands{NO,:center}} <: AbstractProportionalBandSpectrum{NO,TF} pbs::TPBS cbands::TBandsC function LazyPBSProportionalBandSpectrum(pbs::AbstractProportionalBandSpectrum{NOIn,TF}, cbands::AbstractProportionalBands{NO,:center}) where {NO,TF,NOIn} return new{NO,TF,typeof(pbs),typeof(cbands)}(pbs, cbands) end end function LazyPBSProportionalBandSpectrum(TBands::Type{<:AbstractProportionalBands{NO}}, pbs::AbstractProportionalBandSpectrum, scaler=1) where {NO} # First, get the minimum and maximum frequencies associated with the input pbs. fstart = lower_bands(pbs)[begin] fend = upper_bands(pbs)[end] # Now use those frequencies to construct some centerbands. cbands = TBands{:center}(fstart, fend, scaler) # Now we can create the object. return LazyPBSProportionalBandSpectrum(pbs, cbands) end @inline has_observer_time(pbs::LazyPBSProportionalBandSpectrum) = has_observer_time(pbs.pbs) @inline observer_time(pbs::LazyPBSProportionalBandSpectrum) = observer_time(pbs.pbs) @inline timestep(pbs::LazyPBSProportionalBandSpectrum) = timestep(pbs.pbs) @inline time_scaler(pbs::LazyPBSProportionalBandSpectrum, period) = time_scaler(pbs.pbs, period) function lazy_pbs(pbs::LazyPBSProportionalBandSpectrum, cbands::AbstractProportionalBands{NO,:center}) where {NO} return LazyPBSProportionalBandSpectrum(pbs.pbs, cbands) end @inline function Base.getindex(pbs::LazyPBSProportionalBandSpectrum, i::Int) @boundscheck checkbounds(pbs, i) # So, first I want the lower and upper bands of this output band. fol = lower_bands(pbs)[i] fou = upper_bands(pbs)[i] # Get the underlying pbs. pbs_in = pbs.pbs # Get the lower and upper edges of the input band's spectrum. inbands_lower = lower_bands(pbs_in) inbands_upper = upper_bands(pbs_in) # So now I have the boundaries of the frequencies I'm interested in in `fol` and `fou`. # What I'm looking for now is: # # * the first input band whose upper edge is greater than `fol` # * the last input band whose lower edge is less than `fou`. # # So, for the first input band whose upper edge is greater than `fol`, I should be able to do this: istart = searchsortedfirst(inbands_upper, fol) # For that, what if # # * All of `inbands_upper` are less than `fol`? # That would mean all of the `inband` frequencies are lower than and outside the current `outband`. # Then the docs for `searchsortedfirst` say that it will return `length(inbands_upper)+1`. # So if I started a view of the data from that index, it would obviously be empty, which is what I'd want. # * All of the `inbands_upper` are greater than `fol`? # Not necessarily a problem, unless, I guess, the lowest of `inbands_lower` is *also* greater than `fou`. # Then the entire input spectrum would be larger than this band. # But `searchsortedfirst` should just return `1`, and hopefully that would be the right thing. # Now I want the last input band whose lower edge is less than `fou`. # I should be able to get that from iend = searchsortedlast(inbands_lower, fou) # For that, what if # # * All of the `inbands_lower` are greater than `fou`? # That would mean all of the `inband` frequencies are greater than and outside the current `outband`. # The docs indicate `searchsortedlast` would return `firstindex(inbands_lower)-1` for that case, i.e. `0`. # That's what I'd want, I think. # * All of the `inbands_lower` are lower than `fou`? 
# Not necessarily a problem, unless the highest of `inbands_upper` are also lower than `fou`, which would mean the entire input spectrum is lower than this output band. # Now I have the first and last input bands relevant to this output band, and so I can start adding up the input PBS's contributions to this output band. pbs_out = zero(eltype(pbs)) # First, we need to check that there's something to do: if (istart <= lastindex(pbs_in)) && (iend >= firstindex(pbs_in)) # First, get the bandwidth of the first input band associated with this output band. fil_start = inbands_lower[istart] fiu_start = inbands_upper[istart] dfin_start = fiu_start - fil_start # Next, need to get the frequency overlap of the first input band and this output band. # For the lower edge of the overlap, it will usually be `fol`, unless there's a gap where `inbands_lower[istart]` is greater than `fol`. foverlapl_start = max(fol, fil_start) # For the upper edge of the overlap, it will usually be `fiu_start`, unless there's a gap where `inbands_upper[istart]` is less than `fou`. foverlapu_start = min(fou, fiu_start) # Now get the first band's contribution to the PBS. pbs_out += pbs_in[istart]/dfin_start*(foverlapu_start - foverlapl_start) # Now, think about the last band's contribution to the PBS. # First, we need to check if the first and last band are identical, which would indicate that there's only one input band in this output band. if iend > istart # Now need to get the bandwidth associated with this input band. fil_end = inbands_lower[iend] fiu_end = inbands_upper[iend] dfin_end = fiu_end - fil_end # Next, need to get the frequency overlap of the last input band and this output band. foverlapl_end = max(fol, fil_end) foverlapu_end = min(fou, fiu_end) # Now we can get the last band's contribution to the PBS. pbs_out += pbs_in[iend]/dfin_end*(foverlapu_end - foverlapl_end) # Now we need the contribution of the input bands between `istart+1` and `iend-1`, inclusive. # Don't need to worry about incomplete overlap of the bands since these are "inside" this output band, so we can just directly sum them. pbs_in_v = @view pbs_in[istart+1:iend-1] pbs_out += sum(pbs_in_v) end end return pbs_out end """ combine(pbs::AbstractArray{<:AbstractProportionalBandSpectrum}, outcbands::AbstractProportionalBands{NO,:center}, time_axis=1) where {NO} Combine each input proportional band spectrum of `pbs` into one output proportional band spectrum using the proportional center bands indicated by `outcbands`. `time_axis` is an integer indicating the axis of the `pbs` array along which time varies. For example, if `time_axis == 1` and `pbs` is a three-dimensional array, then `pbs[:, i, j]` would be the proportional band spectrum of source `i`, `j` for all time. But if `time_axis == 3`, then `pbs[i, j, :]` would be the proportional band spectrum of source `i`, `j` for all time. """ function combine(pbs::AbstractArray{<:AbstractProportionalBandSpectrum}, outcbands::AbstractProportionalBands{NO,:center}, time_axis=1) where {NO} # Create the vector that will contain the new PBS. # An <:AbstractProportionalBandSpectrum is <:AbstractVector{TF}, so AbstractArray{<:AbstractProportionalBandSpectrum,N} is actually an Array of AbstractVectors. # So `eltype(eltype(pbs))` should give me the element type of the PBS.
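# (A note on the indexing scheme used below: `idx` holds a `Colon()` in the
# `time_axis` slot and scalar indices everywhere else, and the
# `otherdims`/`CartesianIndices` loop walks over every non-time position, so each
# `pbs[idx...]` view is a single time series. This mirrors the iteration pattern
# of Base's `mapslices`.)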
TFOut = promote_type(eltype(eltype(pbs)), eltype(outcbands)) pbs_out = zeros(TFOut, length(outcbands)) dims_in = axes(pbs) ndims_in = ndims(pbs) alldims_in = 1:ndims_in otherdims = setdiff(alldims_in, time_axis) itershape = tuple(dims_in[otherdims]...) # Create an array we'll use to index `pbs`, with a `Colon()` for the time_axis position and integers of the first value for all the others. idx = [ifelse(d==time_axis, Colon(), first(ind)) for (d, ind) in enumerate(axes(pbs))] nidx = length(otherdims) indices = CartesianIndices(itershape) # Loop through the indices. for I in indices for i in 1:nidx idx[otherdims[i]] = I.I[i] end # Grab all the elements along the time axis for this set of non-time indices. pbs_v = @view pbs[idx...] # Now add this element's contribution to pbs_out. _combine!(pbs_out, pbs_v, outcbands) end return ProportionalBandSpectrum(pbs_out, outcbands) end function _combine!(pbs_out::AbstractVector, pbs::AbstractVector{<:AbstractProportionalBandSpectrum}, outcbands::AbstractProportionalBands{NO,:center}) where {NO} # Get the time period for this collection of PBSs. period = time_period(pbs) # Now start looping over each input PBS. for pbs_in in pbs # Get the time scaler associated with this particular input PBS. scaler = time_scaler(pbs_in, period) # Create a lazy version of the input proportional band spectrum using the output center bands. pbs_in_lazy = lazy_pbs(pbs_in, outcbands) # Now add this element's contribution to output pbs. pbs_out .+= pbs_in_lazy .* scaler end return nothing end
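# A minimal usage sketch of `combine` (hypothetical numbers, not from the package's
# docs or tests; assumes input spectra without observer times, so each gets a unit
# time scaler):
#
# msp1 = 1e-4 .* rand(100)  # narrowband mean-squared pressures, 50 Hz start, 10 Hz spacing
# msp2 = 1e-4 .* rand(100)
# pbs1 = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, 50.0, 10.0, msp1)
# pbs2 = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, 50.0, 10.0, msp2)
# outcbands = ExactThirdOctaveCenterBands(50.0, 1040.0)
# pbs_total = combine([pbs1, pbs2], outcbands)  # a ProportionalBandSpectrum on outcbands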
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
866
""" W_A(f::AbstractFloat) Calculate the A-weighting factor for a frequency `f` in Hertz. Taken from the ANOPP2 Acoustics Analysis API Reference Manual. """ function W_A(f) f_1 = 20.598997 f_2 = 107.65265 f_3 = 737.86233 f_4 = 12194.217 f_5 = 158.48932 f_6 = 79919.29 f_7 = 1345600.0 f_8 = 1037918.48 f_9 = 9837328.0 K_1 = 2.242881e16 K_2 = 1.025119 K_3 = 1.562339 K_4 = 14500.0 K_5 = 1080768.18 K_6 = 11723776.0 W_C = (K_1*f^4) / ((f^2 + f_1^2)^2*(f^2 + f_4^2)^2) w_a = (K_3*f^4*W_C) / ((f^2 + f_2^2)*(f^2 + f_3^2)) return w_a end # """ # W_A(nbs::AbstractNarrowbandSpectrum) # A-weight and return the amplitudes of `nbs`. # """ # function W_A(nbs::AbstractNarrowbandSpectrum) # freq = frequency(nbs) # amp = amplitude(nbs) # amp .*= W_A.(freq) # return amp # end
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
3308
using OffsetArrays: OffsetArray """ dft_r2hc(x::AbstractVector) Calculate the real-input discrete Fourier transform, returning the result in the "half-complex" format. See http://www.fftw.org/fftw3_doc/The-1d-Real_002ddata-DFT.html#The-1d-Real_002ddata-DFT and http://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html for details. Only use this for checking the derivatives of the FFT routines (should work fine, just slow). """ function dft_r2hc(x::AbstractVector) # http://www.fftw.org/fftw3_doc/The-1d-Real_002ddata-DFT.html#The-1d-Real_002ddata-DFT # http://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html # So # # * we don't need the imaginary part of y_0 (which will be the first element in y, say, i=1) # * if n is even, we don't need the imaginary part of y_{n/2} (which would be i = n/2+1) # # Now, the order is supposed to be like this (r for real, i for imaginary): # # * r_0, r_1, r_2, ..., r_{n/2}, i_{(n+1)/2-1}, ..., i_2, i_1 # # But the docs say that they're still using the same old formula, which is: # # Y_k = Σ_{j=0}^{n-1} X_j exp(-2*π*i*j*k/n) # # (where i is sqrt(-1)). n = length(x) xo = OffsetArray(x, 0:n-1) y = similar(x) yo = OffsetArray(y, 0:n-1) # Let's do k = 0 first. yo[0] = sum(xo) # Now go from k = 1 to n/2 for the real parts. T = eltype(x) for k in 1:n÷2 yo[k] = zero(T) for j in 0:n-1 # yo[k] += xo[j]*exp(-2*pi*sqrt(-1)*j*k/n) yo[k] += xo[j]*cos(-2*pi*j*k/n) end end # Now go from 1 to (n+1)/2-1 for the imaginary parts. for k in 1:(n+1)÷2-1 yo[n-k] = zero(T) for j in 0:n-1 yo[n-k] += xo[j]*sin(-2*pi*j*k/n) end end return y end """ dft_hc2r(x::AbstractVector) Calculate the inverse discrete Fourier transform of a real-input DFT. This is the inverse of `dft_r2hc`, except for a factor of `N`, where `N` is the length of the input (and output), since FFTW computes an "unnormalized" FFT. See http://www.fftw.org/fftw3_doc/The-1d-Real_002ddata-DFT.html#The-1d-Real_002ddata-DFT and http://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html for details. Only use this for checking the derivatives of the FFT routines (should work fine, just slow). """ function dft_hc2r(x::AbstractVector) n = length(x) xo = OffsetArray(x, 0:n-1) y = zero(x) yo = OffsetArray(y, 0:n-1) j = 0 for k in 0:n-1 yo[k] += xo[j] end # So, I need this loop to get r_1 to r_{n÷2} and i_{(n+1)÷2-1} to i_1. # Let's say n is even. # Maybe 8. # So then n÷2 == 4 and (n+1)÷2-1 == 3. # So xo looks like this: # # r_0, r_1, r_2, r_3, r_4, i_3, i_2, i_1 # # If n is odd, say, 9, then n÷2 == 4 and (n+1)÷2-1 == 4, and xo looks like this: # # r_0, r_1, r_2, r_3, r_4, i_4, i_3, i_2, i_1 # for j in 1:(n-1)÷2 rj = xo[j] ij = xo[n-j] for k in 0:n-1 yo[k] += 2*rj*cos(2*pi*j*k/n) - 2*ij*sin(2*pi*j*k/n) end end if iseven(n) # Handle the Nyquist frequency. j = n÷2 rj = xo[j] for k in 0:n-1 yo[k] += rj*cos(2*pi*j*k/n) end end return y end
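# A small round-trip sketch (follows from the docstrings above: `dft_hc2r` inverts
# `dft_r2hc` up to a factor of `n = length(x)`):
#
# x = rand(9)
# y = dft_r2hc(x)
# x_rt = dft_hc2r(y) ./ length(x)
# @assert all(isapprox.(x_rt, x; atol=1e-12))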
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
255949
using AcousticMetrics: p_ref using AcousticMetrics: r2rfftfreq, rfft, rfft!, irfft, irfft!, RFFTCache using AcousticMetrics: PressureTimeHistory using AcousticMetrics: PressureSpectrumAmplitude, PressureSpectrumPhase, MSPSpectrumAmplitude, MSPSpectrumPhase, PowerSpectralDensityAmplitude, PowerSpectralDensityPhase using AcousticMetrics: starttime, timestep, frequencystep, time, pressure, frequency, halfcomplex, OASPL, istonal using AcousticMetrics: octave_fraction, band_start, band_end, cband_number using AcousticMetrics: AbstractProportionalBands using AcousticMetrics: ExactOctaveCenterBands, ExactOctaveLowerBands, ExactOctaveUpperBands using AcousticMetrics: ExactThirdOctaveCenterBands, ExactThirdOctaveLowerBands, ExactThirdOctaveUpperBands using AcousticMetrics: ExactProportionalBands, lower_bands, center_bands, upper_bands using AcousticMetrics: AbstractProportionalBandSpectrum using AcousticMetrics: LazyNBProportionalBandSpectrum, ProportionalBandSpectrum using AcousticMetrics: ApproximateOctaveBands, ApproximateOctaveCenterBands, ApproximateOctaveLowerBands, ApproximateOctaveUpperBands using AcousticMetrics: ApproximateThirdOctaveBands, ApproximateThirdOctaveCenterBands, ApproximateThirdOctaveLowerBands, ApproximateThirdOctaveUpperBands using AcousticMetrics: combine using AcousticMetrics: freq_scaler, time_period, time_scaler, has_observer_time, observer_time using AcousticMetrics: ProportionalBandSpectrumWithTime using AcousticMetrics: LazyPBSProportionalBandSpectrum, frequency_nb using AcousticMetrics: W_A using ForwardDiff using JLD2 using Polynomials: Polynomials using Random using Test include("dfts.jl") include(joinpath(@__DIR__, "gen_anopp2_data", "test_functions.jl")) @testset "Fourier transforms" begin @testset "FFTW compared to a function with a known Fourier transform" begin for T in [1.0, 2.0] f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n t = (0:n-1).*dt p = f.(t) p_fft = rfft(p)./n p_fft_expected = similar(p_fft) p_fft_expected[1] = 6.0 p_fft_expected[2] = 0.5*8*cos(0.2) p_fft_expected[end] = 0.5*8*sin(0.2) p_fft_expected[3] = 0.5*2.5*cos(-3.0) p_fft_expected[end-1] = 0.5*2.5*sin(-3.0) p_fft_expected[4] = 0.5*9*cos(3.1) p_fft_expected[end-2] = 0.5*9*sin(3.1) p_fft_expected[5] = 0.5*0.5*cos(-1.1) p_fft_expected[end-3] = 0.5*0.5*sin(-1.1) if n == 10 p_fft_expected[6] = 3*cos(0.2) else p_fft_expected[6] = 0.5*3*cos(0.2) p_fft_expected[end-4] = 0.5*3*sin(0.2) end @test all(isapprox.(p_fft, p_fft_expected, atol=1e-12)) end end end @testset "FFTW vs ad-hoc discrete Fourier transform" begin # Should check both even- and odd-length inputs, since the definition of the # discrete Fourier transform output depends slightly on that. for n in [64, 65] x = rand(n) y_fft = similar(x) rfft!(y_fft, x) y_dft = dft_r2hc(x) @test all(y_dft .≈ y_fft) y_ifft = similar(x) irfft!(y_ifft, x) y_idft = dft_hc2r(x) @test all(y_idft .≈ y_ifft) end end # Now check derivatives. 
@testset "FFTW derivatives" begin @testset "basic" begin for n in [64, 65] x = rand(n) y = similar(x) dy_dx_fft = ForwardDiff.jacobian(rfft!, y, x) dy_dx_dft = ForwardDiff.jacobian(dft_r2hc, x) @test all(isapprox.(dy_dx_fft, dy_dx_dft, atol=1e-13)) dy_dx_ifft = ForwardDiff.jacobian(irfft!, y, x) dy_dx_idft = ForwardDiff.jacobian(dft_hc2r, x) @test all(isapprox.(dy_dx_ifft, dy_dx_idft, atol=1e-13)) end end @testset "as intermediate function" begin function f1_fft(t) x = range(t[begin], t[end], length=8) x = @. 2*x^2 + 3*x + 5 y = similar(x) rfft!(y, x) return y end function f1_dft(t) x = range(t[begin], t[end], length=8) x = @. 2*x^2 + 3*x + 5 y = dft_r2hc(x) return y end dy_dx_fft = ForwardDiff.jacobian(f1_fft, [1.1, 3.5]) dy_dx_dft = ForwardDiff.jacobian(f1_dft, [1.1, 3.5]) @test all(isapprox.(dy_dx_fft, dy_dx_dft, atol=1e-13)) function f1_ifft(t) x = range(t[begin], t[end], length=8) x = @. 2*x^2 + 3*x + 5 y = similar(x) irfft!(y, x) return y end function f1_idft(t) x = range(t[begin], t[end], length=8) x = @. 2*x^2 + 3*x + 5 y = dft_hc2r(x) return y end dy_dx_ifft = ForwardDiff.jacobian(f1_ifft, [1.1, 3.5]) dy_dx_idft = ForwardDiff.jacobian(f1_idft, [1.1, 3.5]) @test all(isapprox.(dy_dx_ifft, dy_dx_idft, atol=1e-13)) end @testset "with user-supplied cache" begin nx = 8 nt = 5 cache = RFFTCache(Float64, nx, nt) function f2_fft(t) xstart = sum(t) xend = xstart + 2 x = range(xstart, xend, length=nx) x = @. 2*x^2 + 3*x + 5 y = similar(x) rfft!(y, x, cache) return y end function f2_dft(t) xstart = sum(t) xend = xstart + 2 x = range(xstart, xend, length=nx) x = @. 2*x^2 + 3*x + 5 y = dft_r2hc(x) return y end t = rand(nt) dy_dx_fft = ForwardDiff.jacobian(f2_fft, t) dy_dx_dft = ForwardDiff.jacobian(f2_dft, t) @test all(isapprox.(dy_dx_fft, dy_dx_dft, atol=1e-13)) function f2_ifft(t) xstart = sum(t) xend = xstart + 2 x = range(xstart, xend, length=nx) x = @. 2*x^2 + 3*x + 5 y = similar(x) irfft!(y, x, cache) return y end function f2_idft(t) xstart = sum(t) xend = xstart + 2 x = range(xstart, xend, length=nx) x = @. 2*x^2 + 3*x + 5 y = dft_hc2r(x) return y end t = rand(nt) dy_dx_ifft = ForwardDiff.jacobian(f2_ifft, t) dy_dx_idft = ForwardDiff.jacobian(f2_idft, t) @test all(isapprox.(dy_dx_ifft, dy_dx_idft, atol=1e-13)) end end end @testset "Pressure Spectrum" begin @testset "t0 == 0" begin for T in [1.0, 2.0] f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ 0.0 amp = PressureSpectrumAmplitude(ap) phase = PressureSpectrumPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6 amp_expected[2] = 8 amp_expected[3] = 2.5 amp_expected[4] = 9 amp_expected[5] = 0.5 phase_expected = similar(phase) phase_expected[1] = 0 phase_expected[2] = 0.2 phase_expected[3] = -3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. 
if n == 10 amp_expected[6] = 3*cos(0.2) phase_expected[6] = 0 else amp_expected[6] = 3 phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase.*amp, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ 0.0 @test starttime(phase) ≈ 0.0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can go from a PressureSpectrum to a PressureTimeHistory. ap_from_ps = PressureTimeHistory(amp) @test timestep(ap_from_ps) ≈ timestep(ap) @test starttime(ap_from_ps) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_ps), pressure(ap))) # Create a tonal version of the same spectrum. # Nothing should be any different except the `IsTonal` parameter. amp_tonal = PressureSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = PressureSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test all(isapprox.(frequency(amp_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(amp_tonal, amp_expected; atol=1e-12)) @test all(isapprox.(phase_tonal.*amp_tonal, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp_tonal) ≈ dt @test timestep(phase_tonal) ≈ dt @test starttime(amp_tonal) ≈ 0.0 @test starttime(phase_tonal) ≈ 0.0 @test frequencystep(amp_tonal) ≈ freq_expected[2] @test frequencystep(phase_tonal) ≈ freq_expected[2] @test istonal(amp_tonal) == true @test istonal(phase_tonal) == true end end @testset "negative amplitudes" begin for T in [1.0, 2.0] f(t) = -6 - 8*cos(1*2*pi/T*t + 0.2) - 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ 0.0 amp = PressureSpectrumAmplitude(ap) phase = PressureSpectrumPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6 amp_expected[2] = 8 amp_expected[3] = 2.5 amp_expected[4] = 9 amp_expected[5] = 0.5 phase_expected = similar(phase) phase_expected[1] = pi phase_expected[2] = -pi + 0.2 phase_expected[3] = pi - 3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 amp_expected[6] = 3*cos(0.2) phase_expected[6] = 0 else amp_expected[6] = 3 phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase.*amp, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ 0.0 @test starttime(phase) ≈ 0.0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can go from a PressureSpectrum to a PressureTimeHistory.
ap_from_ps = PressureTimeHistory(amp) @test timestep(ap_from_ps) ≈ timestep(ap) @test starttime(ap_from_ps) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_ps), pressure(ap))) # Create a tonal version of the same spectrum. # Nothing should be any different except the `IsTonal` parameter. amp_tonal = PressureSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = PressureSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test all(isapprox.(frequency(amp_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(amp_tonal, amp_expected; atol=1e-12)) @test all(isapprox.(phase_tonal.*amp_tonal, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp_tonal) ≈ dt @test timestep(phase_tonal) ≈ dt @test starttime(amp_tonal) ≈ 0.0 @test starttime(phase_tonal) ≈ 0.0 @test frequencystep(amp_tonal) ≈ freq_expected[2] @test frequencystep(phase_tonal) ≈ freq_expected[2] @test istonal(amp_tonal) == true @test istonal(phase_tonal) == true end end end end @testset "t0 !== 0" begin for T in [1.0, 2.0] t0 = 0.13 f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n t = t0 .+ (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt, t0) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ t0 amp = PressureSpectrumAmplitude(ap) phase = PressureSpectrumPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6 amp_expected[2] = 8 amp_expected[3] = 2.5 amp_expected[4] = 9 amp_expected[5] = 0.5 phase_expected = similar(phase) phase_expected[1] = 0 phase_expected[2] = 0.2 phase_expected[3] = -3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 amp_expected[6] = abs(3*cos(5*2*pi/T*t0 + 0.2)) phase_expected[6] = rem2pi(pi - 5*2*pi/T*t0, RoundNearest) else amp_expected[6] = 3 phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase, phase_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ t0 @test starttime(phase) ≈ t0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can go from a PressureSpectrum to a PressureTimeHistory. ap_from_ps = PressureTimeHistory(amp) @test timestep(ap_from_ps) ≈ timestep(ap) @test starttime(ap_from_ps) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_ps), pressure(ap))) # Create a tonal version of the same spectrum. # Nothing should be any different except the `IsTonal` parameter.
amp_tonal = PressureSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = PressureSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test all(isapprox.(frequency(amp_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(amp_tonal, amp_expected; atol=1e-12)) @test all(isapprox.(phase_tonal.*amp_tonal, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp_tonal) ≈ dt @test timestep(phase_tonal) ≈ dt @test starttime(amp_tonal) ≈ t0 @test starttime(phase_tonal) ≈ t0 @test frequencystep(amp_tonal) ≈ freq_expected[2] @test frequencystep(phase_tonal) ≈ freq_expected[2] @test istonal(amp_tonal) == true @test istonal(phase_tonal) == true end end end end @testset "Mean-squared Pressure Spectrum" begin @testset "t0 == 0" begin for T in [1.0, 2.0] f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ 0.0 amp = MSPSpectrumAmplitude(ap) phase = MSPSpectrumPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6^2 amp_expected[2] = 0.5*8^2 amp_expected[3] = 0.5*2.5^2 amp_expected[4] = 0.5*9^2 amp_expected[5] = 0.5*0.5^2 phase_expected = similar(phase) phase_expected[1] = 0 phase_expected[2] = 0.2 phase_expected[3] = -3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 amp_expected[6] = (3*cos(0.2))^2 phase_expected[6] = 0 else amp_expected[6] = 0.5*3^2 phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase.*amp, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ 0.0 @test starttime(phase) ≈ 0.0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can convert a mean-squared pressure to a pressure spectrum. psamp = PressureSpectrumAmplitude(amp) psamp_expected = similar(amp) psamp_expected[1] = 6 psamp_expected[2] = 8 psamp_expected[3] = 2.5 psamp_expected[4] = 9 psamp_expected[5] = 0.5 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 psamp_expected[6] = 3*cos(0.2) else psamp_expected[6] = 3 end @test all(isapprox.(frequency(psamp), freq_expected; atol=1e-12)) @test all(isapprox.(psamp, psamp_expected; atol=1e-12)) # Make sure I can convert a mean-squared pressure to the acoustic pressure. ap_from_msp = PressureTimeHistory(amp) @test timestep(ap_from_msp) ≈ timestep(ap) @test starttime(ap_from_msp) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_msp), pressure(ap))) # Create a tonal version of the same spectrum. # Nothing should be any different except the `IsTonal` parameter. 
amp_tonal = MSPSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = MSPSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test all(isapprox.(frequency(amp_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(amp_tonal, amp_expected; atol=1e-12)) @test all(isapprox.(phase_tonal.*amp_tonal, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp_tonal) ≈ dt @test timestep(phase_tonal) ≈ dt @test starttime(amp_tonal) ≈ 0.0 @test starttime(phase_tonal) ≈ 0.0 @test frequencystep(amp_tonal) ≈ freq_expected[2] @test frequencystep(phase_tonal) ≈ freq_expected[2] @test istonal(amp_tonal) == true @test istonal(phase_tonal) == true end end end @testset "t0 !== 0" begin for T in [1.0, 2.0] t0 = 0.13 f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n t = t0 .+ (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt, t0) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ t0 amp = MSPSpectrumAmplitude(ap) phase = MSPSpectrumPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6^2 amp_expected[2] = 0.5*8^2 amp_expected[3] = 0.5*2.5^2 amp_expected[4] = 0.5*9^2 amp_expected[5] = 0.5*0.5^2 phase_expected = similar(phase) phase_expected[1] = 0 phase_expected[2] = 0.2 phase_expected[3] = -3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 # amp_expected[6] = (3*cos(5*2*pi/T*t0 + 0.2))^2 # phase_expected[6] = rem2pi(-5*2*pi/T*t0, RoundNearest) amp_expected[6] = (3*cos(5*2*pi/T*t0 + 0.2))^2 phase_expected[6] = rem2pi(pi - 5*2*pi/T*t0, RoundNearest) else amp_expected[6] = 0.5*3^2 phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase, phase_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ t0 @test starttime(phase) ≈ t0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can convert a mean-squared pressure to a pressure spectrum. psamp = PressureSpectrumAmplitude(amp) psamp_expected = similar(psamp) psamp_expected[1] = 6 psamp_expected[2] = 8 psamp_expected[3] = 2.5 psamp_expected[4] = 9 psamp_expected[5] = 0.5 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 # The `t0` term pushes the cosine below zero, which messes # up the test. Hmm... what's the right thing to do here? # Well, what should the phase and amplitude be? # psamp_expected[6] = 3*cos(5*2*pi/T*t0 + 0.2) psamp_expected[6] = abs(3*cos(5*2*pi/T*t0 + 0.2)) else psamp_expected[6] = 3 end @test all(isapprox.(frequency(psamp), freq_expected; atol=1e-12)) @test all(isapprox.(psamp, psamp_expected; atol=1e-12)) # Make sure I can convert a mean-squared pressure to the acoustic pressure. 
ap_from_msp = PressureTimeHistory(amp) @test timestep(ap_from_msp) ≈ timestep(ap) @test starttime(ap_from_msp) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_msp), pressure(ap))) # Create a tonal version of the same spectrum. # Nothing should be any different except the `IsTonal` parameter. amp_tonal = MSPSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = MSPSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test all(isapprox.(frequency(amp_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase_tonal), freq_expected; atol=1e-12)) @test all(isapprox.(amp_tonal, amp_expected; atol=1e-12)) @test all(isapprox.(phase_tonal.*amp_tonal, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp_tonal) ≈ dt @test timestep(phase_tonal) ≈ dt @test starttime(amp_tonal) ≈ t0 @test starttime(phase_tonal) ≈ t0 @test frequencystep(amp_tonal) ≈ freq_expected[2] @test frequencystep(phase_tonal) ≈ freq_expected[2] @test istonal(amp_tonal) == true @test istonal(phase_tonal) == true end end end @testset "ANOPP2 comparison" begin a2_data = load(joinpath(@__DIR__, "gen_anopp2_data", "nbs.jld2")) freq_a2 = a2_data["a2_nbs_freq"] nbs_msp_a2 = a2_data["a2_nbs_amp"] nbs_phase_a2 = a2_data["a2_nbs_phase"] for T in [1, 2] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = apth_for_nbs.(t) ap = PressureTimeHistory(p, dt) amp = MSPSpectrumAmplitude(ap) phase = MSPSpectrumPhase(ap) @test all(isapprox.(frequency(amp), freq_a2[(T,n)], atol=1e-12)) @test all(isapprox.(frequency(phase), freq_a2[(T,n)], atol=1e-12)) @test all(isapprox.(amp, nbs_msp_a2[(T,n)], atol=1e-12)) # Checking the phase is tricky, since it involves the ratio of # the imaginary component to the real component of the MSP # spectrum (definition is phase = atan(imag(fft(p)), # real(fft(p)))). For the components of the spectrum that have # zero amplitude that ratio ends up being very noisy. So scale # the phase by the amplitude to remove the problematic # zero-amplitude components. @test all(isapprox.(phase.*amp, nbs_phase_a2[T, n].*nbs_msp_a2[T, n], atol=1e-12)) end end end end @testset "Power Spectral Density" begin @testset "t0 == 0" begin for T in [1.0, 2.0] f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n df = 1/T t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ 0.0 amp = PowerSpectralDensityAmplitude(ap) phase = PowerSpectralDensityPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6^2/df amp_expected[2] = 0.5*8^2/df amp_expected[3] = 0.5*2.5^2/df amp_expected[4] = 0.5*9^2/df amp_expected[5] = 0.5*0.5^2/df phase_expected = similar(phase) phase_expected[1] = 0 phase_expected[2] = 0.2 phase_expected[3] = -3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. 
if n == 10 amp_expected[6] = (3*cos(0.2))^2/df phase_expected[6] = 0 else amp_expected[6] = 0.5*3^2/df phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase.*amp, phase_expected.*amp_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ 0.0 @test starttime(phase) ≈ 0.0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can convert a PSD to a pressure spectrum. psamp = PressureSpectrumAmplitude(amp) psamp_expected = similar(psamp) psamp_expected[1] = 6 psamp_expected[2] = 8 psamp_expected[3] = 2.5 psamp_expected[4] = 9 psamp_expected[5] = 0.5 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 psamp_expected[6] = 3*cos(0.2) else psamp_expected[6] = 3 end @test all(isapprox.(frequency(psamp), freq_expected; atol=1e-12)) @test all(isapprox.(psamp, psamp_expected; atol=1e-12)) # Make sure I can convert a PSD to the acoustic pressure. ap_from_psd = PressureTimeHistory(amp) @test timestep(ap_from_psd) ≈ timestep(ap) @test starttime(ap_from_psd) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_psd), pressure(ap))) # I shouldn't be able to create a PSD from a tonal spectrum. amp_tonal = PressureSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = PressureSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test_throws ArgumentError PowerSpectralDensityAmplitude(amp_tonal) # @test_throws ArgumentError PowerSpectralDensityPhase(phase_tonal) end end end @testset "t0 !== 0" begin for T in [1.0, 2.0] t0 = 0.13 f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) for n in [10, 11] dt = T/n df = 1/T t = t0 .+ (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt, t0) @test all(isapprox.(time(ap), t)) @test timestep(ap) ≈ dt @test starttime(ap) ≈ t0 amp = PowerSpectralDensityAmplitude(ap) phase = PowerSpectralDensityPhase(ap) freq_expected = [0.0, 1/T, 2/T, 3/T, 4/T, 5/T] amp_expected = similar(amp) amp_expected[1] = 6^2/df amp_expected[2] = 0.5*8^2/df amp_expected[3] = 0.5*2.5^2/df amp_expected[4] = 0.5*9^2/df amp_expected[5] = 0.5*0.5^2/df phase_expected = similar(phase) phase_expected[1] = 0 phase_expected[2] = 0.2 phase_expected[3] = -3 phase_expected[4] = 3.1 phase_expected[5] = -1.1 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. 
if n == 10 # amp_expected[6] = (3*cos(5*2*pi/T*t0 + 0.2))^2 # phase_expected[6] = rem2pi(-5*2*pi/T*t0, RoundNearest) amp_expected[6] = (3*cos(5*2*pi/T*t0 + 0.2))^2/df phase_expected[6] = rem2pi(pi - 5*2*pi/T*t0, RoundNearest) else amp_expected[6] = 0.5*3^2/df phase_expected[6] = 0.2 end @test all(isapprox.(frequency(amp), freq_expected; atol=1e-12)) @test all(isapprox.(frequency(phase), freq_expected; atol=1e-12)) @test all(isapprox.(amp, amp_expected; atol=1e-12)) @test all(isapprox.(phase, phase_expected; atol=1e-12)) @test timestep(amp) ≈ dt @test timestep(phase) ≈ dt @test starttime(amp) ≈ t0 @test starttime(phase) ≈ t0 @test frequencystep(amp) ≈ freq_expected[2] @test frequencystep(phase) ≈ freq_expected[2] @test istonal(amp) == false @test istonal(phase) == false # Make sure I can convert a PSD to a pressure spectrum. psamp = PressureSpectrumAmplitude(amp) psamp_expected = similar(psamp) psamp_expected[1] = 6 psamp_expected[2] = 8 psamp_expected[3] = 2.5 psamp_expected[4] = 9 psamp_expected[5] = 0.5 # Handle the Nyquist frequency (kinda tricky). There isn't really a # Nyquist frequency for the odd input length case. if n == 10 # The `t0` term pushes the cosine below zero, which messes # up the test. Hmm... what's the right thing to do here? # Well, what should the phase and amplitude be? # amp_expected[6] = 3*cos(5*2*pi/T*t0 + 0.2) psamp_expected[6] = abs(3*cos(5*2*pi/T*t0 + 0.2)) else psamp_expected[6] = 3 end @test all(isapprox.(frequency(psamp), freq_expected; atol=1e-12)) @test all(isapprox.(psamp, psamp_expected; atol=1e-12)) # Make sure I can convert a PSD to the acoustic pressure. ap_from_psd = PressureTimeHistory(amp) @test timestep(ap_from_psd) ≈ timestep(ap) @test starttime(ap_from_psd) ≈ starttime(ap) @test all(isapprox.(pressure(ap_from_psd), pressure(ap))) # I shouldn't be able to create a PSD from a tonal spectrum. # But I actually can create a PSD phase, since the PSD phase is the same as the pressure and MSP phase. amp_tonal = PressureSpectrumAmplitude(halfcomplex(amp), timestep(amp), starttime(amp), true) phase_tonal = PressureSpectrumPhase(halfcomplex(phase), timestep(phase), starttime(phase), true) @test_throws ArgumentError PowerSpectralDensityAmplitude(amp_tonal) # @test_throws ArgumentError PowerSpectralDensityPhase(phase_tonal) end end end @testset "ANOPP2 comparison" begin a2_data = load(joinpath(@__DIR__, "gen_anopp2_data", "psd.jld2")) freq_a2 = a2_data["a2_psd_freq"] psd_msp_a2 = a2_data["a2_psd_amp"] psd_phase_a2 = a2_data["a2_psd_phase"] for T in [1, 2] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = apth_for_nbs.(t) ap = PressureTimeHistory(p, dt) amp = PowerSpectralDensityAmplitude(ap) phase = PowerSpectralDensityPhase(ap) @test all(isapprox.(frequency(amp), freq_a2[(T,n)], atol=1e-12)) @test all(isapprox.(frequency(phase), freq_a2[(T,n)], atol=1e-12)) @test all(isapprox.(amp, psd_msp_a2[(T,n)], atol=1e-12)) # Checking the phase is tricky, since it involves the ratio of # the imaginary component to the real component of the MSP # spectrum (definition is phase = atan(imag(fft(p)), # real(fft(p)))). For the components of the spectrum that have # zero amplitude that ratio ends up being very noisy. So scale # the phase by the amplitude to remove the problematic # zero-amplitude components.
@test all(isapprox.(phase.*amp, psd_phase_a2[T, n].*psd_msp_a2[T, n], atol=1e-12)) end end end end @testset "Proportional Band Spectrum" begin @testset "exact octave" begin @testset "standard" begin bands = ExactOctaveCenterBands(6, 16) @test octave_fraction(bands) == 1 bands_expected = [62.5, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0, 32000.0, 64000.0] @test all(isapprox.(bands, bands_expected)) @test_throws BoundsError bands[0] @test_throws BoundsError bands[12] bands_9_to_11 = ExactOctaveCenterBands(9, 11) @test all(isapprox.(bands_9_to_11, bands_expected[4:6])) @test_throws BoundsError bands_9_to_11[0] @test_throws BoundsError bands_9_to_11[4] @test_throws ArgumentError ExactOctaveCenterBands(5, 4) lbands = ExactOctaveLowerBands(6, 16) @test octave_fraction(lbands) == 1 @test all((log2.(bands) .- log2.(lbands)) .≈ 1/2) ubands = ExactOctaveUpperBands(6, 16) @test octave_fraction(ubands) == 1 @test all((log2.(ubands) .- log2.(bands)) .≈ 1/2) @test all((log2.(ubands) .- log2.(lbands)) .≈ 1) cbands = ExactOctaveCenterBands(700.0, 22000.0) @test octave_fraction(cbands) == 1 @test cbands.bstart == 9 @test cbands.bend == 14 lbands = ExactOctaveLowerBands(700.0, 22000.0) @test lbands.bstart == 9 @test lbands.bend == 14 ubands = ExactOctaveUpperBands(700.0, 22000.0) @test ubands.bstart == 9 @test ubands.bend == 14 # Test the `_cband_exact` routine, which goes from an exact centerband frequency to the band number. for (i, cband) in enumerate(cbands) @test cband_number(cbands, cband) == (band_start(cbands) + i - 1) end end @testset "scaler argument" begin for scaler in [0.1, 0.5, 1.0, 1.5, 2.0] bands = ExactOctaveCenterBands(6, 16, scaler) bands_expected = scaler .* [62.5, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0, 32000.0, 64000.0] @test all(isapprox.(bands, bands_expected)) @test_throws BoundsError bands[0] @test_throws BoundsError bands[12] bands_9_to_11 = ExactOctaveCenterBands(9, 11, scaler) @test all(isapprox.(bands_9_to_11, bands_expected[4:6])) @test_throws BoundsError bands_9_to_11[0] @test_throws BoundsError bands_9_to_11[4] @test_throws ArgumentError ExactOctaveCenterBands(5, 4, scaler) lbands = ExactOctaveLowerBands(6, 16, scaler) @test all((log2.(bands) .- log2.(lbands)) .≈ 1/2) ubands = ExactOctaveUpperBands(6, 16, scaler) @test all((log2.(ubands) .- log2.(bands)) .≈ 1/2) @test all((log2.(ubands) .- log2.(lbands)) .≈ 1) cbands = ExactOctaveCenterBands(700.0*scaler, 22000.0*scaler, scaler) @test cbands.bstart == 9 @test cbands.bend == 14 lbands = ExactOctaveLowerBands(700.0*scaler, 22000.0*scaler, scaler) @test lbands.bstart == 9 @test lbands.bend == 14 ubands = ExactOctaveUpperBands(700.0*scaler, 22000.0*scaler, scaler) @test ubands.bstart == 9 @test ubands.bend == 14 # Test the `_cband_exact` routine, which goes from an exact centerband frequency to the band number. for (i, cband) in enumerate(cbands) @test cband_number(cbands, cband) == (band_start(cbands) + i - 1) end end end end @testset "exact 1/3-octave" begin @testset "standard" begin bands = ExactThirdOctaveCenterBands(17, 40) @test octave_fraction(bands) == 3 # These are just from the ANOPP2 manual. 
bands_expected_all = [49.61, 62.50, 78.75, 99.21, 125.00, 157.49, 198.43, 250.00, 314.98, 396.85, 500.00, 629.96, 793.70, 1000.0, 1259.92, 1587.40, 2000.00, 2519.84, 3174.80, 4000.00, 5039.68, 6349.60, 8000.00, 10079.37] @test all(isapprox.(bands, bands_expected_all; atol=0.005)) @test_throws BoundsError bands[0] @test_throws BoundsError bands[25] bands_30_to_38 = ExactThirdOctaveCenterBands(30, 38) @test all(isapprox.(bands_30_to_38, bands_expected_all[14:end-2]; atol=0.005)) @test_throws BoundsError bands_30_to_38[0] @test_throws BoundsError bands_30_to_38[10] @test_throws ArgumentError ExactThirdOctaveCenterBands(5, 4) lbands = ExactThirdOctaveLowerBands(17, 40) @test octave_fraction(lbands) == 3 @test all((log2.(bands) .- log2.(lbands)) .≈ 1/(2*3)) ubands = ExactThirdOctaveUpperBands(17, 40) @test octave_fraction(ubands) == 3 @test all((log2.(ubands) .- log2.(bands)) .≈ 1/(2*3)) @test all((log2.(ubands) .- log2.(lbands)) .≈ 1/3) cbands = ExactThirdOctaveCenterBands(332.0, 7150.0) @test octave_fraction(cbands) == 3 @test cbands.bstart == 25 @test cbands.bend == 39 lbands = ExactThirdOctaveLowerBands(332.0, 7150.0) @test lbands.bstart == 25 @test lbands.bend == 39 ubands = ExactThirdOctaveUpperBands(332.0, 7150.0) @test ubands.bstart == 25 @test ubands.bend == 39 # Test the `cband_number` routine, which goes from an exact centerband frequency to the band number. for (i, cband) in enumerate(cbands) @test cband_number(cbands, cband) == (band_start(cbands) + i - 1) end end @testset "scaler argument" begin for scaler in [0.1, 0.5, 1.0, 1.5, 2.0] bands = ExactThirdOctaveCenterBands(17, 40, scaler) # These are just from the ANOPP2 manual. bands_expected_all = scaler .* [49.61, 62.50, 78.75, 99.21, 125.00, 157.49, 198.43, 250.00, 314.98, 396.85, 500.00, 629.96, 793.70, 1000.0, 1259.92, 1587.40, 2000.00, 2519.84, 3174.80, 4000.00, 5039.68, 6349.60, 8000.00, 10079.37] @test all(isapprox.(bands, bands_expected_all; atol=0.01)) @test_throws BoundsError bands[0] @test_throws BoundsError bands[25] bands_30_to_38 = ExactThirdOctaveCenterBands(30, 38, scaler) @test all(isapprox.(bands_30_to_38, bands_expected_all[14:end-2]; atol=0.01)) @test_throws BoundsError bands_30_to_38[0] @test_throws BoundsError bands_30_to_38[10] @test_throws ArgumentError ExactThirdOctaveCenterBands(5, 4, scaler) lbands = ExactThirdOctaveLowerBands(17, 40, scaler) @test all((log2.(bands) .- log2.(lbands)) .≈ 1/(2*3)) ubands = ExactThirdOctaveUpperBands(17, 40, scaler) @test all((log2.(ubands) .- log2.(bands)) .≈ 1/(2*3)) @test all((log2.(ubands) .- log2.(lbands)) .≈ 1/3) cbands = ExactThirdOctaveCenterBands(332.0*scaler, 7150.0*scaler, scaler) @test cbands.bstart == 25 @test cbands.bend == 39 lbands = ExactThirdOctaveLowerBands(332.0*scaler, 7150.0*scaler, scaler) @test lbands.bstart == 25 @test lbands.bend == 39 ubands = ExactThirdOctaveUpperBands(332.0*scaler, 7150.0*scaler, scaler) @test ubands.bstart == 25 @test ubands.bend == 39 # Test the `cband_number` routine, which goes from an exact centerband frequency to the band number. 
for (i, cband) in enumerate(cbands) @test cband_number(cbands, cband) == (band_start(cbands) + i - 1) end end end @testset "not-so-narrow narrowband spectrum" begin T = 1/1000.0 t0 = 0.13 f(t) = 6 + 8*cos(1*2*pi/T*t + 0.2) + 2.5*cos(2*2*pi/T*t - 3.0) + 9*cos(3*2*pi/T*t + 3.1) + 0.5*cos(4*2*pi/T*t - 1.1) + 3*cos(5*2*pi/T*t + 0.2) n = 10 dt = T/n df = 1/T t = t0 .+ (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) psd = PowerSpectralDensityAmplitude(ap) pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, psd) # Creating a non-lazy version of the PBS should give the same stuff as the lazy version. pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs) @test all(pbs_non_lazy .≈ pbs) @test lower_bands(pbs_non_lazy) === lower_bands(pbs) @test center_bands(pbs_non_lazy) === center_bands(pbs) @test upper_bands(pbs_non_lazy) === upper_bands(pbs) # So, this should have non-zero stuff at 1000 Hz, 2000 Hz, 3000 Hz, 4000 Hz, 5000 Hz. # And that means that, say, the 1000 Hz signal will extend from 500 # Hz to 1500 Hz. # So then it will show up in a bunch of bands: # # * 445 to 561 # * 561 to 707 # * 707 to 890 # * 890 to 1122 # * 1122 to 1414 # * 1414 to 1781 # # And that means the signal at 2000 will also show up at that last frequency. # So did I code this up properly? # I think so. # Here are all the bands that should be active: # # 1: 445.44935907016963, 561.2310241546866 # 2: 561.2310241546866, 707.1067811865476 # 3: 707.1067811865476, 890.8987181403395 # 4: 890.8987181403393, 1122.4620483093731 # 5: 1122.4620483093731, 1414.213562373095 # 6: 1414.2135623730949, 1781.7974362806785 # 7: 1781.7974362806785, 2244.9240966187463 # 8: 2244.9240966187463, 2828.42712474619 # 9: 2828.42712474619, 3563.594872561358 # 10: 3563.594872561357, 4489.8481932374925 # 11: 4489.8481932374925, 5656.85424949238 lbands = lower_bands(pbs) ubands = upper_bands(pbs) @test pbs[1] ≈ 0.5*8^2/df*(ubands[1] - 500) @test pbs[2] ≈ 0.5*8^2/df*(ubands[2] - lbands[2]) @test pbs[3] ≈ 0.5*8^2/df*(ubands[3] - lbands[3]) @test pbs[4] ≈ 0.5*8^2/df*(ubands[4] - lbands[4]) @test pbs[5] ≈ 0.5*8^2/df*(ubands[5] - lbands[5]) @test pbs[6] ≈ 0.5*8^2/df*(1500 - lbands[6]) + 0.5*2.5^2/df*(ubands[6] - 1500) @test pbs[7] ≈ 0.5*2.5^2/df*(ubands[7] - lbands[7]) @test pbs[8] ≈ 0.5*2.5^2/df*(2500 - lbands[8]) + 0.5*9^2/df*(ubands[8] - 2500) @test pbs[9] ≈ 0.5*9^2/df*(3500 - lbands[9]) + 0.5*0.5^2/df*(ubands[9] - 3500) @test pbs[10] ≈ 0.5*0.5^2/df*(ubands[10] - lbands[10]) # Last one is weird because of the Nyquist frequency. @test pbs[11] ≈ 0.5*0.5^2/df*(4500 - lbands[11]) + (3*cos(0.2))^2/df*(5500 - 4500) # Make sure I get the same thing if I pass in an initialized proportional center band object. pbs_init_cbands = LazyNBProportionalBandSpectrum(psd, center_bands(pbs)) @test all(pbs_init_cbands .≈ pbs) # Now, what if `istonal==true`? # Then the narrowband frequencies are thin, and so each narrowband frequency can only show up in one proportional band each. tonal = true msp_tonal = MSPSpectrumAmplitude(ap, tonal) # Using `istonal=true` shouldn't be any different than `istonal=false`. @test all(msp_tonal .≈ MSPSpectrumAmplitude(psd)) # Now get the PBS and check it. pbs_tonal = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, msp_tonal) # Check that we end up with the proportional bands we expect: cbands_tonal = center_bands(pbs_tonal) @test band_start(cbands_tonal) == 30 @test band_end(cbands_tonal) == 37 # Now check that we have the right answer for the PBS.
@test pbs_tonal[1] ≈ 0.5*8^2 # 1000 Hz @test pbs_tonal[2] ≈ 0 @test pbs_tonal[3] ≈ 0 @test pbs_tonal[4] ≈ 0.5*2.5^2 # 2000 Hz @test pbs_tonal[5] ≈ 0 @test pbs_tonal[6] ≈ 0.5*9^2 # 3000 Hz @test pbs_tonal[7] ≈ 0.5*0.5^2 # 4000 Hz # Last one is weird because of the Nyquist frequency. @test pbs_tonal[8] ≈ (3*cos(0.2))^2 # 5000 Hz # Make sure I get the same thing if I pass in an initialized proportional center band object. pbs_init_cbands = LazyNBProportionalBandSpectrum(msp_tonal, center_bands(pbs_tonal)) @test all(pbs_init_cbands .≈ pbs_tonal) end @testset "narrowband spectrum, one narrowband per proportional band" begin freq0 = 1000.0 T = 20/freq0 t0 = 0.13 f(t) = 6 + 8*cos(1*2*pi*freq0*t + 0.2) + 2.5*cos(2*2*pi*freq0*t - 3.0) + 9*cos(3*2*pi*freq0*t + 3.1) n = 128 dt = T/n df = 1/T t = t0 .+ (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) psd = PowerSpectralDensityAmplitude(ap) pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, psd) # Creating a non-lazy version of the PBS should give the same stuff as the lazy version. pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs) @test all(pbs_non_lazy .≈ pbs) @test lower_bands(pbs_non_lazy) === lower_bands(pbs) @test center_bands(pbs_non_lazy) === center_bands(pbs) @test upper_bands(pbs_non_lazy) === upper_bands(pbs) lbands = lower_bands(pbs) ubands = upper_bands(pbs) psd_freq = frequency(psd) @test psd_freq[21] ≈ freq0 @test pbs[17] ≈ psd[21]*df @test psd_freq[41] ≈ 2*freq0 @test pbs[20] ≈ psd[41]*df @test psd_freq[61] ≈ 3*freq0 @test pbs[22] ≈ psd[61]*df # Make sure all the other PBS entries are zero. for (i, amp) in enumerate(pbs) if i ∉ [17, 20, 22] @test isapprox(amp, 0.0; atol=1e-12) end end a2_data = load(joinpath(@__DIR__, "gen_anopp2_data", "pbs.jld2")) a2_freq = a2_data["a2_pbs_freq"] a2_pbs = a2_data["a2_pbs"] pbs_level = @. 10*log10(pbs/p_ref^2) # For some reason ANOPP2 doesn't give me the first four proportional # bands that I'd expect, but they're all zero anyway, so maybe # that's not important. But it also doesn't give me the last band I # expect, which is not zero. :-( The rest look good, though. for i in 1:length(a2_freq) @test center_bands(pbs)[i + 4] ≈ a2_freq[i] if a2_pbs[i] > 0 @test pbs_level[i + 4] ≈ a2_pbs[i] end end # Make sure I get the same thing if I pass in an initialized proportional center band object. pbs_init_cbands = LazyNBProportionalBandSpectrum(psd, center_bands(pbs)) @test all(pbs_init_cbands .≈ pbs) # So, for this example, I only have non-zero stuff at 1000 Hz, 2000 Hz, 3000 Hz. # But the lowest non-zero frequency is 50 Hz, highest is 3200 Hz. istonal = true msp_tonal = MSPSpectrumAmplitude(ap, istonal) pbs_tonal = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, msp_tonal) cbands_tonal = center_bands(pbs_tonal) @test band_start(cbands_tonal) == 17 @test band_end(cbands_tonal) == 35 for (i, amp) in enumerate(pbs_tonal) if i ∉ [14, 17, 19] @test isapprox(amp, 0; atol=1e-12) end end @test pbs_tonal[14] ≈ 0.5*8^2 @test pbs_tonal[17] ≈ 0.5*2.5^2 @test pbs_tonal[19] ≈ 0.5*9^2 # Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(msp_tonal, center_bands(pbs_tonal))
@test all(pbs_init_cbands .≈ pbs_tonal)
end
@testset "narrowband spectrum, many narrowbands per proportional band" begin
nfreq_nb = 800
freq_min_nb = 55.0
freq_max_nb = 1950.0
df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1)
f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
cbands = center_bands(pbs)
pbs_level = @. 10*log10(pbs/p_ref^2)
a2_data = load(joinpath(@__DIR__, "gen_anopp2_data", "pbs3.jld2"))
a2_pbs_freq = a2_data["a2_pbs_freq"]
a2_pbs = a2_data["a2_pbs"]
for i in 1:length(a2_pbs)
    # I'm not sure why ANOPP2 doesn't include all of the proportional bands I think it should.
    j = i + 1
    @test cbands[j] ≈ a2_pbs_freq[i]
    @test isapprox(pbs_level[j], a2_pbs[i]; atol=1e-2)
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Let's create a tonal MSP.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, msp, scaler, tonal)
# So, the narrowband frequencies go from 55.0 Hz to 1950 Hz.
# Let's check that.
cbands_tonal = center_bands(pbs_tonal)
@test band_start(cbands_tonal) == 17
@test band_end(cbands_tonal) == 33
# Now, this really isn't a tonal spectrum; it's actually very broadband.
# But I should still be able to check it.
# I need to identify which narrowbands are in which proportional band.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one that is less than or equal to uband.
    iend = searchsortedlast(f_nb, uband)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
end
@testset "narrowband spectrum, many narrowbands per proportional band, scaled frequency" begin
# Create a PBS with the standard frequencies.
nfreq_nb = 800
freq_min_nb = 55.0
freq_max_nb = 1950.0
df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1)
f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    # Now create another PBS, but with the scaled frequency bands, same psd.
    freq_min_nb_scaled = 55.0*scaler
    freq_max_nb_scaled = 1950.0*scaler
    df_nb_scaled = (freq_max_nb_scaled - freq_min_nb_scaled)/(nfreq_nb - 1)
    f_nb_scaled = freq_min_nb_scaled .+ (0:(nfreq_nb-1)).*df_nb_scaled
    # pbs_scaled = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    # msp_scaled = msp ./ df_nb .* df_nb_scaled
    # If we want the same psd, we need to adjust for the new narrowband frequency bin width.
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, for the tonal stuff, let's make sure we get the right thing, also.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, msp, scaler, tonal)
# So, the narrowband frequencies go from 55.0 Hz to 1950 Hz.
# Let's check that.
cbands_tonal = center_bands(pbs_tonal)
@test band_start(cbands_tonal) == 17
@test band_end(cbands_tonal) == 33
# Now check that the pbs is what we expect.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one that is less than or equal to uband.
    iend = searchsortedlast(f_nb, uband)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Now, what about the scaler stuff?
# Should be able to use the same trick for the istonal == false stuff.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    # Now create another PBS, but with the scaled frequency bands, same psd.
    freq_min_nb_scaled = 55.0*scaler
    freq_max_nb_scaled = 1950.0*scaler
    df_nb_scaled = (freq_max_nb_scaled - freq_min_nb_scaled)/(nfreq_nb - 1)
    f_nb_scaled = freq_min_nb_scaled .+ (0:(nfreq_nb-1)).*df_nb_scaled
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
end
# @testset "ANOPP2 docs example" begin
#     n_freq = 2232
#     psd_freq = 45.0 .+ 5 .* (0:n_freq-1)
#     df = psd_freq[2] - psd_freq[1]
#     msp_amp = 20 .+ 10 .* (1:n_freq)./n_freq
#     psd_amp = msp_amp ./ df
#     # pbs = ExactLazyNBProportionalBandSpectrum{3}(first(psd_freq), df, psd_amp)
#     pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, first(psd_freq), df, psd_amp)
#     cbands = center_bands(pbs)
#     lbands = lower_bands(pbs)
#     ubands = upper_bands(pbs)
#     pbs_level = @. 10*log10(pbs/p_ref^2)
#     # @show cbands pbs_level
#     # @show length(cbands) length(pbs_level)
# end
@testset "convergence test" begin
@testset "one band" begin
freq_min, freq_max = 50.0, 2000.0
lbands = ExactThirdOctaveLowerBands(freq_min, freq_max)
cbands = ExactThirdOctaveCenterBands(freq_min, freq_max)
ubands = ExactThirdOctaveUpperBands(freq_min, freq_max)
for b in 1:length(cbands)
    pbs_b_exact = psd_func_int(lbands[b], ubands[b])
    errs = Vector{Float64}()
    nfreqs = Vector{Int}()
    for nfreq in 200:10:300
        # df_nb = (freq_max - freq_min)/(nfreq - 1)
        # f = freq_min .+ (0:nfreq-1).*df_nb
        df_nb = (ubands[b] - lbands[b])/nfreq
        f0 = lbands[b] + 0.5*df_nb
        f1 = ubands[b] - 0.5*df_nb
        f = f0 .+ (0:nfreq-1).*df_nb
        psd = psd_func.(f)
        # pbs = LazyNBExactThirdOctaveSpectrum(f0, df_nb, psd)
        msp = psd .* df_nb
        # pbs = LazyNBExactThirdOctaveSpectrum(f0, df_nb, msp)
        pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, f0, df_nb, msp)
        if length(pbs) > 1
            # We tried above to construct the narrowband frequencies
            # to only cover the current 1/3-octave proportional
            # band, i.e., the one that starts at lbands[b] and ends
            # at ubands[b]. But because of floating point errors, we
            # might end up with a tiny bit of the narrowband in the
            # next proportional band. But only in the next one, so
            # check that we only have two:
            @test length(pbs) == 2
            # And the amount of energy we have in the next band
            # should be very small.
            @test isapprox(pbs[2], 0; atol=1e-10)
        end
        @test center_bands(pbs)[1] ≈ cbands[b]
        push!(nfreqs, nfreq)
        push!(errs, abs(pbs[1] - pbs_b_exact))
    end
    # So here we're assuming that
    #
    #   err ≈ 1/(nfreq^p)
    #
    # We want to find `p`.
    # If we take the error for two different values of `nfreq` and find their ratio:
    #
    #   err2/err1 = nfreq1^p/nfreq2^p
    #   log(err2/err1) = p*log(nfreq1/nfreq2)
    #   p = log(err2/err1)/log(nfreq1/nfreq2)
    #
    # p = log.(errs[2:end]./errs[1:end-1])./log.(nfreqs[1:end-1]./nfreqs[2:end])
    # @show b errs p
    #
    # But here we'll just use the Polynomials package to fit a line through the error as a function of nfreq on a log-log plot.
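    # (Why should the slope be -2? The narrowband levels are the PSD sampled
    # at the bin midpoints times df_nb, so summing them over a proportional
    # band is a midpoint-rule approximation of the band integral, and the
    # midpoint rule is second-order accurate: the error should scale like
    # df_nb^2, i.e. like 1/nfreq^2.)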
    l = Polynomials.fit(log.(nfreqs), log.(errs), 1)
    # @show l.coeffs[2]
    @test isapprox(l.coeffs[2], -2; atol=1e-5)
end
end
@testset "many bands" begin
freq_min, freq_max = 50.0, 2000.0
lbands = ExactThirdOctaveLowerBands(freq_min, freq_max)
cbands = ExactThirdOctaveCenterBands(freq_min, freq_max)
ubands = ExactThirdOctaveUpperBands(freq_min, freq_max)
for b in 1:length(cbands)
    pbs_b_exact = psd_func_int(lbands[b], ubands[b])
    errs = Vector{Float64}()
    nfreqs_b = Vector{Int}()
    for nfreq_b in 200:10:300
        # OK, I want to decide on a frequency spacing that will
        # fit nicely in the current band.
        df_nb = (ubands[b] - lbands[b])/nfreq_b
        # This is where I want the narrowband frequencies to start in the current band `b`.
        f0 = lbands[b] + 0.5*df_nb
        f1 = ubands[b] - 0.5*df_nb
        # But now I want the actual narrowband frequencies to cover freq_min and freq_max.
        # So I need to figure out how many bins I need before f0 and after f1.
        n_before_f0 = Int(floor((f0 - (lbands[1] + 0.5*df_nb)) / df_nb))
        n_after_f1 = Int(floor(((ubands[end] - 0.5*df_nb) - f1) / df_nb))
        # So now I should be able to get the narrowband frequencies.
        f = f0 .+ (-n_before_f0 : (nfreq_b-1)+n_after_f1).*df_nb
        # Now the PSD for the entire narrowband range.
        psd = psd_func.(f)
        # And the PBS.
        # pbs = ExactLazyNBProportionalBandSpectrum{3}(f[1], df_nb, psd)
        # pbs = LazyNBExactThirdOctaveSpectrum(f[1], df_nb, psd)
        msp = psd .* df_nb
        # pbs = LazyNBExactThirdOctaveSpectrum(f[1], df_nb, msp)
        pbs = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, f[1], df_nb, msp)
        # We created a narrowband range that should cover from freq_min to freq_max, so the sizes should be the same.
        @test length(pbs) == length(cbands)
        @test pbs.cbands.bstart == cbands.bstart
        @test pbs.cbands.bend == cbands.bend
        push!(nfreqs_b, nfreq_b)
        # We only want to check the error for the current band.
        push!(errs, abs(pbs[b] - pbs_b_exact))
    end
    # So here we're assuming that
    #
    #   err ≈ 1/(nfreq^p)
    #
    # We want to find `p`.
    # If we take the error for two different values of `nfreq` and find their ratio:
    #
    #   err2/err1 = nfreq1^p/nfreq2^p
    #   log(err2/err1) = p*log(nfreq1/nfreq2)
    #   p = log(err2/err1)/log(nfreq1/nfreq2)
    #
    # p = log.(errs[2:end]./errs[1:end-1])./log.(nfreqs_b[1:end-1]./nfreqs_b[2:end])
    # @show b errs p
    #
    # But here we'll just use the Polynomials package to fit a line through the error as a function of nfreq on a log-log plot.
    l = Polynomials.fit(log.(nfreqs_b), log.(errs), 1)
    # @show l.coeffs[2]
    @test isapprox(l.coeffs[2], -2; atol=1e-5)
end
end
end
end
@testset "approximate octave" begin
@testset "standard" begin
cbands = ApproximateOctaveCenterBands(0, 20)
cbands_expected = [1.0, 2.0, 4.0, 8.0, 16.0, 31.5, 63, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16e3, 31.5e3, 63e3, 125e3, 250e3, 500e3, 1000e3]
@test all(cbands .≈ cbands_expected)
for (i, cband) in enumerate(cbands)
    @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
end
lbands = ApproximateOctaveLowerBands(0, 20)
lbands_expected = [0.71, 1.42, 2.84, 5.68, 11.0, 22.0, 44.0, 88.0, 177.0, 355.0, 0.71e3, 1.42e3, 2.84e3, 5.68e3, 11.0e3, 22e3, 44e3, 88e3, 177e3, 355e3, 0.71e6]
@test all(lbands .≈ lbands_expected)
ubands = ApproximateOctaveUpperBands(0, 20)
ubands_expected = [1.42, 2.84, 5.68, 11.0, 22.0, 44.0, 88.0, 177.0, 355.0, 0.71e3, 1.42e3, 2.84e3, 5.68e3, 11.0e3, 22e3, 44e3, 88e3, 177e3, 355e3, 0.71e6, 1.42e6]
@test all(ubands .≈ ubands_expected)
cbands = ApproximateOctaveCenterBands(-20, 0)
cbands_expected = [1.0e-6, 2.0e-6, 4.0e-6, 8.0e-6, 16.0e-6, 31.5e-6, 63e-6, 125.0e-6, 250.0e-6, 500.0e-6, 1000.0e-6, 2000.0e-6, 4000.0e-6, 8000.0e-6, 16e-3, 31.5e-3, 63e-3, 125e-3, 250e-3, 500e-3, 1000e-3]
@test all(cbands .≈ cbands_expected)
for (i, cband) in enumerate(cbands)
    @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
end
lbands = ApproximateOctaveLowerBands(-20, 0)
lbands_expected = [0.71e-6, 1.42e-6, 2.84e-6, 5.68e-6, 11.0e-6, 22.0e-6, 44.0e-6, 88.0e-6, 177.0e-6, 355.0e-6, 0.71e-3, 1.42e-3, 2.84e-3, 5.68e-3, 11.0e-3, 22e-3, 44e-3, 88e-3, 177e-3, 355e-3, 0.71]
@test all(lbands .≈ lbands_expected)
ubands = ApproximateOctaveUpperBands(-20, 0)
ubands_expected = [1.42e-6, 2.84e-6, 5.68e-6, 11.0e-6, 22.0e-6, 44.0e-6, 88.0e-6, 177.0e-6, 355.0e-6, 0.71e-3, 1.42e-3, 2.84e-3, 5.68e-3, 11.0e-3, 22e-3, 44e-3, 88e-3, 177e-3, 355e-3, 0.71, 1.42]
@test all(ubands .≈ ubands_expected)
cbands = ApproximateOctaveCenterBands(2.2, 30.5e3)
@test cbands.bstart == 1
@test cbands.bend == 15
lbands = ApproximateOctaveLowerBands(2.2, 30.5e3)
@test lbands.bstart == 1
@test lbands.bend == 15
ubands = ApproximateOctaveUpperBands(2.2, 30.5e3)
@test ubands.bstart == 1
@test ubands.bend == 15
cbands = ApproximateOctaveCenterBands(23.0e-6, 2.8e-3)
@test cbands.bstart == -15
@test cbands.bend == -9
lbands = ApproximateOctaveLowerBands(23.0e-6, 2.8e-3)
@test lbands.bstart == -15
@test lbands.bend == -9
ubands = ApproximateOctaveUpperBands(23.0e-6, 2.8e-3)
@test ubands.bstart == -15
@test ubands.bend == -9
end
@testset "scaler argument" begin
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    cbands = ApproximateOctaveCenterBands(0, 20, scaler)
    cbands_expected = scaler .* [1.0, 2.0, 4.0, 8.0, 16.0, 31.5, 63, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16e3, 31.5e3, 63e3, 125e3, 250e3, 500e3, 1000e3]
    @test all(cbands .≈ cbands_expected)
    for (i, cband) in enumerate(cbands)
        @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
    end
    lbands = ApproximateOctaveLowerBands(0, 20, scaler)
    lbands_expected = scaler .* [0.71, 1.42, 2.84, 5.68, 11.0, 22.0, 44.0, 88.0, 177.0, 355.0, 0.71e3, 1.42e3, 2.84e3, 5.68e3, 11.0e3, 22e3, 44e3, 88e3, 177e3, 355e3, 0.71e6]
    @test all(lbands .≈ lbands_expected)
    ubands = ApproximateOctaveUpperBands(0, 20, scaler)
    ubands_expected = scaler .* [1.42, 2.84, 5.68, 11.0, 22.0, 44.0, 88.0, 177.0, 355.0, 0.71e3, 1.42e3, 2.84e3, 5.68e3, 11.0e3, 22e3, 44e3, 88e3, 177e3, 355e3, 0.71e6, 1.42e6]
    @test all(ubands .≈ ubands_expected)
    cbands = ApproximateOctaveCenterBands(-20, 0, scaler)
    cbands_expected = scaler .* [1.0e-6, 2.0e-6, 4.0e-6, 8.0e-6, 16.0e-6, 31.5e-6, 63e-6, 125.0e-6, 250.0e-6, 500.0e-6, 1000.0e-6, 2000.0e-6, 4000.0e-6, 8000.0e-6, 16e-3, 31.5e-3, 63e-3, 125e-3, 250e-3, 500e-3, 1000e-3]
    @test all(cbands .≈ cbands_expected)
    for (i, cband) in enumerate(cbands)
        @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
    end
    lbands = ApproximateOctaveLowerBands(-20, 0, scaler)
    lbands_expected = scaler .* [0.71e-6, 1.42e-6, 2.84e-6, 5.68e-6, 11.0e-6, 22.0e-6, 44.0e-6, 88.0e-6, 177.0e-6, 355.0e-6, 0.71e-3, 1.42e-3, 2.84e-3, 5.68e-3, 11.0e-3, 22e-3, 44e-3, 88e-3, 177e-3, 355e-3, 0.71]
    @test all(lbands .≈ lbands_expected)
    ubands = ApproximateOctaveUpperBands(-20, 0, scaler)
    ubands_expected = scaler .* [1.42e-6, 2.84e-6, 5.68e-6, 11.0e-6, 22.0e-6, 44.0e-6, 88.0e-6, 177.0e-6, 355.0e-6, 0.71e-3, 1.42e-3, 2.84e-3, 5.68e-3, 11.0e-3, 22e-3, 44e-3, 88e-3, 177e-3, 355e-3, 0.71, 1.42]
    @test all(ubands .≈ ubands_expected)
    cbands = ApproximateOctaveCenterBands(2.2*scaler, 30.5e3*scaler, scaler)
    @test cbands.bstart == 1
    @test cbands.bend == 15
    lbands = ApproximateOctaveLowerBands(2.2*scaler, 30.5e3*scaler, scaler)
    @test lbands.bstart == 1
    @test lbands.bend == 15
    ubands = ApproximateOctaveUpperBands(2.2*scaler, 30.5e3*scaler, scaler)
    @test ubands.bstart == 1
    @test ubands.bend == 15
    cbands = ApproximateOctaveCenterBands(23.0e-6*scaler, 2.8e-3*scaler, scaler)
    @test cbands.bstart == -15
    @test cbands.bend == -9
    lbands = ApproximateOctaveLowerBands(23.0e-6*scaler, 2.8e-3*scaler, scaler)
    @test lbands.bstart == -15
    @test lbands.bend == -9
    ubands = ApproximateOctaveUpperBands(23.0e-6*scaler, 2.8e-3*scaler, scaler)
    @test ubands.bstart == -15
    @test ubands.bend == -9
end
end
@testset "spectrum, normal case" begin
freq_min_nb = 55.0
freq_max_nb = 1950.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 55 - 0.5*2 = 54 to 1950 + 0.5*2 = 1951.0.
# So we should be using bands 6 to 11.
@test band_start(cbands) == 6
@test band_end(cbands) == 11
# Now, let's add up what each band's answer should be.
# Do I need to worry about the min and max stuff?
# I know that I've picked proportional bands that fully cover the input PSD frequency range.
# But, say, for the first proportional band, its lower edge could be well below the lower edge of the first narrowband bin, so the `max` is needed to start the overlap at the bin's edge, not the band's.
# Similar for the last band (with the `min` and the upper edges).
# But I don't think it's necessary for the inner ones.
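# Said another way (just a sketch of the check below, with a hypothetical
# `overlap` helper, not how the package computes it): each narrowband bin
# [fᵢ - Δf/2, fᵢ + Δf/2] should contribute
#
#   psdᵢ * max(0, min(uband, fᵢ + Δf/2) - max(lband, fᵢ - Δf/2))
#
# to the proportional band [lband, uband], i.e. something like
#
#   overlap(fl, fu, l, u) = max(0, min(u, fu) - max(l, fl))
#   pbs_check = [sum(psd .* overlap.(f_nb .- 0.5*df_nb, f_nb .+ 0.5*df_nb, lband, uband))
#                for (lband, uband) in zip(lbands, ubands)]
#
# The loop below does the same thing, but only treats the first and last
# overlapping bins specially, since the interior bins are fully contained.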
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Now, for the tonal stuff.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)
# Narrowband frequencies go from 55 Hz to 1950 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 6
@test band_end(cbands) == 11
# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one strictly less than uband (the `lt=<=` makes the comparison strict, so a tone exactly on an upper band edge counts toward the next band).
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
# Now for the scaler stuff, can use the same trick as the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
end
@testset "spectrum, lowest narrowband on a right edge" begin
freq_min_nb = 87.0
freq_max_nb = 1950.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 87 - 0.5*2 = 86 to 1950 + 0.5*2 = 1951.0.
# So we should be using bands 6 to 11.
@test band_start(cbands) == 6
@test band_end(cbands) == 11
@test ubands[1] ≈ freq_min_nb + 0.5*df_nb
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Now, for the tonal stuff.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)
# Narrowband frequencies go from 87 Hz to 1950 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 6
@test band_end(cbands) == 11
# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one strictly less than uband (the `lt=<=` makes the comparison strict).
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
# Now for the scaler stuff, can use the same trick as the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs_tonal))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs_tonal))
end
end
@testset "spectrum, lowest narrowband on a left edge" begin
freq_min_nb = 89.0
freq_max_nb = 1950.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 89 - 0.5*2 = 88 to 1950 + 0.5*2 = 1951.0.
# So we should be using bands 7 to 11.
# But because of floating point roundoff, the code actually picks 6 as the starting band.
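# (For reference: per the band-edge tables in the "standard" testset above,
# approximate octave band 6 spans 44-88 Hz and band 7 spans 88-177 Hz, so the
# 88 Hz lower edge of the narrowband range sits exactly on the shared edge of
# bands 6 and 7, and which band "owns" it comes down to floating point.)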
@test band_start(cbands) == 6
@test band_end(cbands) == 11
@test lbands[2] ≈ freq_min_nb - 0.5*df_nb
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Now, for the tonal stuff.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)
# Narrowband frequencies go from 89 Hz to 1950 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 7
@test band_end(cbands) == 11
# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one strictly less than uband (the `lt=<=` makes the comparison strict).
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
# Now for the scaler stuff, can use the same trick as the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs_tonal))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs_tonal))
end
end
@testset "spectrum, highest narrowband on a left edge" begin
freq_min_nb = 55.0
freq_max_nb = 1421.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 55 - 0.5*2 = 54 to 1421.0 + 0.5*2 = 1422.0.
# So we should be using bands 6 to 11.
@test band_start(cbands) == 6
@test band_end(cbands) == 11
@test lbands[end] ≈ freq_max_nb - 0.5*df_nb
# Now, let's add up what each band's answer should be.
# Do I need to worry about the min and max stuff?
# I know that I've picked proportional bands that fully cover the input PSD frequency range.
# But, say, for the first proportional band, its lower edge could be well below the lower edge of the first narrowband bin, so the `max` is needed to start the overlap at the bin's edge, not the band's.
# Similar for the last band (with the `min` and the upper edges).
# But I don't think it's necessary for the inner ones.
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Now, for the tonal stuff.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)
# Narrowband frequencies go from 55 Hz to 1421 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 6
@test band_end(cbands) == 11
# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one strictly less than uband (the `lt=<=` makes the comparison strict).
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
# Now for the scaler stuff, can use the same trick as the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs_tonal))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs_tonal))
end
end
@testset "spectrum, highest narrowband on a right edge" begin
freq_min_nb = 55.0
freq_max_nb = 1419.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 55 - 0.5*2 = 54 to 1419.0 + 0.5*2 = 1420.0.
# So we should be using bands 6 to 10.
@test band_start(cbands) == 6
@test band_end(cbands) == 10
@test ubands[end] ≈ freq_max_nb + 0.5*df_nb
# Now, let's add up what each band's answer should be.
# Do I need to worry about the min and max stuff?
# I know that I've picked proportional bands that fully cover the input PSD frequency range.
# But, say, for the first proportional band, its lower edge could be well below the lower edge of the first narrowband bin, so the `max` is needed to start the overlap at the bin's edge, not the band's.
# Similar for the last band (with the `min` and the upper edges).
# But I don't think it's necessary for the inner ones.
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Now, for the tonal stuff.
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)
# Narrowband frequencies go from 55 Hz to 1419 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 6
@test band_end(cbands) == 10
# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one strictly less than uband (the `lt=<=` makes the comparison strict).
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
# Now for the scaler stuff, can use the same trick as the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs_tonal))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs_tonal))
end
end
end
@testset "approximate 1/3rd octave" begin
@testset "standard" begin
cbands = ApproximateThirdOctaveCenterBands(0, 30)
cbands_expected = [1.0, 1.25, 1.6, 2.0, 2.5, 3.15, 4.0, 5.0, 6.3, 8.0, 1.0e1, 1.25e1, 1.6e1, 2.0e1, 2.5e1, 3.15e1, 4.0e1, 5.0e1, 6.3e1, 8.0e1, 1.0e2, 1.25e2, 1.6e2, 2.0e2, 2.5e2, 3.15e2, 4.0e2, 5.0e2, 6.3e2, 8.0e2, 1.0e3]
@test all(cbands .≈ cbands_expected)
# Test the `cband_number` routine, which goes from an approximate 3rd-octave centerband frequency to the band number.
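# (For example, `cband_number(cbands, 250.0)` should return 24: the
# approximate 1/3-octave band numbers advance 10 per decade with band 0
# centered on 1 Hz, so 2.5e2 is the center of band 24 per the table above.)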
for (i, cband) in enumerate(cbands)
    @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
end
lbands = ApproximateThirdOctaveLowerBands(0, 30)
lbands_expected = [0.9, 1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1, 0.9e1, 1.12e1, 1.4e1, 1.8e1, 2.24e1, 2.8e1, 3.35e1, 4.5e1, 5.6e1, 7.1e1, 0.9e2, 1.12e2, 1.4e2, 1.8e2, 2.24e2, 2.8e2, 3.35e2, 4.5e2, 5.6e2, 7.1e2, 0.9e3]
@test all(lbands .≈ lbands_expected)
ubands = ApproximateThirdOctaveUpperBands(0, 30)
ubands_expected = [1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1, 0.9e1, 1.12e1, 1.4e1, 1.8e1, 2.24e1, 2.8e1, 3.35e1, 4.5e1, 5.6e1, 7.1e1, 0.9e2, 1.12e2, 1.4e2, 1.8e2, 2.24e2, 2.8e2, 3.35e2, 4.5e2, 5.6e2, 7.1e2, 0.9e3, 1.12e3]
@test all(ubands .≈ ubands_expected)
cbands = ApproximateThirdOctaveCenterBands(-30, 0)
cbands_expected = [1.0e-3, 1.25e-3, 1.6e-3, 2.0e-3, 2.5e-3, 3.15e-3, 4.0e-3, 5.0e-3, 6.3e-3, 8.0e-3, 1.0e-2, 1.25e-2, 1.6e-2, 2.0e-2, 2.5e-2, 3.15e-2, 4.0e-2, 5.0e-2, 6.3e-2, 8.0e-2, 1.0e-1, 1.25e-1, 1.6e-1, 2.0e-1, 2.5e-1, 3.15e-1, 4.0e-1, 5.0e-1, 6.3e-1, 8.0e-1, 1.0]
@test all(cbands .≈ cbands_expected)
for (i, cband) in enumerate(cbands)
    @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
end
lbands = ApproximateThirdOctaveLowerBands(-30, 0)
lbands_expected = [0.9e-3, 1.12e-3, 1.4e-3, 1.8e-3, 2.24e-3, 2.8e-3, 3.35e-3, 4.5e-3, 5.6e-3, 7.1e-3, 0.9e-2, 1.12e-2, 1.4e-2, 1.8e-2, 2.24e-2, 2.8e-2, 3.35e-2, 4.5e-2, 5.6e-2, 7.1e-2, 0.9e-1, 1.12e-1, 1.4e-1, 1.8e-1, 2.24e-1, 2.8e-1, 3.35e-1, 4.5e-1, 5.6e-1, 7.1e-1, 0.9]
@test all(lbands .≈ lbands_expected)
ubands = ApproximateThirdOctaveUpperBands(-30, 0)
ubands_expected = [1.12e-3, 1.4e-3, 1.8e-3, 2.24e-3, 2.8e-3, 3.35e-3, 4.5e-3, 5.6e-3, 7.1e-3, 0.9e-2, 1.12e-2, 1.4e-2, 1.8e-2, 2.24e-2, 2.8e-2, 3.35e-2, 4.5e-2, 5.6e-2, 7.1e-2, 0.9e-1, 1.12e-1, 1.4e-1, 1.8e-1, 2.24e-1, 2.8e-1, 3.35e-1, 4.5e-1, 5.6e-1, 7.1e-1, 0.9, 1.12]
@test all(ubands .≈ ubands_expected)
end
@testset "scaler argument" begin
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    cbands = ApproximateThirdOctaveCenterBands(0, 30, scaler)
    cbands_expected = scaler .* [1.0, 1.25, 1.6, 2.0, 2.5, 3.15, 4.0, 5.0, 6.3, 8.0, 1.0e1, 1.25e1, 1.6e1, 2.0e1, 2.5e1, 3.15e1, 4.0e1, 5.0e1, 6.3e1, 8.0e1, 1.0e2, 1.25e2, 1.6e2, 2.0e2, 2.5e2, 3.15e2, 4.0e2, 5.0e2, 6.3e2, 8.0e2, 1.0e3]
    @test all(cbands .≈ cbands_expected)
    # Test the `cband_number` routine, which goes from an approximate 3rd-octave centerband frequency to the band number.
    for (i, cband) in enumerate(cbands)
        @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
    end
    lbands = ApproximateThirdOctaveLowerBands(0, 30, scaler)
    lbands_expected = scaler .* [0.9, 1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1, 0.9e1, 1.12e1, 1.4e1, 1.8e1, 2.24e1, 2.8e1, 3.35e1, 4.5e1, 5.6e1, 7.1e1, 0.9e2, 1.12e2, 1.4e2, 1.8e2, 2.24e2, 2.8e2, 3.35e2, 4.5e2, 5.6e2, 7.1e2, 0.9e3]
    @test all(lbands .≈ lbands_expected)
    ubands = ApproximateThirdOctaveUpperBands(0, 30, scaler)
    ubands_expected = scaler .* [1.12, 1.4, 1.8, 2.24, 2.8, 3.35, 4.5, 5.6, 7.1, 0.9e1, 1.12e1, 1.4e1, 1.8e1, 2.24e1, 2.8e1, 3.35e1, 4.5e1, 5.6e1, 7.1e1, 0.9e2, 1.12e2, 1.4e2, 1.8e2, 2.24e2, 2.8e2, 3.35e2, 4.5e2, 5.6e2, 7.1e2, 0.9e3, 1.12e3]
    @test all(ubands .≈ ubands_expected)
    cbands = ApproximateThirdOctaveCenterBands(-30, 0, scaler)
    cbands_expected = scaler .* [1.0e-3, 1.25e-3, 1.6e-3, 2.0e-3, 2.5e-3, 3.15e-3, 4.0e-3, 5.0e-3, 6.3e-3, 8.0e-3, 1.0e-2, 1.25e-2, 1.6e-2, 2.0e-2, 2.5e-2, 3.15e-2, 4.0e-2, 5.0e-2, 6.3e-2, 8.0e-2, 1.0e-1, 1.25e-1, 1.6e-1, 2.0e-1, 2.5e-1, 3.15e-1, 4.0e-1, 5.0e-1, 6.3e-1, 8.0e-1, 1.0]
    @test all(cbands .≈ cbands_expected)
    for (i, cband) in enumerate(cbands)
        @test cband_number(cbands, cband) == (band_start(cbands) + i - 1)
    end
    lbands = ApproximateThirdOctaveLowerBands(-30, 0, scaler)
    lbands_expected = scaler .* [0.9e-3, 1.12e-3, 1.4e-3, 1.8e-3, 2.24e-3, 2.8e-3, 3.35e-3, 4.5e-3, 5.6e-3, 7.1e-3, 0.9e-2, 1.12e-2, 1.4e-2, 1.8e-2, 2.24e-2, 2.8e-2, 3.35e-2, 4.5e-2, 5.6e-2, 7.1e-2, 0.9e-1, 1.12e-1, 1.4e-1, 1.8e-1, 2.24e-1, 2.8e-1, 3.35e-1, 4.5e-1, 5.6e-1, 7.1e-1, 0.9]
    @test all(lbands .≈ lbands_expected)
    ubands = ApproximateThirdOctaveUpperBands(-30, 0, scaler)
    ubands_expected = scaler .* [1.12e-3, 1.4e-3, 1.8e-3, 2.24e-3, 2.8e-3, 3.35e-3, 4.5e-3, 5.6e-3, 7.1e-3, 0.9e-2, 1.12e-2, 1.4e-2, 1.8e-2, 2.24e-2, 2.8e-2, 3.35e-2, 4.5e-2, 5.6e-2, 7.1e-2, 0.9e-1, 1.12e-1, 1.4e-1, 1.8e-1, 2.24e-1, 2.8e-1, 3.35e-1, 4.5e-1, 5.6e-1, 7.1e-1, 0.9, 1.12]
    @test all(ubands .≈ ubands_expected)
end
end
@testset "spectrum, normal case" begin
freq_min_nb = 50.0
# freq_max_nb = 1950.0
nfreq = 951
df_nb = 2.0
# f_nb = freq_min_nb:df_nb:freq_max_nb
f_nb = freq_min_nb .+ (0:nfreq-1) .* df_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 50 - 0.5*2 = 49 to 1950 + 0.5*2 = 1951.0.
# So we should be using bands 17 to 33.
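# (Per the "standard" testset above, approximate 1/3-octave band 17 is
# centered on 50 Hz with edges 45-56 Hz, and band 33 is centered on 2000 Hz
# with edges 1800-2240 Hz, so the 49-1951 Hz narrowband range lands in bands
# 17 through 33.)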
@test band_start(cbands) == 17
@test band_end(cbands) == 33
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)
# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    # freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end
# Now, for the tonal stuff.
# Putting the narrowband frequencies on nice round numbers ends up being bad for the tonal case, since the tones can fall into different bands for different values of `scaler`, which leads to very different PBS values.
# So tweak those a bit.
freq_min_nb = 50.1
nfreq = 951
df_nb = 2.0
f_nb = freq_min_nb .+ (0:nfreq-1) .* df_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)
# Narrowband frequencies go from 50.1 Hz to 1950.1 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 17
@test band_end(cbands) == 33
# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one strictly less than uband (the `lt=<=` makes the comparison strict).
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end
# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)
# Now for the scaler stuff, can use the same trick as the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    # freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
end
@testset "spectrum, lowest narrowband on a right edge" begin
freq_min_nb = 55.0
freq_max_nb = 1950.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp)
# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)
lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 55 - 0.5*2 = 54 to 1950 + 0.5*2 = 1951.0.
# So we should be using bands 17 to 33.
@test band_start(cbands) == 17
@test band_end(cbands) == 33
@test ubands[1] ≈ freq_min_nb + 0.5*df_nb
# Now, let's add up what each band's answer should be.
# Do I need to worry about the min and max stuff?
# I know that I've picked proportional bands that fully cover the input PSD frequency range.
# But, say, for the first proportional band, its lower edge could be well below the lower edge of the first narrowband bin, so the `max` is needed to start the overlap at the bin's edge, not the band's.
# Similar for the last band (with the `min` and the upper edges).
# But I don't think it's necessary for the inner ones.
#
# Hmm... the first PBS band isn't passing.
# The first PBS band goes from 45 Hz to 56 Hz.
# This PSD would have just one band there, centered on 55 Hz, extending from 54 to 56 Hz.
# So, that bin width should be 2 Hz.
# Looks like that's what I'm doing.
# Oh, wait.
# Maybe I'm adding it twice here?
# Let's see...
# `f_nb .+ 0.5*df_nb = [56, 58, 60, ...]`
# And the upper band edge for the first PBS is 56.
# So is `iend` here 1, or 2?
# `iend == 1`, same as `istart`.
# So I bet that has something to do with it.
# Yeah, looks like I'm adding this band twice, right?
# Yep, so need to deal with that.
# Fixed it.
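# (The `iend == istart` branch in the loop below is that fix: when the first
# and last overlapping narrowband bins are the same bin, it is counted once
# via `res_first` and `res_last` is zeroed instead of double-counting it.)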
for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)

# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end

# Now, for the tonal stuff.
# Putting the narrowband frequencies on nice round numbers ends up being bad for the tonal case, since the tones can fall into different bands for different values of `scaler`, which leads to very different PBS values.
# So tweak those a bit.
freq_min_nb = 55.1
df_nb = 2.0
nfreq = length(f_nb)
f_nb = freq_min_nb .+ (0:nfreq-1) .* df_nb
psd = psd_func.(f_nb)
msp = psd .* df_nb
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)

# Narrowband frequencies go from 55.1 Hz to 1949.1 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 17
@test band_end(cbands) == 33

# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one that is less than or equal to uband.
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)

# Now for the scaler stuff; we can use the same trick as in the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    # freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
end

@testset "spectrum, lowest narrowband on a left edge" begin
freq_min_nb = 57.0
freq_max_nb = 1950.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp)

# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)

lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 57 - 0.5*2 = 56 to 1950 + 0.5*2 = 1951.0
# So we should be using bands 18 to 33.
# But, actually, because of numerical roundoff, the code picks 17.
@test band_start(cbands) == 17
@test band_end(cbands) == 33
# Because of that floating-point roundoff, the code picks one proportional band lower than it strictly should.
@test lbands[2] ≈ freq_min_nb - 0.5*df_nb

for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)

# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    if length(pbs_scaled) == length(pbs)
        @test all(pbs_scaled./scaler .≈ pbs)
        # And the band frequencies should all be scaled.
        @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
        @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
        @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    else
        # But because of the numerical roundoff mentioned above, the unscaled spectrum picked up a lower frequency band that it shouldn't have, so we have to skip that band here.
        # And test that the "extra" band at the beginning is essentially zero.
        @test pbs[1] ≈ 0
        @test all(pbs_scaled./scaler .≈ pbs[2:end])
        # And the band frequencies should all be scaled.
        @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs)[2:end])
        @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs)[2:end])
        @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs)[2:end])
    end
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end

# Now, for the tonal stuff.
# Putting the narrowband frequencies on nice round numbers ends up being bad for the tonal case, since the tones can fall into different bands for different values of `scaler`, which leads to very different PBS values.
# So tweak those a bit.
freq_min_nb = 57.1
df_nb = 2.0
nfreq = 947
f_nb = freq_min_nb .+ (0:nfreq-1) .* df_nb
psd = psd_func.(f_nb)
msp = psd .* df_nb
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)

# Narrowband frequencies go from 57.1 Hz to 1949.1 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 18
@test band_end(cbands) == 33

# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one that is less than or equal to uband.
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)

# Now for the scaler stuff; we can use the same trick as in the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    # freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
end

@testset "spectrum, highest narrowband on a right edge" begin
freq_min_nb = 50.0
freq_max_nb = 1799.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp)

# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)

lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 50 - 0.5*2 = 49 to 1799 + 0.5*2 = 1800.0
# So we should be using bands 17 to 32.
@test band_start(cbands) == 17
@test band_end(cbands) == 32
@test ubands[end] ≈ freq_max_nb + 0.5*df_nb

for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)

# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end

# Now, for the tonal stuff.
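# In the tonal case each narrowband bin is treated as a pure tone: its entire mean-squared
# pressure is assigned to whichever proportional band contains its center frequency, with no
# splitting of a bin across band edges. A minimal sketch of that rule (the `tone_band_amp`
# helper is illustrative only, not part of the package):
#
#     # tone_band_amp(lband, uband) = sum(msp[searchsortedfirst(f_nb, lband):searchsortedlast(f_nb, uband; lt=<=)])
#
# which is exactly the check used in the tonal loops below.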
# Putting the narrowband frequencies on nice round numbers ends up being bad for the tonal case, since the tones can fall into different bands for different values of `scaler`, which leads to very different PBS values.
# So tweak those a bit.
freq_min_nb = 50.1
df_nb = 2.0
nfreq = length(f_nb)
f_nb = freq_min_nb .+ (0:nfreq-1) .* df_nb
psd = psd_func.(f_nb)
msp = psd .* df_nb
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)

# Narrowband frequencies go from 50.1 Hz to 1798.1 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 17
@test band_end(cbands) == 32

# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one that is less than or equal to uband.
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)

# Now for the scaler stuff; we can use the same trick as in the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    # freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
end

@testset "spectrum, highest narrowband on a left edge" begin
freq_min_nb = 50.0
freq_max_nb = 1801.0
df_nb = 2.0
f_nb = freq_min_nb:df_nb:freq_max_nb
psd = psd_func.(f_nb)
# pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, psd)
msp = psd .* df_nb
pbs = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp)

# Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
pbs_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs)), center_bands(pbs)[begin], pbs)
@test all(pbs_non_lazy .≈ pbs)
@test lower_bands(pbs_non_lazy) === lower_bands(pbs)
@test center_bands(pbs_non_lazy) === center_bands(pbs)
@test upper_bands(pbs_non_lazy) === upper_bands(pbs)

lbands = lower_bands(pbs)
cbands = center_bands(pbs)
ubands = upper_bands(pbs)
# So, the narrowband frequency range is from 50 - 0.5*2 = 49 to 1801 + 0.5*2 = 1802.0
# So we should be using bands 17 to 33.
@test band_start(cbands) == 17
@test band_end(cbands) == 33
@test lbands[end] ≈ freq_max_nb - 0.5*df_nb

for (lband, uband, pbs_b) in zip(lbands, ubands, pbs)
    istart = searchsortedfirst(f_nb .+ 0.5*df_nb, lband)
    res_first = psd[istart]*(min(uband, f_nb[istart] + 0.5*df_nb) - max(lband, f_nb[istart] - 0.5*df_nb))
    iend = searchsortedfirst(f_nb .+ 0.5*df_nb, uband)
    if iend > lastindex(f_nb)
        iend = lastindex(f_nb)
    end
    if iend == istart
        res_last = zero(eltype(psd))
    else
        res_last = psd[iend]*(min(uband, f_nb[iend] + 0.5*df_nb) - max(lband, f_nb[iend] - 0.5*df_nb))
    end
    res = res_first + sum(psd[istart+1:iend-1].*df_nb) + res_last
    @test pbs_b ≈ res
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs))
@test all(pbs_init_cbands .≈ pbs)

# Now, check that the `scaler` argument works.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, psd, scaler)
    # pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp, scaler)
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the original PBS multiplied by `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs)
    # And the band frequencies should all be scaled.
    @test all(lower_bands(pbs_scaled)./scaler .≈ lower_bands(pbs))
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs))
    @test all(upper_bands(pbs_scaled)./scaler .≈ upper_bands(pbs))
    # Creating a non-lazy version of the PBS should give the same stuff as the lazy version.
    pbs_scaled_non_lazy = ProportionalBandSpectrum(typeof(center_bands(pbs_scaled)), center_bands(pbs_scaled)[begin], pbs_scaled, freq_scaler(pbs_scaled))
    @test all(pbs_scaled_non_lazy .≈ pbs_scaled)
    @test lower_bands(pbs_scaled_non_lazy) === lower_bands(pbs_scaled)
    @test center_bands(pbs_scaled_non_lazy) === center_bands(pbs_scaled)
    @test upper_bands(pbs_scaled_non_lazy) === upper_bands(pbs_scaled)
end

# Now, for the tonal stuff.
# Putting the narrowband frequencies on nice round numbers ends up being bad for the tonal case, since the tones can fall into different bands for different values of `scaler`, which leads to very different PBS values.
# So tweak those a bit.
freq_min_nb = 50.1
df_nb = 2.0
nfreq = length(f_nb)
f_nb = freq_min_nb .+ (0:nfreq-1) .* df_nb
psd = psd_func.(f_nb)
msp = psd .* df_nb
scaler = 1
tonal = true
pbs_tonal = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb, df_nb, msp, scaler, tonal)

# Narrowband frequencies go from 50.1 Hz to 1800.1 Hz, so check that.
cbands = center_bands(pbs_tonal)
@test band_start(cbands) == 17
@test band_end(cbands) == 33

# Now make sure we get the right answer.
lbands = lower_bands(pbs_tonal)
ubands = upper_bands(pbs_tonal)
for (lband, uband, amp) in zip(lbands, ubands, pbs_tonal)
    # First index we want in f_nb is the one that is greater than or equal to lband.
    istart = searchsortedfirst(f_nb, lband)
    # Last index we want in f_nb is the one that is less than or equal to uband.
    iend = searchsortedlast(f_nb, uband; lt=<=)
    # Now check that we get the right answer.
    @test sum(msp[istart:iend]) ≈ amp
end

# Make sure I get the same thing if I pass in an initialized proportional center band object.
pbs_init_cbands = LazyNBProportionalBandSpectrum(freq_min_nb, df_nb, msp, center_bands(pbs_tonal), tonal)
@test all(pbs_init_cbands .≈ pbs_tonal)

# Now for the scaler stuff; we can use the same trick as in the non-tonal case.
for scaler in [0.1, 0.5, 1.0, 1.5, 2.0]
    freq_min_nb_scaled = freq_min_nb*scaler
    # freq_max_nb_scaled = freq_max_nb*scaler
    df_nb_scaled = df_nb*scaler
    msp_scaled = psd .* df_nb_scaled
    pbs_scaled = LazyNBProportionalBandSpectrum(ApproximateThirdOctaveBands, freq_min_nb_scaled, df_nb_scaled, msp_scaled, scaler, tonal)
    # We've changed the frequencies, but not the PSD, so the scaled PBS should be the same as the original as long as we account for the different frequency bin widths via the `scaler`.
    @test all(pbs_scaled./scaler .≈ pbs_tonal)
    # And the band frequencies should all be scaled.
    @test all(center_bands(pbs_scaled)./scaler .≈ center_bands(pbs_tonal))
end
end
end

@testset "lazy PBS ProportionalBandSpectrum" begin
@testset "same bands" begin
    for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands]
        cbands1 = TPB{:center}(10, 16)
        pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1)
        cbands2 = TPB{:center}(10, 16)
        pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2)
        @test all(pbs2 .≈ pbs1)
    end
end

@testset "shift bands by whole indices" begin
    for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands]
        cbands1 = TPB{:center}(10, 16)
        pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1)
        cbands2 = TPB{:center}(9, 17)
        pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2)
        @test pbs2[begin] ≈ 0
        @test all(pbs2[begin+1:end-1] .≈ pbs1)
        @test pbs2[end] ≈ 0
    end
end

@testset "shift bands up by non-whole indices" begin
    for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands]
        cbands1 = TPB{:center}(10, 16)
        pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1)
        scaler2 = 1.01
        cbands2 = TPB{:center}(10, 16, scaler2)
        pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2)
        lbands1 = lower_bands(pbs1)
        ubands1 = upper_bands(pbs1)
        lbands2 = lower_bands(pbs2)
        ubands2 = upper_bands(pbs2)
        for i in 1:length(pbs1)
            if i < length(pbs1)
                amp2_left = pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - lbands2[i])
                amp2_right = pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(ubands2[i] - lbands1[i+1])
                amp2_check = amp2_left + amp2_right
            else
                amp2_check = pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - lbands2[i])
            end
            @test pbs2[i] ≈ amp2_check
        end
    end
end

@testset "shift bands down by non-whole indices" begin
    for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands]
        cbands1 = TPB{:center}(10, 16)
        pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1)
        scaler2 = 0.99
        cbands2 = TPB{:center}(10, 16, scaler2)
        pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2)
        lbands1 = lower_bands(pbs1)
        ubands1 = upper_bands(pbs1)
        lbands2 = lower_bands(pbs2)
        ubands2 = upper_bands(pbs2)
        for i in 1:length(pbs1)
            if i > 1
                amp2_left = pbs1[i-1]/(ubands1[i-1] - lbands1[i-1])*(ubands1[i-1] - lbands2[i])
                amp2_right = pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[i] - lbands1[i])
                amp2_check = amp2_left + amp2_right
            else
                amp2_check =
pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[i] - lbands1[i]) end @test pbs2[i] ≈ amp2_check end end end @testset "output bands too low" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = TPB{:center}(1, 9) pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2) @test all(pbs2 .≈ 0) end end @testset "output bands too high" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = TPB{:center}(17, 20) pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2) @test all(pbs2 .≈ 0) end end @testset "input 3rd-octave, output octave, aligned" begin cbands1 = ExactProportionalBands{3,:center}(32, 49) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) pbs2 = LazyPBSProportionalBandSpectrum(ExactProportionalBands{1}, pbs1) cbands2 = center_bands(pbs2) @test band_start(cbands2) == 11 @test band_end(cbands2) == 16 for i in 1:length(pbs2) j = (i-1)*3 + 1 @test pbs2[i] ≈ sum(pbs1[j:j+2]) end end @testset "input octave, output 3rd-octave, aligned" begin cbands1 = ExactProportionalBands{1,:center}(11, 16) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) pbs2 = LazyPBSProportionalBandSpectrum(ExactProportionalBands{3}, pbs1) cbands2 = center_bands(pbs2) @test band_start(cbands2) == 32 @test band_end(cbands2) == 49 lbands1 = lower_bands(pbs1) ubands1 = upper_bands(pbs1) lbands2 = lower_bands(pbs2) ubands2 = upper_bands(pbs2) for i in 1:length(pbs1) j = (i-1)*3 + 1 @test pbs2[j] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j] - lbands2[j]) @test pbs2[j+1] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j+1] - lbands2[j+1]) @test pbs2[j+2] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j+2] - lbands2[j+2]) end end @testset "input 3rd-octave, output octave, not aligned, scaled up" begin cbands1 = ExactProportionalBands{3,:center}(32, 49) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = ExactProportionalBands{1,:center}(11, 16, 1.01) # pbs2 = LazyPBSProportionalBandSpectrum(ExactProportionalBands{1}, pbs1, 1.01) pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2) lbands1 = lower_bands(pbs1) ubands1 = upper_bands(pbs1) lbands2 = lower_bands(pbs2) ubands2 = upper_bands(pbs2) for i in 1:length(pbs2) if i < length(pbs2) # | . . | . . | # | | | j = (i-1)*3 + 1 amp2_left = (pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - lbands2[i]) + pbs1[j+1] + pbs1[j+2]) j = (i)*3 + 1 amp2_right = pbs1[j]/(ubands1[j] - lbands1[j])*(ubands2[i] - lbands1[j]) amp2_check = amp2_left + amp2_right else j = (i-1)*3 + 1 amp2_check = (pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - lbands2[i]) + pbs1[j+1] + pbs1[j+2]) end @test pbs2[i] ≈ amp2_check end end @testset "input 3rd-octave, output octave, not aligned, scaled down" begin cbands1 = ExactProportionalBands{3,:center}(32, 49) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = ExactProportionalBands{1,:center}(11, 16, 0.99) pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2) lbands1 = lower_bands(pbs1) ubands1 = upper_bands(pbs1) lbands2 = lower_bands(pbs2) ubands2 = upper_bands(pbs2) for i in 1:length(pbs2) j = (i-1)*3 + 1 if i > 1 # | . . | . . 
| # | | | amp2_left = pbs1[j-1]/(ubands1[j-1] - lbands1[j-1])*(ubands1[j-1] - lbands2[i]) amp2_right = pbs1[j] + pbs1[j+1] + pbs1[j+2]/(ubands1[j+2] - lbands1[j+2])*(ubands2[i] - lbands1[j+2]) amp2_check = amp2_left + amp2_right else amp2_right = pbs1[j] + pbs1[j+1] + pbs1[j+2]/(ubands1[j+2] - lbands1[j+2])*(ubands2[i] - lbands1[j+2]) amp2_check = amp2_right end @test pbs2[i] ≈ amp2_check end end @testset "input octave, output 3rd-octave, not aligned, scaled up" begin cbands1 = ExactProportionalBands{1,:center}(11, 16) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = ExactProportionalBands{3, :center}(32, 49, 1.01) pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2) lbands1 = lower_bands(pbs1) ubands1 = upper_bands(pbs1) lbands2 = lower_bands(pbs2) ubands2 = upper_bands(pbs2) for i in 1:length(pbs1) # | | | # | . . | . . | j = 3*(i - 1) + 1 @test pbs2[j] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j] - lbands2[j]) @test pbs2[j+1] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j+1] - lbands2[j+1]) if i < length(pbs1) @test pbs2[j+2] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - lbands2[j+2]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(ubands2[j+2] - lbands1[i+1])) else @test pbs2[j+2] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - lbands2[j+2]) end end end @testset "input octave, output 3rd-octave, not aligned, scaled down" begin cbands1 = ExactProportionalBands{1,:center}(11, 16) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = ExactProportionalBands{3, :center}(32, 49, 0.99) pbs2 = LazyPBSProportionalBandSpectrum(pbs1, cbands2) lbands1 = lower_bands(pbs1) ubands1 = upper_bands(pbs1) lbands2 = lower_bands(pbs2) ubands2 = upper_bands(pbs2) for i in 1:length(pbs1) # | | | # | . . | . . | j = 3*(i - 1) + 1 if i > 1 @test pbs2[j] ≈ ( pbs1[i-1]/(ubands1[i-1] - lbands1[i-1])*(ubands1[i-1] - lbands2[j]) + pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j] - lbands1[i])) else @test pbs2[j] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j] - lbands1[i]) end @test pbs2[j+1] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j+1] - lbands2[j+1]) @test pbs2[j+2] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands2[j+2] - lbands2[j+2]) end end end @testset "combining proportional band spectrums" begin @testset "same bands" begin nfreq_nb = 800 freq_min_nb = 55.0 freq_max_nb = 1950.0 df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1) f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb psd = psd_func.(f_nb) msp = psd .* df_nb for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] # pbs1_lazy = LazyNBProportionalBandSpectrum(TPB, freq_min_nb, df_nb, psd) pbs1_lazy = LazyNBProportionalBandSpectrum(TPB, freq_min_nb, df_nb, msp) pbs1 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) pbs2 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) pbs3 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) # So, when we add these, the proportional band spectrum should be just 3 times whatever the original was, and all the bands should be the same. 
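# In other words, with identical input and output bands, `combine` should reduce to an
# element-wise sum over the input spectra (a property the tests below check rather than assume):
#
#     # combine([pbs1, pbs2, pbs3], cbands) ≈ pbs1 .+ pbs2 .+ pbs3, which is 3 .* pbs1 here since all three are equal.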
pbs_combined = combine([pbs1, pbs2, pbs3], center_bands(pbs1_lazy)) @test lower_bands(pbs_combined) == lower_bands(pbs1_lazy) @test center_bands(pbs_combined) == center_bands(pbs1_lazy) @test upper_bands(pbs_combined) == upper_bands(pbs1_lazy) @test all(pbs_combined .≈ (3 .* pbs1_lazy)) end end @testset "outbands lower than all inbands" begin nfreq_nb = 800 freq_min_nb = 55.0 freq_max_nb = 1950.0 df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1) f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb psd = psd_func.(f_nb) msp = psd .* df_nb for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] # pbs1_lazy = LazyNBProportionalBandSpectrum(TPB, freq_min_nb, df_nb, psd) pbs1_lazy = LazyNBProportionalBandSpectrum(TPB, freq_min_nb, df_nb, msp) pbs1 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) pbs2 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) pbs3 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) # outcbands = ExactProportionalBands{3, :center}(10, 16) outcbands = TPB{:center}(2.0, 10.0) # Make sure the outbands are actually all lower than the input narrowbands. @test last(upper_bands(outcbands)) < freq_min_nb - 0.5*df_nb pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test center_bands(pbs_combined) == outcbands @test all(pbs_combined .≈ 0) end end @testset "outbands higher than all inbands" begin nfreq_nb = 800 freq_min_nb = 55.0 freq_max_nb = 1950.0 df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1) f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb psd = psd_func.(f_nb) msp = psd .* df_nb for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] pbs1_lazy = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, freq_min_nb, df_nb, msp) pbs1 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) pbs2 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) pbs3 = ProportionalBandSpectrum(collect(pbs1_lazy), center_bands(pbs1_lazy)) outcbands = TPB{:center}(3000.0, 20000.0) # Make sure the outbands are actually all higher than the input narrowbands. @test first(lower_bands(outcbands)) > freq_max_nb + 0.5*df_nb pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test center_bands(pbs_combined) == outcbands @test all(pbs_combined .≈ 0) end end @testset "inbands lined up with outbands" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) cbands2 = TPB{:center}(11, 16) pbs2 = ProportionalBandSpectrum(rand(length(cbands2)), cbands2) cbands3 = TPB{:center}(12, 16) pbs3 = ProportionalBandSpectrum(rand(length(cbands3)), cbands3) # outcbands = ExactProportionalBands{3, :center}(10, 16) outcbands = TPB{:center}(10, 16) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test pbs_combined[1] ≈ pbs1[1] @test pbs_combined[2] ≈ pbs1[2] + pbs2[1] @test all(pbs_combined[3:end] .≈ pbs1[3:end] .+ pbs2[2:end] .+ pbs3) end end @testset "scaled outbands" begin # Since proportional bands are... proportional, we can be clever about the scaler argument. # For example, for a 1/3 octave band, log2(f_center2/f_center1) = (1/3), where f_center1 is a center frequency, and f_center2 is the next highest center frequency after f_center1. 
# So, log2(f_center2) - log2(f_center1) = 1/3
# log2(f_center2) = log2(f_center1) + 1/3
# f_center2 = 2^(log2(f_center1) + 1/3) = f_center1*2^(1/3)
# So if I set the scaler argument to 2^(1/3), that should have the effect of shifting the frequency bands up one unscaled band.
# And if I do that twice (i.e., squaring the scaler), that should shift the frequency bands by two.
# But this doesn't work with the approximate bands, since those aren't exactly proportional bands.
for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12},
            # ApproximateThirdOctaveBands, ApproximateOctaveBands
           ]
    # cbands1 = ExactProportionalBands{3, :center}(10, 16)
    cbands1 = TPB{:center}(10, 16)
    pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1)
    # NO = octave_fraction(cbands1)
    scaler = cbands1[2]/cbands1[1]
    # cbands2 = ExactProportionalBands{3, :center}(10, 15, scaler)
    cbands2 = TPB{:center}(10, 15, scaler)
    pbs2 = ProportionalBandSpectrum(rand(length(cbands2)), cbands2)
    scaler = cbands1[3]/cbands1[1]
    # cbands3 = ExactProportionalBands{3, :center}(10, 14, scaler)
    cbands3 = TPB{:center}(10, 14, scaler)
    pbs3 = ProportionalBandSpectrum(rand(length(cbands3)), cbands3)
    # outcbands = ExactProportionalBands{3, :center}(10, 16)
    outcbands = TPB{:center}(10, 16)
    pbs_combined = combine([pbs1, pbs2, pbs3], outcbands)
    @test pbs_combined[1] ≈ pbs1[1]
    @test pbs_combined[2] ≈ pbs1[2] + pbs2[1]
    @test all(pbs_combined[3:end] .≈ pbs1[3:end] .+ pbs2[2:end] .+ pbs3)
end
end

@testset "non-aligned outbands, one input spectrum" begin
for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands]
    # cbands1 = ExactProportionalBands{3, :center}(10, 16)
    cbands1 = TPB{:center}(10, 16)
    lbands1 = lower_bands(cbands1)
    ubands1 = upper_bands(cbands1)
    pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1)
    # outcbands = ExactProportionalBands{3, :center}(10, 16, 1.1)
    # Need to make sure the frequency shift (here `1.05`) is small enough to shift the frequency bands by less than one band.
    # `1.1` was too big for the 12th-octave bands.
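    # The expected values in the non-aligned checks below all follow from one assumption: the
    # energy in an input band is spread uniformly over that band, so an input band contributes
    # to an output band in proportion to the width of their overlap. A minimal sketch of that
    # proration (the `prorate` helper is illustrative only, not part of the package):
    #
    #     prorate(amp, lb, ub, lb_out, ub_out) = amp/(ub - lb)*max(min(ub, ub_out) - max(lb, lb_out), zero(lb))
    #
    # Each expected value below is a sum of such terms over the input bands that overlap the
    # output band in question.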
outcbands = TPB{:center}(10, 16, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine([pbs1], outcbands) for i in 1:length(pbs_combined)-1 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) ) end i = length(pbs_combined) @test pbs_combined[i] ≈ pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) end end @testset "non-aligned outbands, multiple input spectrums" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 15, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) @test all(cbands2 ./ cbands1[1:end-1] .≈ scaler) pbs2 = ProportionalBandSpectrum(rand(length(cbands2)), cbands2) scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 14, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) @test all(cbands3 ./ cbands1[1:end-2] .≈ scaler) pbs3 = ProportionalBandSpectrum(rand(length(cbands3)), cbands3) scaler = 1.05 outcbands = TPB{:center}(10, 16, scaler) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) @test all(outcbands ./ cbands1 .≈ scaler) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) i = 1 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) ) i = 2 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-1]/(ubands3[i-1] - lbands3[i-1])*(outubands[i] - lbands3[i-1]) ) for i in 3:length(pbs_combined)-1 if TPB == ApproximateThirdOctaveBands && i == 6 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-2]/(ubands3[i-2] - lbands3[i-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-2]/(ubands3[i-2] - lbands3[i-2])*(ubands3[i-2] - outlbands[i]) + pbs3[i-1]/(ubands3[i-1] - lbands3[i-1])*(outubands[i] - lbands3[i-1]) ) end end i = length(pbs_combined) if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs3[i-3]/(ubands3[i-3] - lbands3[i-3])*(ubands3[i-3] - outlbands[i]) + pbs3[i-2] ) else @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + 
pbs3[i-2]/(ubands3[i-2] - lbands3[i-2])*(ubands3[i-2] - outlbands[i]) ) end end end @testset "non-aligned outbands, multiple input spectrums, all same length" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) pbs2 = ProportionalBandSpectrum(rand(length(cbands2)), cbands2) scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) pbs3 = ProportionalBandSpectrum(rand(length(cbands3)), cbands3) outcbands = TPB{:center}(10, 16, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) i = 1 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) ) i = 2 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-1]/(ubands3[i-1] - lbands3[i-1])*(outubands[i] - lbands3[i-1]) ) for i in 3:length(pbs_combined)-1 if TPB == ApproximateThirdOctaveBands && i == 6 @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-2]/(ubands3[i-2] - lbands3[i-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-2]/(ubands3[i-2] - lbands3[i-2])*(ubands3[i-2] - outlbands[i]) + pbs3[i-1]/(ubands3[i-1] - lbands3[i-1])*(outubands[i] - lbands3[i-1]) ) end end i = length(pbs_combined) if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + # pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-3]/(ubands3[i-3] - lbands3[i-3])*(ubands3[i-3] - outlbands[i]) + pbs3[i-2] + pbs3[i-1]/(ubands3[i-1] - lbands3[i-1])*(outubands[i] - lbands3[i-1]) ) else @test pbs_combined[i] ≈ ( pbs1[i]/(ubands1[i] - lbands1[i])*(ubands1[i] - outlbands[i]) + # pbs1[i+1]/(ubands1[i+1] - lbands1[i+1])*(outubands[i] - lbands1[i+1]) + pbs2[i-1]/(ubands2[i-1] - lbands2[i-1])*(ubands2[i-1] - outlbands[i]) + pbs2[i]/(ubands2[i] - lbands2[i])*(outubands[i] - lbands2[i]) + pbs3[i-2]/(ubands3[i-2] - lbands3[i-2])*(ubands3[i-2] - outlbands[i]) + pbs3[i-1]/(ubands3[i-1] - lbands3[i-1])*(outubands[i] - lbands3[i-1]) ) end end end @testset "non-aligned wide 
outbands, multiple input spectrums, all same length" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) pbs1 = ProportionalBandSpectrum(rand(length(cbands1)), cbands1) scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) pbs2 = ProportionalBandSpectrum(rand(length(cbands2)), cbands2) scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) pbs3 = ProportionalBandSpectrum(rand(length(cbands3)), cbands3) outcbands = TPB{:center}(5, 30, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test all(pbs_combined[1:4] .≈ 0) i = 5 j = 1 @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(outubands[i] - lbands1[j]) ) i = 6 j = 1 @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) ) i = 7 j = 2 @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) for i in 8:11 j += 1 if TPB == ApproximateThirdOctaveBands && j == 6 @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end end i = 12 j = 7 if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-3]/(ubands3[j-3] - lbands3[j-3])*(ubands3[j-3] - outlbands[i]) + pbs3[j-2] + pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) else @test pbs_combined[i] ≈ ( pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end i = 13 j = 8 @test 
pbs_combined[i] ≈ ( # pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) i = 14 j = 9 @test pbs_combined[i] ≈ ( # pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + # pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) #+ # pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) @test all(pbs_combined[15:end] .≈ 0) end end @testset "aligned inbands and outbands, multiple input spectrums, lazy PBS" begin outcbands = ExactProportionalBands{1}{:center}(5, 30) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) cbands1 = ExactProportionalBands{1}{:center}(11, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) # Find a narrowband frequency spacing that will fit in the first outband. nfreqs_nb_1band = 10 freq_min_nb_m_half_df_nb_1band = outlbands[band_start(cbands1) - band_start(outcbands) + 1] freq_max_nb_p_half_df_nb_1band = outubands[band_start(cbands1) - band_start(outcbands) + 1] df_nb = (freq_max_nb_p_half_df_nb_1band - freq_min_nb_m_half_df_nb_1band)/(nfreqs_nb_1band + 1) # Now construct a narrowband frequency that spans the bands I'm interested in. freq_min_nb_m_half_df_nb = outlbands[band_start(cbands1) - band_start(outcbands) + 1] freq_max_nb_p_half_df_nb = outubands[band_end(cbands1) - band_start(outcbands) + 1] n = Int(round((freq_max_nb_p_half_df_nb - freq_min_nb_m_half_df_nb)/df_nb)) + 1 f_lu = range(freq_min_nb_m_half_df_nb, freq_max_nb_p_half_df_nb; length=n) f_nb = 0.5.*(f_lu[2:end] .+ f_lu[1:end-1]) @test step(f_nb) ≈ df_nb @test f_nb[1] > lbands1[1] @test f_nb[end] < ubands1[end] @test (f_nb[1] - 0.5*step(f_nb)) ≈ outlbands[band_start(cbands1) - band_start(outcbands) + 1] @test (f_nb[end] + 0.5*step(f_nb)) ≈ outubands[band_end(cbands1) - band_start(outcbands) + 1] msp1 = rand(length(f_nb)) pbs1 = LazyNBProportionalBandSpectrum(f_nb[1], df_nb, msp1, cbands1) cbands2 = ExactProportionalBands{1}{:center}(12, 16) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) # Find a narrowband frequency spacing that will fit in the second outband. nfreqs_nb_1band = 10 freq_min_nb_m_half_df_nb_1band = outlbands[band_start(cbands2) - band_start(outcbands) + 1] freq_max_nb_p_half_df_nb_1band = outubands[band_start(cbands2) - band_start(outcbands) + 1] df_nb = (freq_max_nb_p_half_df_nb_1band - freq_min_nb_m_half_df_nb_1band)/(nfreqs_nb_1band + 1) # Now construct a narrowband frequency that spans the bands I'm interested in. 
freq_min_nb_m_half_df_nb = outlbands[band_start(cbands2) - band_start(outcbands) + 1]
freq_max_nb_p_half_df_nb = outubands[band_end(cbands2) - band_start(outcbands) + 1]
n = Int(round((freq_max_nb_p_half_df_nb - freq_min_nb_m_half_df_nb)/df_nb)) + 1
f_lu = range(freq_min_nb_m_half_df_nb, freq_max_nb_p_half_df_nb; length=n)
f_nb = 0.5.*(f_lu[2:end] .+ f_lu[1:end-1])
@test step(f_nb) ≈ df_nb
@test f_nb[1] > lbands2[1]
@test f_nb[end] < ubands2[end]
@test (f_nb[1] - 0.5*step(f_nb)) ≈ outlbands[band_start(cbands2) - band_start(outcbands) + 1]
@test (f_nb[end] + 0.5*step(f_nb)) ≈ outubands[band_end(cbands2) - band_start(outcbands) + 1]
msp2 = rand(length(f_nb))
pbs2 = LazyNBProportionalBandSpectrum(f_nb[1], df_nb, msp2, cbands2)

cbands3 = ExactProportionalBands{1}{:center}(13, 16)
lbands3 = lower_bands(cbands3)
ubands3 = upper_bands(cbands3)

# Find a narrowband frequency spacing that will fit in the third outband.
nfreqs_nb_1band = 10
freq_min_nb_m_half_df_nb_1band = outlbands[band_start(cbands3) - band_start(outcbands) + 1]
freq_max_nb_p_half_df_nb_1band = outubands[band_start(cbands3) - band_start(outcbands) + 1]
df_nb = (freq_max_nb_p_half_df_nb_1band - freq_min_nb_m_half_df_nb_1band)/(nfreqs_nb_1band + 1)

# Now construct a narrowband frequency that spans the bands I'm interested in.
freq_min_nb_m_half_df_nb = outlbands[band_start(cbands3) - band_start(outcbands) + 1]
freq_max_nb_p_half_df_nb = outubands[band_end(cbands3) - band_start(outcbands) + 1]
n = Int(round((freq_max_nb_p_half_df_nb - freq_min_nb_m_half_df_nb)/df_nb)) + 1
f_lu = range(freq_min_nb_m_half_df_nb, freq_max_nb_p_half_df_nb; length=n)
f_nb = 0.5.*(f_lu[2:end] .+ f_lu[1:end-1])
@test step(f_nb) ≈ df_nb
@test f_nb[1] > lbands3[1]
@test f_nb[end] < ubands3[end]
@test (f_nb[1] - 0.5*step(f_nb)) ≈ outlbands[band_start(cbands3) - band_start(outcbands) + 1]
@test (f_nb[end] + 0.5*step(f_nb)) ≈ outubands[band_end(cbands3) - band_start(outcbands) + 1]
msp3 = rand(length(f_nb))
pbs3 = LazyNBProportionalBandSpectrum(f_nb[1], df_nb, msp3, cbands3)

pbs_combined = combine([pbs1, pbs2, pbs3], outcbands)

f1_nb = frequency_nb(pbs1)
df1_nb = step(f1_nb)
f1_nb_l = f1_nb .- 0.5*df1_nb
f1_nb_u = f1_nb .+ 0.5*df1_nb
f2_nb = frequency_nb(pbs2)
df2_nb = step(f2_nb)
f2_nb_l = f2_nb .- 0.5*df2_nb
f2_nb_u = f2_nb .+ 0.5*df2_nb
f3_nb = frequency_nb(pbs3)
df3_nb = step(f3_nb)
f3_nb_l = f3_nb .- 0.5*df3_nb
f3_nb_u = f3_nb .+ 0.5*df3_nb
for i in 1:length(pbs_combined)
    tol = 1e-6
    jstart = searchsortedfirst(f1_nb_l, outlbands[i]-tol)
    jend = searchsortedlast(f1_nb_u, outubands[i]+tol)
    pbs1_i = sum(msp1[jstart:jend])
    jstart = searchsortedfirst(f2_nb_l, outlbands[i]-tol)
    jend = searchsortedlast(f2_nb_u, outubands[i]+tol)
    pbs2_i = sum(msp2[jstart:jend])
    jstart = searchsortedfirst(f3_nb_l, outlbands[i]-tol)
    jend = searchsortedlast(f3_nb_u, outubands[i]+tol)
    pbs3_i = sum(msp3[jstart:jend])
    @test isapprox(pbs_combined[i], pbs1_i + pbs2_i + pbs3_i; atol=1e-12)
end
end

@testset "non-aligned inbands, aligned outbands, multiple input spectrums, lazy PBS" begin
outcbands = ExactProportionalBands{1}{:center}(5, 30)
outlbands = lower_bands(outcbands)
outubands = upper_bands(outcbands)

# Find a narrowband frequency spacing that will fit in one of the output bands.
istart = 1 iend = length(outcbands) nfreqs_nb_1band = 10 freq_min_nb_m_half_df_nb_1band = outlbands[istart] freq_max_nb_p_half_df_nb_1band = outubands[istart] df_nb = (freq_max_nb_p_half_df_nb_1band - freq_min_nb_m_half_df_nb_1band)/(nfreqs_nb_1band + 1) # Now construct a narrowband frequency that spans the bands I'm interested in. freq_min_nb_m_half_df_nb = outlbands[istart] freq_max_nb_p_half_df_nb = outubands[iend] n = Int(round((freq_max_nb_p_half_df_nb - freq_min_nb_m_half_df_nb)/df_nb)) + 1 f_lu = range(freq_min_nb_m_half_df_nb, freq_max_nb_p_half_df_nb; length=n) f_nb = 0.5.*(f_lu[2:end] .+ f_lu[1:end-1]) @test step(f_nb) ≈ df_nb @test (f_nb[1] - 0.5*step(f_nb)) ≈ outlbands[istart] @test (f_nb[end] + 0.5*step(f_nb)) ≈ outubands[iend] msp1 = rand(length(f_nb)) pbs1 = LazyNBProportionalBandSpectrum(ExactProportionalBands{1}, f_nb[1], df_nb, msp1) # Find a narrowband frequency spacing that will fit in one of the output bands. istart = 2 iend = length(outcbands) - 1 nfreqs_nb_1band = 10 freq_min_nb_m_half_df_nb_1band = outlbands[istart] freq_max_nb_p_half_df_nb_1band = outubands[istart] df_nb = (freq_max_nb_p_half_df_nb_1band - freq_min_nb_m_half_df_nb_1band)/(nfreqs_nb_1band + 1) # Now construct a narrowband frequency that spans the bands I'm interested in. freq_min_nb_m_half_df_nb = outlbands[istart] freq_max_nb_p_half_df_nb = outubands[iend] n = Int(round((freq_max_nb_p_half_df_nb - freq_min_nb_m_half_df_nb)/df_nb)) + 1 f_lu = range(freq_min_nb_m_half_df_nb, freq_max_nb_p_half_df_nb; length=n) f_nb = 0.5.*(f_lu[2:end] .+ f_lu[1:end-1]) @test step(f_nb) ≈ df_nb @test (f_nb[1] - 0.5*step(f_nb)) ≈ outlbands[istart] @test (f_nb[end] + 0.5*step(f_nb)) ≈ outubands[iend] msp2 = rand(length(f_nb)) pbs2 = LazyNBProportionalBandSpectrum(ExactProportionalBands{3}, f_nb[1], df_nb, msp2) # Find a narrowband frequency spacing that will fit in one of the output bands. istart = 3 iend = length(outcbands) - 2 nfreqs_nb_1band = 10 freq_min_nb_m_half_df_nb_1band = outlbands[istart] freq_max_nb_p_half_df_nb_1band = outubands[istart] df_nb = (freq_max_nb_p_half_df_nb_1band - freq_min_nb_m_half_df_nb_1band)/(nfreqs_nb_1band + 1) # Now construct a narrowband frequency that spans the bands I'm interested in. 
freq_min_nb_m_half_df_nb = outlbands[istart] freq_max_nb_p_half_df_nb = outubands[iend] n = Int(round((freq_max_nb_p_half_df_nb - freq_min_nb_m_half_df_nb)/df_nb)) + 1 f_lu = range(freq_min_nb_m_half_df_nb, freq_max_nb_p_half_df_nb; length=n) f_nb = 0.5.*(f_lu[2:end] .+ f_lu[1:end-1]) @test step(f_nb) ≈ df_nb @test (f_nb[1] - 0.5*step(f_nb)) ≈ outlbands[istart] @test (f_nb[end] + 0.5*step(f_nb)) ≈ outubands[iend] msp3 = rand(length(f_nb)) pbs3 = LazyNBProportionalBandSpectrum(ExactProportionalBands{12}, f_nb[1], df_nb, msp3) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) f1_nb = frequency_nb(pbs1) df1_nb = step(f1_nb) f1_nb_l = f1_nb .- 0.5*df1_nb f1_nb_u = f1_nb .+ 0.5*df1_nb f2_nb = frequency_nb(pbs2) df2_nb = step(f2_nb) f2_nb_l = f2_nb .- 0.5*df2_nb f2_nb_u = f2_nb .+ 0.5*df2_nb f3_nb = frequency_nb(pbs3) df3_nb = step(f3_nb) f3_nb_l = f3_nb .- 0.5*df3_nb f3_nb_u = f3_nb .+ 0.5*df3_nb for i in 1:length(pbs_combined) tol = 1e-6 jstart = searchsortedfirst(f1_nb_l, outlbands[i]-tol) jend = searchsortedlast(f1_nb_u, outubands[i]+tol) pbs1_i = sum(msp1[jstart:jend]) jstart = searchsortedfirst(f2_nb_l, outlbands[i]-tol) jend = searchsortedlast(f2_nb_u, outubands[i]+tol) pbs2_i = sum(msp2[jstart:jend]) jstart = searchsortedfirst(f3_nb_l, outlbands[i]-tol) jend = searchsortedlast(f3_nb_u, outubands[i]+tol) pbs3_i = sum(msp3[jstart:jend]) @test isapprox(pbs_combined[i], pbs1_i + pbs2_i + pbs3_i; rtol=1e-12) end end end @testset "proportional bands with time" begin @testset "no time" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) # Create some random msp corresponding to the proportional bands defined by lbands1, cbands1, ubands1. nfreq_nb = 800 freq_min_nb = lbands1[1] + 0.1*(ubands1[1] - lbands1[1]) freq_max_nb = ubands1[end] - 0.1*(ubands1[end] - lbands1[end]) df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1) @test (freq_min_nb - 0.5*df_nb) > lbands1[1] @test (freq_max_nb + 0.5*df_nb) < ubands1[end] f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb msp1 = rand(length(f_nb)) pbs1 = LazyNBProportionalBandSpectrum(TPB, f_nb[1], df_nb, msp1) scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) # Create some random msp corresponding to the proportional bands defined by lbands2, cbands2, ubands2. nfreq_nb = 800 freq_min_nb = lbands2[1] + 0.1*(ubands2[1] - lbands2[1]) freq_max_nb = ubands2[end] - 0.1*(ubands2[end] - lbands2[end]) df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1) @test (freq_min_nb - 0.5*df_nb) > lbands2[1] @test (freq_max_nb + 0.5*df_nb) < ubands2[end] f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb msp2 = rand(length(f_nb)) pbs2 = LazyNBProportionalBandSpectrum(TPB, f_nb[1], df_nb, msp2, freq_scaler(cbands2)) scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) # Create some random msp corresponding to the proportional bands defined by lbands3, cbands3, ubands3. 
nfreq_nb = 800 freq_min_nb = lbands3[1] + 0.1*(ubands3[1] - lbands3[1]) freq_max_nb = ubands3[end] - 0.1*(ubands3[end] - lbands3[end]) df_nb = (freq_max_nb - freq_min_nb)/(nfreq_nb - 1) @test (freq_min_nb - 0.5*df_nb) > lbands3[1] @test (freq_max_nb + 0.5*df_nb) < ubands3[end] f_nb = freq_min_nb .+ (0:(nfreq_nb-1)).*df_nb msp3 = rand(length(f_nb)) pbs3 = LazyNBProportionalBandSpectrum(TPB, f_nb[1], df_nb, msp3, freq_scaler(cbands3)) T = time_period([pbs1, pbs2, pbs3]) @test T ≈ -Inf @test time_scaler(pbs1, T) ≈ 1 @test time_scaler(pbs2, T) ≈ 1 @test time_scaler(pbs3, T) ≈ 1 end end @testset "with time" begin @testset "all same time step" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) t1 = 2.0 dt1 = 0.2 pbs1 = ProportionalBandSpectrumWithTime(rand(length(cbands1)), cbands1, dt1, t1) @test has_observer_time(pbs1) == true @test observer_time(pbs1) ≈ t1 @test timestep(pbs1) ≈ dt1 scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) t2 = 2.1 dt2 = 0.2 pbs2 = ProportionalBandSpectrumWithTime(rand(length(cbands2)), cbands2, dt2, t2) @test has_observer_time(pbs2) == true @test observer_time(pbs2) ≈ t2 @test timestep(pbs2) ≈ dt2 scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) t3 = 2.3 dt3 = 0.2 pbs3 = ProportionalBandSpectrumWithTime(rand(length(cbands3)), cbands3, dt3, t3) @test has_observer_time(pbs3) == true @test observer_time(pbs3) ≈ t3 @test timestep(pbs3) ≈ dt3 T = time_period([pbs1, pbs2, pbs3]) @test T ≈ t3 - t1 tscaler1 = dt1/T tscaler2 = dt2/T tscaler3 = dt3/T @test time_scaler(pbs1, T) ≈ tscaler1 @test time_scaler(pbs2, T) ≈ tscaler2 @test time_scaler(pbs3, T) ≈ tscaler3 outcbands = TPB{:center}(5, 30, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test all(pbs_combined[1:4] .≈ 0) i = 5 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(outubands[i] - lbands1[j]) ) i = 6 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) ) i = 7 j = 2 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) for i in 8:11 j += 1 if TPB == ApproximateThirdOctaveBands && j == 6 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + 
tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end end i = 12 j = 7 if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-3]/(ubands3[j-3] - lbands3[j-3])*(ubands3[j-3] - outlbands[i]) + tscaler3*pbs3[j-2] + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end i = 13 j = 8 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) i = 14 j = 9 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + # tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) #+ # tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) @test all(pbs_combined[15:end] .≈ 0) end end @testset "different time steps" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) t1 = 2.0 dt1 = 0.2 pbs1 = ProportionalBandSpectrumWithTime(rand(length(cbands1)), cbands1, dt1, t1) @test has_observer_time(pbs1) == true @test observer_time(pbs1) ≈ t1 scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) t2 = 2.1 dt2 = 0.3 pbs2 = ProportionalBandSpectrumWithTime(rand(length(cbands2)), cbands2, dt2, t2) @test has_observer_time(pbs2) == true @test observer_time(pbs2) ≈ t2 scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) t3 = 2.3 dt3 = 0.4 pbs3 = ProportionalBandSpectrumWithTime(rand(length(cbands3)), cbands3, dt3, t3) @test 
has_observer_time(pbs3) == true @test observer_time(pbs3) ≈ t3 T = time_period([pbs1, pbs2, pbs3]) @test T ≈ t3 - t1 tscaler1 = dt1/T tscaler2 = dt2/T tscaler3 = dt3/T @test time_scaler(pbs1, T) ≈ tscaler1 @test time_scaler(pbs2, T) ≈ tscaler2 @test time_scaler(pbs3, T) ≈ tscaler3 outcbands = TPB{:center}(5, 30, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test all(pbs_combined[1:4] .≈ 0) i = 5 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(outubands[i] - lbands1[j]) ) i = 6 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) ) i = 7 j = 2 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) for i in 8:11 j += 1 if TPB == ApproximateThirdOctaveBands && j == 6 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end end i = 12 j = 7 if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-3]/(ubands3[j-3] - lbands3[j-3])*(ubands3[j-3] - outlbands[i]) + tscaler3*pbs3[j-2] + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end i = 13 j = 8 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + 
tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) i = 14 j = 9 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + # tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) #+ # tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) @test all(pbs_combined[15:end] .≈ 0) end end @testset "mix of with and without time" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) t1 = 2.0 dt1 = 0.2 pbs1 = ProportionalBandSpectrumWithTime(rand(length(cbands1)), cbands1, dt1, t1) @test has_observer_time(pbs1) == true @test observer_time(pbs1) ≈ t1 scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) # t2 = 2.1 # dt2 = 0.3 pbs2 = ProportionalBandSpectrum(rand(length(cbands2)), cbands2) @test has_observer_time(pbs2) == false @test observer_time(pbs2) ≈ 0 scaler = cbands1[3]/cbands1[1] cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) t3 = 2.3 dt3 = 0.4 pbs3 = ProportionalBandSpectrumWithTime(rand(length(cbands3)), cbands3, dt3, t3) @test has_observer_time(pbs3) == true @test observer_time(pbs3) ≈ t3 T = time_period([pbs1, pbs2, pbs3]) @test T ≈ t3 - t1 tscaler1 = dt1/T tscaler2 = 1.0 tscaler3 = dt3/T @test time_scaler(pbs1, T) ≈ tscaler1 @test time_scaler(pbs2, T) ≈ tscaler2 @test time_scaler(pbs3, T) ≈ tscaler3 outcbands = TPB{:center}(5, 30, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine([pbs1, pbs2, pbs3], outcbands) @test all(pbs_combined[1:4] .≈ 0) i = 5 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(outubands[i] - lbands1[j]) ) i = 6 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) ) i = 7 j = 2 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) for i in 8:11 j += 1 if TPB == ApproximateThirdOctaveBands && j == 6 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + 
tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end end i = 12 j = 7 if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-3]/(ubands3[j-3] - lbands3[j-3])*(ubands3[j-3] - outlbands[i]) + tscaler3*pbs3[j-2] + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) end i = 13 j = 8 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) i = 14 j = 9 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + # tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) #+ # tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) ) @test all(pbs_combined[15:end] .≈ 0) end end @testset "different time steps, two dimensional arrays" begin for TPB in [ExactProportionalBands{3}, ExactProportionalBands{1}, ExactProportionalBands{12}, ApproximateThirdOctaveBands, ApproximateOctaveBands] cbands1 = TPB{:center}(10, 16) lbands1 = lower_bands(cbands1) ubands1 = upper_bands(cbands1) t1 = 2.0 dt1 = 0.2 pbs1 = ProportionalBandSpectrumWithTime(rand(length(cbands1)), cbands1, dt1, t1) @test has_observer_time(pbs1) == true @test observer_time(pbs1) ≈ t1 scaler = cbands1[2]/cbands1[1] cbands2 = TPB{:center}(10, 16, scaler) lbands2 = lower_bands(cbands2) ubands2 = upper_bands(cbands2) t2 = 2.1 dt2 = 0.3 pbs2 = ProportionalBandSpectrumWithTime(rand(length(cbands2)), cbands2, dt2, t2) @test has_observer_time(pbs2) == true @test observer_time(pbs2) ≈ t2 scaler = cbands1[3]/cbands1[1] 
cbands3 = TPB{:center}(10, 16, scaler) lbands3 = lower_bands(cbands3) ubands3 = upper_bands(cbands3) t3 = 2.3 dt3 = 0.4 pbs3 = ProportionalBandSpectrumWithTime(rand(length(cbands3)), cbands3, dt3, t3) @test has_observer_time(pbs3) == true @test observer_time(pbs3) ≈ t3 cbands4 = TPB{:center}(10, 16) lbands4 = lower_bands(cbands1) ubands4 = upper_bands(cbands1) t4 = 2.0 dt4 = 0.2 pbs4 = ProportionalBandSpectrumWithTime(rand(length(cbands4)), cbands4, dt4, t4) @test has_observer_time(pbs4) == true @test observer_time(pbs4) ≈ t4 scaler = cbands4[2]/cbands4[1] cbands5 = TPB{:center}(10, 16, scaler) lbands5 = lower_bands(cbands5) ubands5 = upper_bands(cbands5) t5 = 5.3 dt5 = 0.6 pbs5 = ProportionalBandSpectrumWithTime(rand(length(cbands5)), cbands5, dt5, t5) @test has_observer_time(pbs5) == true @test observer_time(pbs5) ≈ t5 scaler = cbands4[3]/cbands4[1] cbands6 = TPB{:center}(10, 16, scaler) lbands6 = lower_bands(cbands6) ubands6 = upper_bands(cbands6) t6 = 2.1 dt6 = 0.7 pbs6 = ProportionalBandSpectrumWithTime(rand(length(cbands6)), cbands6, dt6, t6) @test has_observer_time(pbs6) == true @test observer_time(pbs6) ≈ t6 pbss = hcat([pbs1, pbs2, pbs3], [pbs4, pbs5, pbs6]) @test size(pbss) == (3, 2) # time period for column 1. Tc1 = time_period(pbss[:, 1]) @test Tc1 ≈ t3 - t1 # time period for column 2. Tc2 = time_period(pbss[:, 2]) @test Tc2 ≈ t5 - t4 tscaler1 = dt1/Tc1 tscaler2 = dt2/Tc1 tscaler3 = dt3/Tc1 tscaler4 = dt4/Tc2 tscaler5 = dt5/Tc2 tscaler6 = dt6/Tc2 @test time_scaler(pbs1, Tc1) ≈ tscaler1 @test time_scaler(pbs2, Tc1) ≈ tscaler2 @test time_scaler(pbs3, Tc1) ≈ tscaler3 @test time_scaler(pbs4, Tc2) ≈ tscaler4 @test time_scaler(pbs5, Tc2) ≈ tscaler5 @test time_scaler(pbs6, Tc2) ≈ tscaler6 outcbands = TPB{:center}(5, 30, 1.05) outlbands = lower_bands(outcbands) outubands = upper_bands(outcbands) pbs_combined = combine(pbss, outcbands) @test all(pbs_combined[1:4] .≈ 0) i = 5 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(outubands[i] - lbands1[j]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(outubands[i] - lbands4[j]) ) i = 6 j = 1 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) ) i = 7 j = 2 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-1]/(ubands6[j-1] - lbands6[j-1])*(outubands[i] - lbands6[j-1]) ) for i in 8:11 j += 1 if TPB == ApproximateThirdOctaveBands && j == 6 @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - 
outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(outubands[i] - outlbands[i]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-2]/(ubands6[j-2] - lbands6[j-2])*(outubands[i] - outlbands[i]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-2]/(ubands6[j-2] - lbands6[j-2])*(ubands6[j-2] - outlbands[i]) + tscaler6*pbs6[j-1]/(ubands6[j-1] - lbands6[j-1])*(outubands[i] - lbands6[j-1]) ) end end i = 12 j = 7 if TPB == ApproximateThirdOctaveBands @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-3]/(ubands3[j-3] - lbands3[j-3])*(ubands3[j-3] - outlbands[i]) + tscaler3*pbs3[j-2] + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + # tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-3]/(ubands6[j-3] - lbands6[j-3])*(ubands6[j-3] - outlbands[i]) + tscaler6*pbs6[j-2] + tscaler6*pbs6[j-1]/(ubands6[j-1] - lbands6[j-1])*(outubands[i] - lbands6[j-1]) ) else @test pbs_combined[i] ≈ ( tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) + tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + # tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + tscaler5*pbs5[j]/(ubands5[j] - 
lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-2]/(ubands6[j-2] - lbands6[j-2])*(ubands6[j-2] - outlbands[i]) + tscaler6*pbs6[j-1]/(ubands6[j-1] - lbands6[j-1])*(outubands[i] - lbands6[j-1]) ) end i = 13 j = 8 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) + # tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + # tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + # tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-2]/(ubands6[j-2] - lbands6[j-2])*(ubands6[j-2] - outlbands[i]) + tscaler6*pbs6[j-1]/(ubands6[j-1] - lbands6[j-1])*(outubands[i] - lbands6[j-1]) ) i = 14 j = 9 @test pbs_combined[i] ≈ ( # tscaler1*pbs1[j]/(ubands1[j] - lbands1[j])*(ubands1[j] - outlbands[i]) + # tscaler1*pbs1[j+1]/(ubands1[j+1] - lbands1[j+1])*(outubands[i] - lbands1[j+1]) + # tscaler2*pbs2[j-1]/(ubands2[j-1] - lbands2[j-1])*(ubands2[j-1] - outlbands[i]) + # tscaler2*pbs2[j]/(ubands2[j] - lbands2[j])*(outubands[i] - lbands2[j]) + tscaler3*pbs3[j-2]/(ubands3[j-2] - lbands3[j-2])*(ubands3[j-2] - outlbands[i]) + # tscaler3*pbs3[j-1]/(ubands3[j-1] - lbands3[j-1])*(outubands[i] - lbands3[j-1]) # tscaler4*pbs4[j]/(ubands4[j] - lbands4[j])*(ubands4[j] - outlbands[i]) + # tscaler4*pbs4[j+1]/(ubands4[j+1] - lbands4[j+1])*(outubands[i] - lbands4[j+1]) + # tscaler5*pbs5[j-1]/(ubands5[j-1] - lbands5[j-1])*(ubands5[j-1] - outlbands[i]) + # tscaler5*pbs5[j]/(ubands5[j] - lbands5[j])*(outubands[i] - lbands5[j]) + tscaler6*pbs6[j-2]/(ubands6[j-2] - lbands6[j-2])*(ubands6[j-2] - outlbands[i]) #+ # tscaler6*pbs6[j-1]/(ubands6[j-1] - lbands6[j-1])*(outubands[i] - lbands6[j-1]) ) @test all(pbs_combined[15:end] .≈ 0) # Transpose to switch the time axis and compare to the original. time_axis = 2 pbss_t = permutedims(pbss, (2, 1)) pbs_combined_t = combine(pbss_t, outcbands, time_axis) @test all(pbs_combined_t .≈ pbs_combined) end end end end end @testset "OASPL" begin @testset "Parseval's theorem" begin fr(t) = 2*cos(1*2*pi*t) + 4*cos(2*2*pi*t) + 6*cos(3*2*pi*t) + 8*cos(4*2*pi*t) fi(t) = 2*sin(1*2*pi*t) + 4*sin(2*2*pi*t) + 6*sin(3*2*pi*t) + 8*sin(4*2*pi*t) f(t) = fr(t) + fi(t) for T in [1.0, 2.0] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) amp = MSPSpectrumAmplitude(ap) oaspl_time_domain = OASPL(ap) oaspl_freq_domain = OASPL(amp) @test oaspl_freq_domain ≈ oaspl_time_domain end end end @testset "function with known mean squared pressure" begin f(t) = 4*cos(2*2*pi*t) # What's the mean-square of that function? I think the mean-square of # # f(t) = a*cos(2*pi*k*t) # # is a^2/2.
So for T in [1.0, 2.0] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = f.(t) msp_expected = 4^2/2 oaspl_expected = 10*log10(msp_expected/p_ref^2) ap = PressureTimeHistory(p, dt) amp = MSPSpectrumAmplitude(ap) oaspl_time_domain = OASPL(ap) oaspl_freq_domain = OASPL(amp) @test oaspl_time_domain ≈ oaspl_expected @test oaspl_freq_domain ≈ oaspl_expected end end end @testset "ANOPP2 comparison" begin fr(t) = 2*cos(1*2*pi*t) + 4*cos(2*2*pi*t) + 6*cos(3*2*pi*t) + 8*cos(4*2*pi*t) fi(t) = 2*sin(1*2*pi*t) + 4*sin(2*2*pi*t) + 6*sin(3*2*pi*t) + 8*sin(4*2*pi*t) f(t) = fr(t) + fi(t) oaspl_a2 = 114.77121254719663 for T in [1, 2] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) oaspl = OASPL(ap) @test isapprox(oaspl, oaspl_a2, atol=1e-12) end end end end @testset "A-weighting" begin @testset "ANOPP2 comparison" begin fr(t) = 2*cos(1e3*2*pi*t) + 4*cos(2e3*2*pi*t) + 6*cos(3e3*2*pi*t) + 8*cos(4e3*2*pi*t) fi(t) = 2*sin(1e3*2*pi*t) + 4*sin(2e3*2*pi*t) + 6*sin(3e3*2*pi*t) + 8*sin(4e3*2*pi*t) f(t) = fr(t) + fi(t) nbs_A_a2 = Dict( (1, 19)=>[0.0, 4.000002539852234, 21.098932320239594, 47.765983983028875, 79.89329612328712, 6.904751939255882e-29, 3.438658433244509e-29, 3.385314868430938e-29, 4.3828241499153937e-29, 3.334042101984942e-29], (1, 20)=>[0.0, 4.000002539852235, 21.09893232023959, 47.76598398302881, 79.89329612328707, 2.4807405180395723e-29, 3.319538256490389e-29, 1.1860147288201262e-29, 1.5894684286161776e-29, 9.168407004474984e-30, 1.4222371367588704e-31], (2, 19)=>[0.0, 4.137956256384954e-30, 4.00000253985224, 2.1118658029791977e-29, 21.098932320239633, 3.4572972532471526e-29, 47.765983983028924, 1.2630134771692395e-28, 79.89329612328716, 8.284388048614786e-29], (2, 20)=>[0.0, 1.2697180778261437e-30, 4.000002539852251, 4.666290179209354e-29, 21.098932320239584, 3.4300386105764425e-29, 47.76598398302884, 6.100255343320017e-29, 79.89329612328727, 1.801023480958872e-28, 6.029776808298499e-29], ) for T_ms in [1, 2] for n in [19, 20] dt = T_ms*1e-3/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) nbs = MSPSpectrumAmplitude(ap) # nbs_A = W_A(nbs) # amp_A = W_A(nbs) amp_A = W_A.(frequency(nbs)).*nbs # ANOPP2.a2_aa_weight(ANOPP2.a2_aa_a_weight, ANOPP2.a2_aa_nbs_enum, ANOPP2.a2_aa_msp, freq, nbs_A_a2) # Wish I could get this to match more closely. But the weighting # function looks pretty nasty numerically (frequencies raised to the # 4th power, and one of the coefficients is about 2.24e16). # @show T_ms n amp_A nbs_A_a2[(T_ms, n)] @test all(isapprox.(amp_A, nbs_A_a2[(T_ms, n)], atol=1e-6)) end end end @testset "1kHz check" begin # A 1kHz signal should be unaffected by A-weighting. fr(t) = 2*cos(1e3*2*pi*t) fi(t) = 2*sin(1e3*2*pi*t) f(t) = fr(t) + fi(t) for T_ms in [1, 2] for n in [19, 20] dt = T_ms*1e-3/n t = (0:n-1).*dt p = f.(t) ap = PressureTimeHistory(p, dt) nbs = MSPSpectrumAmplitude(ap) # amp = amplitude(nbs) # nbs_A = W_A(nbs) # amp_A = W_A(nbs) amp_A = W_A.(frequency(nbs)).*nbs # This is lame. Should be able to get this to match better, # right? @test all(isapprox.(amp_A, nbs, atol=1e-5)) end end end end
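# A note on the band-splitting pattern exercised by the `combine` tests in this
# file: each output band i accumulates, from every input spectrum, the fraction
# of each input band j's mean-squared pressure whose frequency extent overlaps
# [outlbands[i], outubands[i]], assuming the MSP is spread uniformly over the
# input band (times the time scaler, when observer times are involved). A
# minimal standalone sketch of that proration rule (not part of the test suite;
# all names here are local):
function prorate_band(pbs_j, lband_j, uband_j, outl_i, outu_i)
    # Width of the overlap between input band j and output band i.
    overlap = min(uband_j, outu_i) - max(lband_j, outl_i)
    overlap ≤ 0 && return zero(pbs_j)
    # Uniform-MSP assumption: contribution proportional to the overlapping width.
    return pbs_j/(uband_j - lband_j)*overlap
end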
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
6152
module GetTestData using ANOPP2 using JLD2 include("test_functions.jl") function gen_nbs() a2_nbs_freq = Dict{Tuple{Int, Int}, Vector{Float64}}() a2_nbs_amp = Dict{Tuple{Int, Int}, Vector{Float64}}() a2_nbs_phase = Dict{Tuple{Int, Int}, Vector{Float64}}() for T in [1, 2] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = apth_for_nbs.(t) t_a2 = range(0, T, length=n) |> collect # This needs to be an array, since we'll eventually be passing it to C/Fortran via ccall. if mod(n, 2) == 0 p_a2 = p else p_a2 = apth_for_nbs.(t_a2) end freq_a2, nbs_msp_a2, nbs_phase_a2 = ANOPP2.a2jl_aa_nbs(ANOPP2.a2_aa_pa, ANOPP2.a2_aa_pa, t_a2, p_a2) a2_nbs_freq[T, n] = freq_a2 a2_nbs_amp[T, n] = nbs_msp_a2 a2_nbs_phase[T, n] = nbs_phase_a2 end end return Dict("a2_nbs_freq"=>a2_nbs_freq, "a2_nbs_amp"=>a2_nbs_amp, "a2_nbs_phase"=>a2_nbs_phase) end function gen_psd() a2_psd_freq = Dict{Tuple{Int, Int}, Vector{Float64}}() a2_psd_amp = Dict{Tuple{Int, Int}, Vector{Float64}}() a2_psd_phase = Dict{Tuple{Int, Int}, Vector{Float64}}() for T in [1, 2] for n in [19, 20] dt = T/n t = (0:n-1).*dt p = apth_for_nbs.(t) t_a2 = range(0, T, length=n) |> collect # This needs to be an array, since we'll eventually be passing it to C/Fortran via ccall. if mod(n, 2) == 0 p_a2 = p else p_a2 = apth_for_nbs.(t_a2) end freq_a2, psd_msp_a2, psd_phase_a2 = ANOPP2.a2jl_aa_psd(ANOPP2.a2_aa_pa, ANOPP2.a2_aa_pa, t_a2, p_a2) a2_psd_freq[T, n] = freq_a2 a2_psd_amp[T, n] = psd_msp_a2 a2_psd_phase[T, n] = psd_phase_a2 end end return Dict("a2_psd_freq"=>a2_psd_freq, "a2_psd_amp"=>a2_psd_amp, "a2_psd_phase"=>a2_psd_phase) end function gen_pbs() # Need a PSD to pass to the routine. freq0 = 1000.0 T = 20/freq0 t0 = 0.13 n = 128 dt = T/n t = (0:n-1).*dt t_a2 = range(0, T, length=n) |> collect # This needs to be an array, since we'll eventually be passing it to C/Fortran via ccall. if mod(n, 2) == 0 p_a2 = apth_for_pbs.(freq0, t) else p_a2 = apth_for_pbs.(freq0, t_a2) end freq_a2, psd_msp_a2, psd_phase_a2 = ANOPP2.a2jl_aa_psd(ANOPP2.a2_aa_pa, ANOPP2.a2_aa_pa, t_a2, p_a2) pbs_freq, pbs = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_psd, ANOPP2.a2_aa_msp, freq_a2, psd_msp_a2, 3.0, ANOPP2.a2_aa_exact) return Dict("a2_pbs_freq"=>pbs_freq, "a2_pbs"=>pbs) end function gen_pbs() # Need a PSD to pass to the routine. freq0 = 1000.0 T = 20/freq0 t0 = 0.13 n = 128 dt = T/n t = (0:n-1).*dt t_a2 = range(0, T, length=n) |> collect # This needs to be an array, since we'll eventually be passing it to C/Fortran via ccall. 
if mod(n, 2) == 0 p_a2 = apth_for_pbs.(freq0, t) else p_a2 = apth_for_pbs.(freq0, t_a2) end freq_a2, psd_msp_a2, psd_phase_a2 = ANOPP2.a2jl_aa_psd(ANOPP2.a2_aa_pa, ANOPP2.a2_aa_pa, t_a2, p_a2) @show freq_a2 @show psd_msp_a2 pbs_freq, pbs = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_psd, ANOPP2.a2_aa_msp, freq_a2, psd_msp_a2, 3.0, ANOPP2.a2_aa_exact) return Dict("a2_pbs_freq"=>pbs_freq, "a2_pbs"=>pbs) end function gen_pbs2() n_freq = 2232 psd_freq = 45.0 .+ 5 .* (0:n_freq-1) df = psd_freq[2] - psd_freq[1] msp_amp = 20 .+ 10 .* (1:n_freq)./n_freq # psd_amp = msp_amp ./ df freq_a2 = psd_freq |> collect # psd_msp_a2 = psd_amp |> collect msp_amp_a2 = msp_amp |> collect pbs_freq, pbs = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_nbs, ANOPP2.a2_aa_msp, freq_a2, msp_amp_a2, 3.0, ANOPP2.a2_aa_exact) return Dict("a2_pbs_freq"=>pbs_freq, "a2_pbs"=>pbs) end function gen_pbs3() nfreq = 800 freq_min_nb = 55.0 freq_max_nb = 1950.0 df = (freq_max_nb - freq_min_nb)/(nfreq - 1) psd_freq = freq_min_nb .+ (0:nfreq-1).*df psd_amp = psd_func.(psd_freq) freq_a2 = psd_freq |> collect psd_amp_a2 = psd_amp |> collect pbs_freq, pbs = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_psd, ANOPP2.a2_aa_msp, freq_a2, psd_amp_a2, 3.0, ANOPP2.a2_aa_exact) return Dict("a2_pbs_freq"=>pbs_freq, "a2_pbs"=>pbs) end psd_func_pbs4(freq) = 3*freq/1e1 + (4e-1)*(freq/1e1)^2 + (5e-2)*(freq/1e1)^3 + (6e-3)*(freq/1e1)^4 psd_func_pbs5(freq) = 100*(sin(2*pi/(100)*freq) + 2) function gen_pbs4() freq_min_nb = 1e-2 freq_max_nb = 1e5 # nfreq = 100000 # df = (freq_max_nb - freq_min_nb)/(nfreq - 1) # psd_freq = freq_min_nb .+ (0:nfreq-1).*df df = 1e-3 psd_freq = freq_min_nb:df:freq_max_nb @show psd_freq length(psd_freq) psd_amp = psd_func_pbs5.(psd_freq) freq_a2 = psd_freq |> collect psd_amp_a2 = psd_amp |> collect pbs_freq_exact, pbs_exact = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_psd, ANOPP2.a2_aa_msp, freq_a2, psd_amp_a2, 1.0, ANOPP2.a2_aa_exact) pbs_freq_approx, pbs_approx = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_psd, ANOPP2.a2_aa_msp, freq_a2, psd_amp_a2, 1.0, ANOPP2.a2_aa_approximate) pbs_freq_pref, pbs_pref = ANOPP2.a2jl_aa_pbs(ANOPP2.a2_aa_psd, ANOPP2.a2_aa_msp, freq_a2, psd_amp_a2, 1.0, ANOPP2.a2_aa_preferred) return Dict("a2_pbs_freq_exact"=>pbs_freq_exact, "a2_pbs_exact"=>pbs_exact, "a2_pbs_freq_approx"=>pbs_freq_approx, "a2_pbs_approx"=>pbs_approx, "a2_pbs_freq_pref"=>pbs_freq_pref, "a2_pbs_pref"=>pbs_pref) end function main() # nbs_data = gen_nbs() # save(joinpath(@__DIR__, "nbs-new.jld2"), nbs_data) # psd_data = gen_psd() # save(joinpath(@__DIR__, "psd-new.jld2"), psd_data) # pbs_data = gen_pbs() # save(joinpath(@__DIR__, "pbs-new.jld2"), pbs_data) # pbs3_data = gen_pbs3() # save(joinpath(@__DIR__, "pbs3-new.jld2"), pbs3_data) pbs4_data = gen_pbs4() @show pbs4_data save(joinpath(@__DIR__, "pbs4-new.jld2"), pbs4_data) return nothing end end # module
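# Usage sketch (assumes a working ANOPP2 installation; the filename below is
# hypothetical -- use whatever this file is actually saved as):
#
#   include("gen_test_data.jl")
#   GetTestData.main()   # writes e.g. pbs4-new.jld2 next to this file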
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
code
712
function apth_for_nbs(t) fr = 2*cos(1*2*pi*t) + 4*cos(2*2*pi*t) + 6*cos(3*2*pi*t) + 8*cos(4*2*pi*t) fi = 2*sin(1*2*pi*t) + 4*sin(2*2*pi*t) + 6*sin(3*2*pi*t) + 8*sin(4*2*pi*t) return fr + fi end function apth_for_pbs(freq0, t) return 6 + 8*cos(1*2*pi*freq0*t + 0.2) + 2.5*cos(2*2*pi*freq0*t - 3.0) + 9*cos(3*2*pi*freq0*t + 3.1) end psd_func(freq) = 3*freq/1e3 + (4e-1)*(freq/1e3)^2 + (5e-2)*(freq/1e3)^3 + (6e-3)*(freq/1e3)^4 psd_func_int(freq_l, freq_r) = (((1/2)*3*(freq_r/1e3)^2 + (1/3)*(4e-1)*(freq_r/1e3)^3 + (1/4)*(5e-2)*(freq_r/1e3)^4 + (1/5)*(6e-3)*(freq_r/1e3)^5) - ((1/2)*3*(freq_l/1e3)^2 + (1/3)*(4e-1)*(freq_l/1e3)^3 + (1/4)*(5e-2)*(freq_l/1e3)^4 + (1/5)*(6e-3)*(freq_l/1e3)^5))*1e3
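# Quick numerical check (a sketch, not part of the original file): psd_func_int
# should agree with a trapezoidal integration of psd_func over the same band.
function check_psd_func_int(freq_l, freq_r; n=10_000)
    df = (freq_r - freq_l)/n
    f = freq_l .+ (0:n).*df
    y = psd_func.(f)
    # Trapezoidal rule: half weight on the two endpoints.
    quad = (sum(y) - 0.5*(y[1] + y[end]))*df
    return isapprox(quad, psd_func_int(freq_l, freq_r); rtol=1e-6)
end
# check_psd_func_int(55.0, 1950.0) should return true.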
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
docs
934
[![Tests](https://github.com/OpenMDAO/AcousticMetrics.jl/actions/workflows/test.yaml/badge.svg)](https://github.com/OpenMDAO/AcousticMetrics.jl/actions/workflows/test.yaml) [![](https://img.shields.io/badge/docs-dev-blue.svg)](https://OpenMDAO.github.io/AcousticMetrics.jl/dev)

# Introduction

AcousticMetrics.jl is a Julia package for computing various metrics useful in acoustics. Currently implemented metrics include:

* Various narrowband spectra
  * Pressure amplitude
  * Mean-squared pressure amplitude (MSP)
  * Power Spectral Density (PSD)
  * Phase
* Proportional band spectra
  * Approximate octave and third-octave spectra
  * Exact proportional spectra of any octave fraction > 0.
  * Lazy representations of proportional band spectra constructed from either other narrowband or proportional band spectra
* Integrated metrics
  * Unweighted and A-weighted Overall Sound Pressure Level (OASPL)
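A quick taste of the API (a minimal sketch based on the package's test suite; the signal here is made up):

```julia
using AcousticMetrics

# A fake pressure signal sampled n times with time step dt.
n, dt = 100, 1e-3
t = (0:n-1).*dt
p = @. 2*cos(2*pi*100*t) + 4*cos(2*pi*200*t)

ap = PressureTimeHistory(p, dt)  # pressure time history
amp = MSPSpectrumAmplitude(ap)   # narrowband mean-squared pressure spectrum
oaspl = OASPL(ap)                # overall sound pressure level, dB

# A-weighted narrowband spectrum.
amp_A = W_A.(frequency(amp)).*amp
```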
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
docs
2145
```@meta
CurrentModule = AMDocs
```

# API Reference

## Fourier Transforms

```@docs
AcousticMetrics.rfft!
AcousticMetrics.irfft!
```

## Pressure Time History

```@docs
AbstractPressureTimeHistory
PressureTimeHistory
AcousticMetrics.pressure
AcousticMetrics.inputlength
AcousticMetrics.timestep(pth::AbstractPressureTimeHistory)
AcousticMetrics.starttime(pth::AbstractPressureTimeHistory)
AcousticMetrics.time
```

## Narrowband Metrics

```@docs
AbstractNarrowbandSpectrum
AcousticMetrics.halfcomplex
AcousticMetrics.timestep(sm::AbstractNarrowbandSpectrum)
AcousticMetrics.starttime(sm::AbstractNarrowbandSpectrum)
AcousticMetrics.samplerate
AcousticMetrics.frequency
AcousticMetrics.frequencystep
AcousticMetrics.istonal
PressureSpectrumAmplitude
PressureSpectrumPhase
MSPSpectrumAmplitude
MSPSpectrumPhase
PowerSpectralDensityAmplitude
PowerSpectralDensityPhase
```

### Proportional Bands and Proportional Band Spectra

```@docs
AbstractProportionalBands
AcousticMetrics.octave_fraction
AcousticMetrics.lower_center_upper
AcousticMetrics.freq_scaler
AcousticMetrics.band_start
AcousticMetrics.band_end
AcousticMetrics.lower_bands
AcousticMetrics.upper_bands
AcousticMetrics.center_bands
AcousticMetrics.cband_number
ExactProportionalBands
ExactOctaveCenterBands
ExactOctaveLowerBands
ExactOctaveUpperBands
ExactThirdOctaveCenterBands
ExactThirdOctaveLowerBands
ExactThirdOctaveUpperBands
ApproximateThirdOctaveBands
ApproximateThirdOctaveCenterBands
ApproximateThirdOctaveLowerBands
ApproximateThirdOctaveUpperBands
ApproximateOctaveBands
ApproximateOctaveCenterBands
ApproximateOctaveLowerBands
ApproximateOctaveUpperBands
AbstractProportionalBandSpectrum
AcousticMetrics.has_observer_time
AcousticMetrics.observer_time
AcousticMetrics.timestep(pbs::AbstractProportionalBandSpectrum)
AcousticMetrics.amplitude
AcousticMetrics.time_period
AcousticMetrics.time_scaler
LazyNBProportionalBandSpectrum
AcousticMetrics.frequency_nb
AcousticMetrics.lazy_pbs
ProportionalBandSpectrum
LazyPBSProportionalBandSpectrum
ProportionalBandSpectrumWithTime
AcousticMetrics.combine
```

## Weighting

```@docs
W_A
```

## Integrated Metrics

```@docs
OASPL
```
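As a usage sketch of the proportional-band machinery listed above (not part of the documented API; the narrowband values below are invented):

```julia
using AcousticMetrics

# Exact third-octave bands, band numbers 10 through 16.
TPB = ExactProportionalBands{3}
cbands = TPB{:center}(10, 16)  # center frequencies
lbands = lower_bands(cbands)   # lower band edges
ubands = upper_bands(cbands)   # upper band edges

# Lazily bin a (made-up) narrowband MSP spectrum into third-octave bands:
# arguments are the band type, the first narrowband frequency, the narrowband
# frequency spacing, and the narrowband MSP values.
f0_nb, df_nb = 50.0, 2.0
msp = rand(100)
pbs = LazyNBProportionalBandSpectrum(TPB, f0_nb, df_nb, msp)
```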
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
docs
1397
# Developer Notes

## CompatHelper.jl, Running Tests, and Signing Commits

CompatHelper.jl will automatically look for new versions of AcousticMetrics.jl's dependencies and, if it finds any breaking versions, open PRs with changes to AcousticMetrics.jl's `Project.toml` to incorporate the new versions.
But! The PR won't automatically run the GitHub Actions tests:

https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#triggering-further-workflow-runs

A workaround is to manually close and immediately re-open the PR, which will run the tests and isn't too much work.

The next problem: commits created by CompatHelper.jl/the github-actions bot aren't signed, and AcousticMetrics.jl is set up to require signed commits when merging into the `main` branch.
So, what to do? Check out the CompatHelper.jl PR locally, manually sign the commits, then submit a new PR with the freshly-signed commits using this procedure:

* First, check out the CompatHelper.jl PR locally: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/checking-out-pull-requests-locally?tool=cli
* Next, manually sign the CompatHelper.jl commit using git rebase: https://superuser.com/questions/397149/can-you-gpg-sign-old-commits#
* Then push the branch with the newly signed commits to my fork, and merge
* Close the CompatHelper.jl PR :-/
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
docs
693
```@meta
CurrentModule = AMDocs
```

# Introduction

AcousticMetrics.jl is a Julia package for computing various metrics useful in acoustics. Currently implemented metrics include:

* Various narrowband spectra
  * Pressure amplitude
  * Mean-squared pressure amplitude (MSP)
  * Power Spectral Density (PSD)
  * Phase
* Proportional band spectra
  * Approximate octave and third-octave spectra
  * Exact proportional spectra of any integer octave fraction > 0.
  * Lazy representations of proportional band spectra constructed from either narrowband or proportional band spectra
* Integrated metrics
  * Unweighted and A-weighted Overall Sound Pressure Level (OASPL)
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
docs
1651
```@meta
CurrentModule = AMDocs
```

# Software Quality Assurance

## Tests

AcousticMetrics.jl uses the usual Julia testing framework to implement and run tests. The tests can be run locally after installing AcousticMetrics.jl, and are also run automatically on GitHub Actions.

To run the tests locally, from the Julia REPL, type `]` to enter the Pkg prompt, then

```julia-repl
(docs) pkg> test AcousticMetrics
     Testing AcousticMetrics
Test Summary:      | Pass  Total  Time
Fourier transforms |   16     16  9.0s
Test Summary:     | Pass  Total  Time
Pressure Spectrum |  108    108  1.7s
Test Summary:                  | Pass  Total  Time
Mean-squared Pressure Spectrum |   88     88  8.0s
Test Summary:          | Pass  Total  Time
Power Spectral Density |   88     88  0.9s
Test Summary:              | Pass  Total  Time
Proportional Band Spectrum | 1066   1066  5.3s
Test Summary: | Pass  Total  Time
OASPL         |   16     16  0.3s
Test Summary: | Pass  Total  Time
A-weighting   |    8      8  0.5s
     Testing AcousticMetrics tests passed

(docs) pkg>
```

(The output associated with installing all the dependencies the tests need isn't shown above.)

## Signed Commits

The AcousticMetrics.jl GitHub repository requires all commits to the `main` branch to be signed. See the [GitHub docs on signing commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification) for more information.

## Reporting Bugs

Users can use the [GitHub Issues](https://docs.github.com/en/issues/tracking-your-work-with-issues/about-issues) feature to report bugs and submit feature requests.
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "Apache-2.0" ]
0.7.1
c9c326c4aff2be9866fa03a900380fb8dcdaaca8
docs
21103
```@meta
CurrentModule = AMDocs
```

# `AcousticMetrics.jl`

## The Fourier Transform

### The Basics

`AcousticMetrics.jl` uses the FFTW library, a very popular implementation of the fast Fourier transform (FFT) algorithm. The FFT is a method of computing the discrete Fourier transform (DFT) that reduces the computational complexity from ``n^2`` to ``n \log(n)``, where ``n`` is the length of the input to the transform. [FFTW's definition of the discrete Fourier transform](http://www.fftw.org/fftw3_doc/The-1d-Discrete-Fourier-Transform-_0028DFT_0029.html#The-1d-Discrete-Fourier-Transform-_0028DFT_0029) is

```math
y_k = \sum_{j=0}^{n-1} x_j e^{-2 \pi \imath jk/n}
```

where ``\imath=\sqrt{-1}``.

The goal of a Fourier transform is to take a function (let's call it a "signal") and express it as a sum of sinusoids. Let's imagine we have a very simple signal

```math
p(t) = A \cos(ωt+φ)
```

where we call ``A`` the amplitude, ``ω`` the frequency, and ``φ`` the phase. `AcousticMetrics.jl` is interested in (surprise!) acoustics, where signals are real numbers, so we'll assume that ``A``, ``ω``, and ``φ`` are all real.

Say we evaluate that function ``n`` times over a period ``T``, and assume that ``ω = 2πm/T``, i.e., that the period of our signal is some integer fraction of the sampling period ``T``, since

```math
\frac{2 π}{ω} = \frac{2 π}{\frac{2πm}{T}} = \frac{T}{m}.
```

We'll refer to each of those samples as ``p_j = p(t_j)``, where ``t_j`` is the time at sample ``j`` and ``j`` is an index from ``0`` to ``n-1``.

What is the discrete Fourier transform of our signal ``p_j``? We should be able to figure that out if we can express our signal ``p(t)`` as something that looks like FFTW's definition of the DFT. How can we do that? Well, first we need to remember that

```math
\cos(α+β) = \cos(α)\cos(β) - \sin(α)\sin(β)
```

which lets us rewrite our signal as

```math
p(t) = A \cos(ωt+φ) = A\left[ \cos(ωt)\cos(φ) - \sin(ωt)\sin(φ) \right] = A \cos(φ)\cos(ωt) - A\sin(φ)\sin(ωt).
```

Now, if we also remember that

```math
e^{ix} = \cos(x) + \imath \sin(x),
```

we can replace ``\sin(ωt)`` with

```math
\sin(ωt) = \frac{e^{\imath ωt} - e^{-\imath ωt}}{2\imath} = \frac{-\imath e^{\imath ωt} + \imath e^{-\imath ωt}}{2}
```

and ``\cos(ωt)`` with

```math
\cos(ωt) = \frac{e^{\imath ωt} + e^{-\imath ωt}}{2}.
```

Throw all that together and we get

```math
p(t) = \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{\imath ωt} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-\imath ωt}.
```

This is looking much closer to the definition of the DFT that we started with. The next thing we need to do is realize that since we've sampled our signal ``p(t)`` at ``n`` different times, equally spaced, over a time period ``T``, we can replace ``t`` with

```math
t_j = j \frac{T}{n} = j Δt
```

where ``T/n`` is ``Δt``, the time step size. We've previously said that ``ω=\frac{2πm}{T}``, which implies that

```math
ω t_j = \left( \frac{2πm}{T} \right) \left(j \frac{T}{n} \right) = \frac{2πmj}{n}.
```

So if we throw that in there, we find

```math
p(t_j) = p_j = \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath jm/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath jm/n}.
```

That's very close to the DFT definition. So what happens if we evaluate the DFT of that last expression? We'll call the discrete Fourier transform of the signal ``\hat{p}_k``. So...
```math
\begin{aligned}
\hat{p}_k &= \sum_{j=0}^{n-1} p_j e^{-2 \pi \imath jk/n} \\
&= \sum_{j=0}^{n-1} \left( \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath jm/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath jm/n} \right) e^{-2 \pi \imath jk/n} \\
&= \sum_{j=0}^{n-1} \left( \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath j(m-k)/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath j(m+k)/n} \right) \\
&=\frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] \sum_{j=0}^{n-1} e^{2π\imath j(m-k)/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] \sum_{j=0}^{n-1} e^{-2π\imath j(m+k)/n}
\end{aligned}
```

Pretty close now. Let's think about those two summations in the last expression. First assume that ``m - k = q \ne 0``, where ``m`` and ``k`` are both integers. Then the first sum would be

```math
\sum_{j=0}^{n-1} e^{2π\imath j(m-k)/n} = \sum_{j=0}^{n-1} e^{2π\imath jq/n}.
```

That's a signal that has period

```math
\frac{2π}{2πq/n} = n/q
```

that we're sampling ``n`` times. So we're sampling a sinusoid an integer number of times over its period, and summing it up. That will give us... zero. The same thing will happen to the second sum if ``m+k=r \ne 0``: we'll also get zero. So now we just have to figure out what happens when ``m - k = 0`` and ``m + k = 0``, i.e., when ``k = ±m``. Let's try ``k = m`` first. The first sum will be

```math
\sum_{j=0}^{n-1} e^{2π\imath j(m-m)/n} = \sum_{j=0}^{n-1} e^{0} = \sum_{j=0}^{n-1} 1 = n
```

and the second sum will be

```math
\sum_{j=0}^{n-1} e^{-2π\imath j(m+m)/n} = \sum_{j=0}^{n-1} e^{-4π\imath jm/n} = 0
```

from the previous discussion, since ``m+k=2m \ne 0``. For ``k = -m``, the first sum will be zero, since ``m - -m = 2m \ne 0``, and the second sum will be

```math
\sum_{j=0}^{n-1} e^{-2π\imath j(m-m)/n} = n
```

again. Great! So now we can finally write down the DFT of our example signal

```math
p(t) = A \cos(ωt+φ) = A \cos\left(\left[\frac{2πm}{T}\right]t+φ\right),
```

which is...

```math
\begin{aligned}
\hat{p}_m & = \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] n \\
\hat{p}_{-m} & = \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] n \\
\hat{p}_{k} & = 0\,\text{otherwise}.
\end{aligned}
```

### Some Special Cases

There are two special cases that we need to consider: the mean component and the Nyquist component of the DFT. Let's try the mean component first.

#### The Mean Component

So imagine if we start out with the same signal

```math
p(t) = A \cos(ωt+φ)
```

but say that ``ω = 0``. Since ``ω = 2πm/T``, that implies that ``m = 0`` also. But anyway, let's rewrite that in terms of powers of ``e``:

```math
\begin{aligned}
p(t) &= A \cos(ωt+φ) \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{\imath ωt} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-\imath ωt} \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{0} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{0}\\
&= \frac{A}{2}\left[2\cos(φ)\right]\\
&= A\cos(φ).
\end{aligned}
```

What happens if we plug that into the definition of the discrete Fourier transform? Well, it's not hard to see that we'll get

```math
\begin{aligned}
\hat{p}_0 &= A\cos(φ)n \\
\hat{p}_k &= 0 \,\text{otherwise}.
\end{aligned}
```

So the two takeaways are:

* the mean component doesn't contain a ``\frac{1}{2}`` factor
* the mean component is always real
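We can sanity-check these results numerically with FFTW (a quick sketch, not part of the package; the parameter values are arbitrary):

```julia
using FFTW

n, T, m = 16, 1.0, 3     # number of samples, sampling period, frequency index
A, φ, c = 2.0, 0.3, 5.0  # amplitude, phase, mean offset
t = (0:n-1).*(T/n)
p = c .+ A.*cos.((2*pi*m/T).*t .+ φ)

phat = fft(p)
@assert phat[1] ≈ c*n                                   # mean: real, no 1/2 factor
@assert phat[1 + m] ≈ (A/2)*(cos(φ) + im*sin(φ))*n      # positive-frequency component
@assert phat[1 + n - m] ≈ (A/2)*(cos(φ) - im*sin(φ))*n  # negative-frequency component
```

Next, the Nyquist frequency component.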
#### Nyquist Component

The Nyquist component is the one with two samples per period, which corresponds to ``k=n/2`` in the DFT definition. It is the highest frequency component that the DFT can resolve. So that means our signal will look like

```math
\begin{aligned}
p(t) &= A \cos(ωt+φ) \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath jm/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath jm/n} \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath j(\frac{n}{2})/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath j(\frac{n}{2})/n}
\end{aligned}
```

Now here's where things get a bit tricky. We've got a ``\frac{n}{2}/n`` term that we'd love to replace with ``\frac{1}{2}``. That works fine if ``n`` is even, but what if ``n`` is odd? We'll have to look at both cases. First, the even case.

#### Nyquist Component with Even-Length Input

If ``n`` is even, then we can do

```math
\begin{aligned}
p(t) &= A \cos(ωt+φ) \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath j(\frac{n}{2})/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath j(\frac{n}{2})/n} \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{π\imath j} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-π\imath j}
\end{aligned}
```

and then realize that ``e^{π\imath j} = e^{-π\imath j} = \left(-1\right)^j``, which lets us simplify a bit further to

```math
p(t) = \frac{A}{2}\left[2\cos(φ)\right]e^{π\imath j} = A\cos(φ)e^{π\imath j}.
```

The next step is to think about which components of the DFT we need to worry about. If we shove that into the DFT, we'll need to just focus on the ``e^{-2π\imath j (n/2)/n} = e^{-π\imath j}`` component of the DFT, and we'll eventually end up with

```math
\begin{aligned}
\hat{p}_{n/2} &= A\cos(φ)n \\
\hat{p}_k &= 0 \,\text{otherwise},
\end{aligned}
```

and so the takeaways are identical to the mean component:

* the Nyquist component for ``n`` even doesn't contain a ``\frac{1}{2}`` factor
* the Nyquist component for ``n`` even is always real

#### Nyquist Component with Odd-Length Input

So, we're starting with

```math
\begin{aligned}
p(t) &= A \cos(ωt+φ) \\
&= \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath j(\frac{n}{2})/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath j(\frac{n}{2})/n}
\end{aligned}
```

Now, the trick here is that there *is* no Nyquist frequency component with an odd-input DFT. We'll never end up with ``e^{±2π\imath j(\frac{n}{2})/n} = e^{±π\imath}`` components in the DFT definition, since ``n`` is odd. For example, if ``n=9``, the ``k`` in the original definition of the DFT

```math
y_k = \sum_{j=0}^{n-1} x_j e^{-2 \pi \imath jk/n}
```

will never take on the value ``n/2``, since that would be ``4\frac{1}{2}`` and ``k`` is an integer. So this special case isn't special for odd input lengths.

### Order of Outputs

We're almost ready to compare our example signal to the output of the FFTW library. The last thing we need to think about is how FFTW's output is ordered. FFT libraries have different conventions, but [here is what FFTW does](http://www.fftw.org/fftw3_doc/The-1d-Discrete-Fourier-Transform-_0028DFT_0029.html#The-1d-Discrete-Fourier-Transform-_0028DFT_0029):

> Note also that we use the standard “in-order” output ordering—the k-th output
> corresponds to the frequency k/n (or k/T, where T is your total sampling
> period).
> For those who like to think in terms of positive and negative
> frequencies, this means that the positive frequencies are stored in the first
> half of the output and the negative frequencies are stored in backwards order
> in the second half of the output. (The frequency -k/n is the same as the
> frequency (n-k)/n.)

So for our original example signal

```math
\begin{aligned}
p(t) &= A \cos(ωt+φ) \\
\hat{p}_m & = \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] n \\
\hat{p}_{-m} & = \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] n \\
\hat{p}_{k} & = 0\,\text{otherwise}.
\end{aligned}
```

we would expect ``\hat{p}_m`` to appear in the ``1+m`` position (since ``m`` starts from 0, but the first "position" is 1 for Julia arrays), and ``\hat{p}_{-m}`` to appear in the ``1+n-m`` position. But things get a bit more complicated if we use a real-input FFT (which AcousticMetrics.jl does). See the next section.

### Real-Input FFTs and Half-Complex Format

If we look back at the results for the DFT of our simple signal

```math
\begin{aligned}
p(t) &= A \cos(ωt+φ) \\
\hat{p}_m & = \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] n \\
\hat{p}_{-m} & = \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] n \\
\hat{p}_{k} & = 0\,\text{otherwise}.
\end{aligned}
```

we can't help but notice that the negative and positive frequency results are closely related. If we know one, we can figure out the other. And if we want to find ``A`` and ``φ``, we just need either the positive or negative result. For example, if ``a_r`` and ``a_i`` are the real and imaginary parts of ``\hat{p}_m``, respectively, then

```math
A = \frac{2}{n}\sqrt{a_r^2 + a_i^2}
```

and

```math
φ = \arctan(a_i/a_r).
```

For the Nyquist frequency, though, we know that

```math
\begin{aligned}
\hat{p}_{n/2} &= A\cos(φ)n \\
\hat{p}_k &= 0 \,\text{otherwise},
\end{aligned}
```

and so ``a_r = A\cos(φ)n`` and ``a_i = 0``. We have only one non-zero component, so we'll have to define

```math
A = a_r/n
```

and

```math
φ = 0.
```

Or, wait, maybe it would be better to make `A = abs(a_r)/n` and `φ = π` if `a_r < 0`, and `φ = 0` otherwise.

So, for real-input FFTs, FFTW only gives you the non-negative frequencies of the DFT. Finally, if we want to avoid complex numbers entirely, we can use the "real-to-real" transform that returns the DFT in the [halfcomplex format](https://www.fftw.org/fftw3_doc/The-Halfcomplex_002dformat-DFT.html). This returns the frequencies in an order similar to the standard in-order manner discussed previously, but only returns the non-negative portion of the spectrum. Specifically, the FFTW manual shows that the order of outputs will be

```math
r_0, r_1, r_2, ..., r_{n/2}, i_{(n+1)/2-1}, ..., i_2, i_1
```

where ``r_k`` and ``i_k`` are the real and imaginary parts of component ``k``, and division by 2 is rounded down. An example makes this a bit more clear. Let's imagine we have a signal of length 8, so ``n = 8``. Then the output we'll get from FFTW will be

```math
r_0, r_1, r_2, r_3, r_4, i_3, i_2, i_1
```

``i_0`` and ``i_4`` are "missing," but that doesn't bother us since we know that both of those are always zero for a real-input even-length DFT. What if we had an odd-length input signal? Let's try ``n=9`` this time. Then the output will be

```math
r_0, r_1, r_2, r_3, r_4, i_4, i_3, i_2, i_1
```

This time the ``i_4`` component isn't "missing," which is a good thing, since it's not zero.

### Time Offset

So far we've been assuming that the time ``t`` starts at 0. What if that's not true, i.e., that ``t_j = t_0 + j\frac{T}{n}``?
### Time Offset

So far we've been assuming that the time ``t`` starts at 0. What if that's not true, i.e., that ``t_j = t_0 + j\frac{T}{n}``? Then

```math
ω t_j = \left( \frac{2πm}{T} \right) \left(t_0 + j \frac{T}{n} \right) = \frac{2πm t_0}{T} + \frac{2πmj}{n}
```

and the signal is now

```math
\begin{aligned}
p(t_j) = p_j &= A\cos(ω[t_0 + j\frac{T}{n}] + φ) \\
&= \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath m t_0/T + 2π\imath jm/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath m t_0/T - 2π\imath jm/n} \\
&= \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath m t_0/T} e^{2π\imath jm/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath m t_0/T} e^{-2π\imath jm/n}.
\end{aligned}
```

Next, we substitute the signal into the definition of the DFT:

```math
\begin{aligned}
\hat{p}_k &= \sum_{j=0}^{n-1} p_j e^{-2 \pi \imath jk/n} \\
&=\frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath m t_0/T} \sum_{j=0}^{n-1} e^{2π\imath j(m-k)/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath m t_0/T} \sum_{j=0}^{n-1} e^{-2π\imath j(m+k)/n}
\end{aligned}
```

then use the same arguments we used before for the summations to find that

```math
\begin{aligned}
\hat{p}_m & = \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath m t_0/T} n \\
\hat{p}_{-m} & = \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath m t_0/T} n \\
\hat{p}_{k} & = 0\,\text{otherwise}.
\end{aligned}
```

Let's work on the non-zero ``\hat{p}`` components a bit. First, the positive-``m`` one:

```math
\begin{aligned}
\hat{p}_m & = \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath m t_0/T} n \\
& = \frac{A}{2}\left[e^{\imath φ} \right] e^{2π\imath m t_0/T} n \\
& = \frac{A}{2}\left[e^{\imath φ + 2π\imath m t_0/T} \right] n \\
& = \frac{A}{2}\left[e^{\imath (φ + 2π m t_0/T)} \right] n \\
& = \frac{A}{2}\left[\cos(φ + 2π m t_0/T) + \imath \sin(φ+ 2π m t_0/T) \right] n
\end{aligned}
```

Then, the negative-``m`` one:

```math
\begin{aligned}
\hat{p}_{-m} & = \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath m t_0/T} n \\
& = \frac{A}{2}\left[e^{-\imath φ} \right] e^{-2π\imath m t_0/T} n \\
& = \frac{A}{2}\left[e^{-\imath φ - 2π\imath m t_0/T} \right] n \\
& = \frac{A}{2}\left[e^{-\imath (φ + 2π m t_0/T)} \right] n \\
& = \frac{A}{2}\left[\cos(φ + 2π m t_0/T) - \imath \sin(φ+ 2π m t_0/T) \right] n
\end{aligned}
```

So now, if we want to find ``A`` and ``φ`` from the ``\hat{p}_m`` components ``a_r``

```math
a_r = \frac{A}{2}\cos(φ + 2π m t_0/T)n
```

and ``a_i``

```math
a_i = \frac{A}{2}\sin(φ + 2π m t_0/T)n
```

we can use the same formula for ``A``

```math
A = \frac{2}{n}\sqrt{a_r^2 + a_i^2}
```

and a slightly modified version of the formula for ``φ``

```math
φ = \arctan(a_i/a_r) - 2π m t_0/T.
```
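A quick sketch of that recovery, again assuming FFTW.jl, with arbitrary illustration values for ``t_0``, ``m``, ``A``, and ``φ`` (the recovered phase is only determined up to the branch of the arctangent, i.e., up to a multiple of ``2π``):

```julia
using FFTW

n, m = 16, 3
T = 2.0
A, φ = 1.2, 0.8
t0 = 0.13                              # arbitrary start time
t = t0 .+ (0:n-1) .* (T/n)
p = A .* cos.(2π*m/T .* t .+ φ)

p̂ = rfft(p)
a_r, a_i = real(p̂[1 + m]), imag(p̂[1 + m])
@show (2/n)*hypot(a_r, a_i)            # ≈ A, unaffected by the time offset
@show atan(a_i, a_r) - 2π*m*t0/T       # ≈ φ (mod 2π)
```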
Previously we didn't need to worry about this phase correction for the two special cases (the mean and the Nyquist frequencies), since the ``φ`` angle for both was always zero. Does that still hold with a time offset? Obviously shifting the mean component shouldn't change anything. But what about the Nyquist frequency?

#### Nyquist Component with a Time Offset

We know from previous work that the odd-input-length Nyquist component isn't special, so we'll ignore that case. So, for the even-input-length Nyquist component, we'll start with the same signal

```math
\begin{aligned}
p(t_j) = p_j &= \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath m t_0/T + 2π\imath jm/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath m t_0/T - 2π\imath jm/n} \\
&= \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath m t_0/T} e^{2π\imath jm/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath m t_0/T} e^{-2π\imath jm/n},
\end{aligned}
```

and then say that ``m = n/2``, like previously

```math
\begin{aligned}
p_j &= \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath (n/2) t_0/T} e^{2π\imath j(n/2)/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath (n/2) t_0/T} e^{-2π\imath j(n/2)/n} \\
&= \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{π\imath n t_0/T} e^{π\imath j} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-π\imath n t_0/T} e^{-π\imath j} \\
&= \frac{A}{2}\left[e^{\imath φ}\right] e^{π\imath n t_0/T} e^{π\imath j} + \frac{A}{2}\left[ e^{-\imath φ}\right] e^{-π\imath n t_0/T} e^{-π\imath j} \\
&= \frac{A}{2}\left[e^{\imath (φ + πn t_0/T)}\right] e^{π\imath j} + \frac{A}{2}\left[ e^{-\imath (φ + πn t_0/T)}\right] e^{-π\imath j} \\
&= \frac{A}{2}\left[\cos(φ + πn t_0/T) + \imath \sin(φ + πn t_0/T)\right] e^{π\imath j} + \frac{A}{2}\left[ \cos(φ + πn t_0/T) - \imath \sin(φ + πn t_0/T)\right] e^{-π\imath j} \\
&= A\cos(φ + πn t_0/T)e^{π\imath j} \\
&= A\cos(φ + 2π (n/2) t_0/T)e^{2π\imath j (n/2)/n}
\end{aligned}
```

So this means the Fourier transform is

```math
\hat{p}_{n/2} = A\cos(φ + 2π (n/2) t_0/T) n
```

and so we can find

```math
A = a_r/n
```

and

```math
φ = -2π(n/2) t_0/T.
```
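And a matching numerical sketch for the even-``n`` Nyquist bin with an offset, assuming FFTW.jl and arbitrary values:

```julia
using FFTW

n = 8
T = 1.0
A, φ = 1.0, 0.4
t0 = 0.05
t = t0 .+ (0:n-1) .* (T/n)
ω = 2π*(n ÷ 2)/T                       # Nyquist frequency
p = A .* cos.(ω .* t .+ φ)

p̂ = fft(p)
@show p̂[n ÷ 2 + 1]                     # purely real, as derived above
@show A*cos(φ + π*n*t0/T)*n            # should match the real part
```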
### What if the signal frequency isn't a multiple of the sampling frequency?

```math
p(t) = A \cos(ωt+φ)
```

Say we evaluate that function ``n`` times over a period ``T``, just like before. But this time we will assume that ``ω = 2π(m+a)/T`` where ``0 < a < 1``, i.e., that the period of our signal is *not* some integer fraction of the sampling period ``T``. What will the Fourier transform of that be? We can reuse a bunch of our previous work. This expression for the signal ``p(t)`` still applies:

```math
p(t) = \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{\imath ωt} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-\imath ωt}
```

But now we just need to substitute our new expression for ``ω t_j``,

```math
ω t_j = \left( \frac{2π(m+a)}{T} \right) \left(j \frac{T}{n} \right) = \frac{2π(m+a)j}{n},
```

which will give us

```math
p(t_j) = p_j = \frac{A}{2}\left[ \cos(φ) + \imath \sin(φ)\right] e^{2π\imath j(m+a)/n} + \frac{A}{2}\left[ \cos(φ) - \imath \sin(φ)\right] e^{-2π\imath j(m+a)/n}
```

Now, if we do the FFT:

```math
\begin{aligned}
\hat{p}_k &= \sum_{j=0}^{n-1} p_j e^{-2 \pi \imath jk/n} \\
&= \sum_{j=0}^{n-1} \left( \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath j(m+a)/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath j(m+a)/n} \right) e^{-2 \pi \imath jk/n} \\
&= \sum_{j=0}^{n-1} \left( \frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] e^{2π\imath j(m+a-k)/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] e^{-2π\imath j(m+a+k)/n} \right) \\
&=\frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] \sum_{j=0}^{n-1} e^{2π\imath j(m+a-k)/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] \sum_{j=0}^{n-1} e^{-2π\imath j(m+a+k)/n} \\
&=\frac{A}{2}\left[\cos(φ) + \imath \sin(φ) \right] \sum_{j=0}^{n-1} e^{2π\imath j(m-k)/n} e^{2π\imath aj/n} + \frac{A}{2}\left[\cos(φ) - \imath \sin(φ) \right] \sum_{j=0}^{n-1} e^{-2π\imath j(m+k)/n} e^{-2π\imath aj/n}
\end{aligned}
```
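The two remaining summations are plain geometric series, and when ``a ≠ 0`` they no longer collapse to ``n`` at a single bin: every DFT bin picks up some of the signal (spectral leakage). Here is a sketch evaluating the sums in closed form and comparing to the FFT, assuming FFTW.jl; `geom` is a hypothetical helper defined here, not part of any package:

```julia
using FFTW

n = 16
m, a = 3, 0.4              # signal frequency (m + a)/T, with 0 < a < 1
T = 1.0
A, φ = 1.0, 0.0
t = (0:n-1) .* (T/n)
p = A .* cos.(2π*(m + a)/T .* t .+ φ)

p̂ = fft(p)

# Σ_{j=0}^{n-1} x^j in closed form (geometric series).
geom(x, n) = x ≈ 1 ? complex(float(n)) : (1 - x^n)/(1 - x)

k = 5                      # any bin works; with a ≠ 0 all of them are non-zero
sp = geom(exp(2π*im*(m + a - k)/n), n)
sm = geom(exp(-2π*im*(m + a + k)/n), n)
@show p̂[k + 1]
@show (A/2)*(sp + sm)      # matches the DFT bin: leakage into bin k
```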
AcousticMetrics
https://github.com/OpenMDAO/AcousticMetrics.jl.git
[ "MIT" ]
1.13.0
76c8dd8965e9b4c8639012c8815e1842c1306a66
code
9809
module PkgButlerEngine import Mustache import Pkg function configure_pkg(path::AbstractString; channel=:auto, template=:auto) channel in (:auto, :stable, :dev) || error("Invalid value for channel.") template in (:auto, :default, :bach) || error("Invalid value for template.") path_for_butler_workflows_folder = joinpath(path, ".github", "workflows") path_for_main_butler_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-butler-workflow.yml") path_for_main_butler_dev_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-butler-dev-workflow.yml") mkpath(path_for_butler_workflows_folder) if channel == :auto channel = isfile(path_for_main_butler_dev_workflow) ? :dev : :stable end if channel == :stable if isfile(path_for_main_butler_dev_workflow) rm(path_for_main_butler_dev_workflow, force=true) end cp(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-butler-workflow.yml"), path_for_main_butler_workflow, force=true) elseif channel == :dev if isfile(path_for_main_butler_workflow) rm(path_for_main_butler_workflow, force=true) end cp(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-butler-dev-workflow.yml"), path_for_main_butler_dev_workflow, force=true) end path_for_config_file = joinpath(path, ".jlpkgbutler.toml") if template !== :auto if isfile(path_for_config_file) config_content = Pkg.TOML.parsefile(path_for_config_file) if haskey(config_content, "template") if template !== :auto config_content["template"] = string(template) open(path_for_config_file, "w") do f Pkg.TOML.print(f, config_content) end end elseif template !== :default config_content["template"] = string(template) open(path_for_config_file, "w") do f Pkg.TOML.print(f, config_content) end end elseif template !== :default open(path_for_config_file, "w") do f Pkg.TOML.print(f, Dict{String,Any}("template" => string(template))) end end end end function cp_with_mustache(src, dest, vals) content = read(src, String) open(dest, "w") do file Mustache.render(file, content, vals, tags=("\$[[", "]]")) end end function ensure_project_has_julia_compat(path) proj_file = isfile(joinpath(path, "JuliaProject.toml")) ? joinpath(path, "JuliaProject.toml") : joinpath(path, "Project.toml") pkg_toml_content = Pkg.TOML.parsefile(proj_file) if !haskey(pkg_toml_content, "compat") pkg_toml_content["compat"] = Dict{String,String}() end if !haskey(pkg_toml_content["compat"], "julia") pkg_toml_content["compat"]["julia"] = "1" open(proj_file, "w") do f Pkg.TOML.print(f, pkg_toml_content) end end end function ensure_project_uses_new_enough_documenter(path) doc_proj_file = isfile(joinpath(path, "docs", "JuliaProject.toml")) ? joinpath(path, "docs", "JuliaProject.toml") : joinpath(path, "docs", "Project.toml") if isfile(doc_proj_file) pkg_toml_content = Pkg.TOML.parsefile(doc_proj_file) if haskey(pkg_toml_content, "compat") && haskey(pkg_toml_content["compat"], "Documenter") documenter_compat_bound = pkg_toml_content["compat"]["Documenter"] version_bound = Pkg.Types.semver_spec(documenter_compat_bound) # This is the list of versions that don't work invalid_versions = Pkg.Types.semver_spec("0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.10,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.19,0.20,0.21,0.22,0.23") if !isempty(intersect(invalid_versions, version_bound)) # TODO This is a bit crude. Ideally we would try to see whether a version >= 0.24 is listed in the compat section, and if so, # only remove the offending versions. 
pkg_toml_content["compat"]["Documenter"] = "~0.24" open(doc_proj_file, "w") do f Pkg.TOML.print(f, pkg_toml_content) end end end end end function construct_version_matrix(path) proj_file = isfile(joinpath(path, "JuliaProject.toml")) ? joinpath(path, "JuliaProject.toml") : joinpath(path, "Project.toml") pkg_toml_content = Pkg.TOML.parsefile(proj_file) julia_compat_bound = pkg_toml_content["compat"]["julia"] version_spec = Pkg.Types.semver_spec(julia_compat_bound) versions = [v"1.0.5" => "'1.0'", v"1.1.1" => "'1.1'", v"1.2.0" => "'1.2'", v"1.3.1" => "'1.3'", v"1.4.2" => "'1.4'", v"1.5.4" => "'1.5'", v"1.6.7" => "'1.6'", v"1.7.3" => "'1.7'", v"1.8.5" => "'1.8'", v"1.9.4" => "'1.9'", v"1.10.0" => "'1.10'"] compat_versions = filter(i -> i[1] in version_spec, versions) return join(map(i -> i[2], compat_versions), ", ") end function construct_matrix_exclude_list(path) path_for_config_file = joinpath(path, ".jlpkgbutler.toml") if isfile(path_for_config_file) config_content = Pkg.TOML.parsefile(path_for_config_file) if haskey(config_content, "strategy-matrix-exclude") option_value = config_content["strategy-matrix-exclude"] line_ending = Sys.iswindows() ? "\r\n" : "\n" exclude_configs = split(option_value, ";", keepempty=false) exclude_configs = strip.(exclude_configs) ret = "" for ec in exclude_configs lines = split(ec, ",", keepempty=false) lines = strip.(lines) ret *= line_ending * " "^10 * "- " * lines[1] * (length(lines) > 1 ? line_ending * join(string.(" "^12, lines[2:end]), line_ending) : "") end return ret end end return "" end function configure_tagbot!(path, view_vals) path_for_config_file = joinpath(path, ".jlpkgbutler.toml") if isfile(path_for_config_file) config_content = Pkg.TOML.parsefile(path_for_config_file) if haskey(config_content, "custom-registry") option_value = config_content["custom-registry"] view_vals["include_custom_registry"] = "true" view_vals["JL_CUSTOM_REGISTRY"] = option_value end end end function add_compathelper(path) path_for_butler_workflows_folder = joinpath(path, ".github", "workflows") path_for_compathelper_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-compathelper-workflow.yml") cp(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-compathelper-workflow.yml"), path_for_compathelper_workflow, force=true) end function detect_template(path) path_for_config_file = joinpath(path, ".jlpkgbutler.toml") if isfile(path_for_config_file) config_content = Pkg.TOML.parsefile(path_for_config_file) if haskey(config_content, "template") return lowercase(config_content["template"]) end end return "default" end function update_pkg_bach(path) if isfile(joinpath(path, ".travis.yml")) rm(joinpath(path, ".travis.yml"), force=true) end if isfile(joinpath(path, "appveyor.yml")) rm(joinpath(path, "appveyor.yml"), force=true) end if isfile(joinpath(path, ".appveyor.yml")) rm(joinpath(path, ".appveyor.yml"), force=true) end end function update_pkg(path::AbstractString) configure_pkg(path) template = detect_template(path) path_for_butler_workflows_folder = joinpath(path, ".github", "workflows") path_for_ci_master_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-ci-master-workflow.yml") path_for_ci_pr_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-ci-pr-workflow.yml") path_for_docdeploy_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-docdeploy-workflow.yml") path_for_codeformat_workflow = joinpath(path_for_butler_workflows_folder, "jlpkgbutler-codeformat-pr-workflow.yml") path_for_tagbot_workflow = 
joinpath(path_for_butler_workflows_folder, "jlpkgbutler-tagbot-workflow.yml") path_for_docs_make_file = joinpath(path, "docs", "make.jl") mkpath(path_for_butler_workflows_folder) ensure_project_has_julia_compat(path) view_vals = Dict{String,Any}() view_vals["JL_VERSION_MATRIX"] = construct_version_matrix(path) # if template == "bach" # view_vals["include_codeformat_lint"] = "true" # end view_vals["ADDITIONAL_MATRIX_EXCLUDES"] = construct_matrix_exclude_list(path) configure_tagbot!(path, view_vals) cp_with_mustache(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-ci-master-workflow.yml"), path_for_ci_master_workflow, view_vals) cp_with_mustache(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-ci-pr-workflow.yml"), path_for_ci_pr_workflow, view_vals) cp_with_mustache(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-tagbot-workflow.yml"), path_for_tagbot_workflow, view_vals) if isfile(path_for_docs_make_file) cp(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-docdeploy-workflow.yml"), path_for_docdeploy_workflow, force=true) elseif isfile(path_for_docdeploy_workflow) rm(path_for_docdeploy_workflow, force=true) end ensure_project_uses_new_enough_documenter(path) add_compathelper(path) isfile(path_for_codeformat_workflow) && rm(path_for_codeformat_workflow, force=true) if template == "bach" update_pkg_bach(path) cp(joinpath(@__DIR__, "..", "templates", "jlpkgbutler-codeformat-pr-workflow.yml"), path_for_codeformat_workflow, force=true) end end end # module
PkgButlerEngine
https://github.com/davidanthoff/PkgButlerEngine.jl.git
[ "MIT" ]
1.13.0
76c8dd8965e9b4c8639012c8815e1842c1306a66
code
643
using PkgButlerEngine
using Test

@testset "PkgButlerEngine" begin

    mktempdir() do temp_path
        temp_path_of_project = joinpath(temp_path, "TestPackage")
        cp(joinpath(@__DIR__, "with_problems"), temp_path_of_project)

        PkgButlerEngine.update_pkg(temp_path_of_project)

        @test isfile(joinpath(temp_path_of_project, ".github", "workflows", "jlpkgbutler-butler-workflow.yml"))
        @test isfile(joinpath(temp_path_of_project, ".github", "workflows", "jlpkgbutler-ci-master-workflow.yml"))
        @test isfile(joinpath(temp_path_of_project, ".github", "workflows", "jlpkgbutler-ci-pr-workflow.yml"))
    end

end
PkgButlerEngine
https://github.com/davidanthoff/PkgButlerEngine.jl.git
[ "MIT" ]
1.13.0
76c8dd8965e9b4c8639012c8815e1842c1306a66
docs
1739
# PkgButlerEngine.jl

## Overview

The backend engine for the Julia Package Butler. This is a low level package that most users will not directly use. Most users will want to use the [`julia-pkgbutler` GitHub Action](https://github.com/davidanthoff/julia-pkgbutler).

## Functionality

The Julia Package Butler currently makes the following changes to a package repository:

- The GitHub Action workflow for the Package Butler itself is updated to the latest version.
- If the `Project.toml` doesn't have a version bound for `julia` in the `compat` section, it will add a version bound declaring the package compatible with Julia 1.0.
- It will add GitHub Action workflows for continuous integration. These workflows are automatically configured to only run on Julia versions that are compatible with the `compat` entry for Julia in the `Project.toml` file of the package.
- If a `docs/make.jl` file exists, a GitHub Action workflow that builds and deploys documentation is added to the package.
- If a `docs/Project.toml` file exists, the butler will ensure that the version bound on Documenter.jl is no lower than 0.24 (the first version to support building documentation with GitHub Actions).
- Enable [CompatHelper.jl](https://github.com/bcbi/CompatHelper.jl) for the repository.
- Enable [TagBot](https://github.com/JuliaRegistries/TagBot) for the repository.

When the `bach` template is used, these additional changes are made:

- Travis and Appveyor configuration files are removed.
- Whenever any Julia file on `master` is not properly formatted, a PR with formatting changes is opened (based on https://github.com/julia-vscode/DocumentFormat.jl).
- Any PR has an additional check that verifies Julia code files are properly formatted.
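For reference, a minimal sketch of driving the engine directly from Julia; the functions `configure_pkg` and `update_pkg` come from this package's source, while the path is a hypothetical example:

```julia
using PkgButlerEngine

# Hypothetical local checkout of a package to manage.
pkg_path = joinpath(homedir(), "dev", "MyPackage")

# Opt into the dev channel and the `bach` template...
PkgButlerEngine.configure_pkg(pkg_path; channel=:dev, template=:bach)

# ...then apply all of the updates described above.
PkgButlerEngine.update_pkg(pkg_path)
```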
PkgButlerEngine
https://github.com/davidanthoff/PkgButlerEngine.jl.git
[ "MIT" ]
1.13.0
76c8dd8965e9b4c8639012c8815e1842c1306a66
docs
844
# $[[PKG_NAME]]

[![Project Status: Active - The project has reached a stable, usable state and is being actively developed.](http://www.repostatus.org/badges/latest/active.svg)](http://www.repostatus.org/#active)
[![](https://img.shields.io/badge/docs-stable-blue.svg)](https://$[[PKG_OWNER]].github.io/$[[PKG_NAME]].jl/stable/)
![](https://github.com/$[[PKG_OWNER]]/$[[PKG_NAME]].jl/workflows/Run%20tests/badge.svg)
[![codecov](https://codecov.io/gh/$[[PKG_OWNER]]/$[[PKG_NAME]].jl/branch/master/graph/badge.svg)](https://codecov.io/gh/$[[PKG_OWNER]]/$[[PKG_NAME]].jl)

## Overview

$[[PKG_DESCRIPTION]]

## Installation

You can add the package at the Julia package REPL with:

```
pkg> add $[[PKG_NAME]]
```

## Getting started

To get started, take a look at the [documentation](https://$[[PKG_OWNER]].github.io/$[[PKG_NAME]].jl/stable/).
PkgButlerEngine
https://github.com/davidanthoff/PkgButlerEngine.jl.git
[ "MIT" ]
1.13.0
76c8dd8965e9b4c8639012c8815e1842c1306a66
docs
35
This is an artificial test project.
PkgButlerEngine
https://github.com/davidanthoff/PkgButlerEngine.jl.git
[ "MIT" ]
0.1.0
7a3a220f1ebab80c72bd021a5263209352e6d014
code
510
using FlowLock
using Documenter

DocMeta.setdocmeta!(FlowLock, :DocTestSetup, :(using FlowLock); recursive=true)

makedocs(;
    modules=[FlowLock],
    authors="Átila Saraiva Quintela Soares",
    sitename="FlowLock.jl",
    format=Documenter.HTML(;
        canonical="https://AtilaSaraiva.github.io/FlowLock.jl",
        edit_link="main",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)

deploydocs(;
    repo="github.com/AtilaSaraiva/FlowLock.jl",
    devbranch="main",
)
FlowLock
https://github.com/AtilaSaraiva/FlowLock.jl.git
[ "MIT" ]
0.1.0
7a3a220f1ebab80c72bd021a5263209352e6d014
code
101
module FlowLock

using SHA: sha256
using MacroTools: postwalk

include("flow.jl")

export @flow

end
FlowLock
https://github.com/AtilaSaraiva/FlowLock.jl.git
[ "MIT" ]
0.1.0
7a3a220f1ebab80c72bd021a5263209352e6d014
code
1956
# Return true only if the file at `path` exists and its sha256 hash matches `sha256sum`.
function checkFileExists(path, sha256sum)
    isfile(path) || return false
    file_sha256 = open(path) do io
        io |> sha256 |> bytes2hex
    end
    return file_sha256 == sha256sum
end

function flow(path::Expr, expr::Expr)
    newExpr = postwalk(x -> x == :path ? path.args[2] : x, expr)
    return quote
        filePath = $(esc(path.args[2]))
        hash_file = filePath * ".hash"

        # Check if hash file exists
        flag = true
        if isfile(hash_file)
            # Read the saved hash
            saved_hash = read(hash_file, String)

            # Check if the file exists and hash matches
            if $checkFileExists(filePath, saved_hash)
                flag = false
            end
        end

        if flag
            $(esc(newExpr))

            # Compute new hash and save it
            new_hash = open(filePath) do io
                io |> $sha256 |> $bytes2hex
            end
            open(hash_file, "w") do io
                write(io, new_hash)
            end
        end
    end
end

"""
    @flow path="path/to/file" expr

A macro that checks whether the file at `path` exists and is unchanged, and only runs `expr` if needed. After the first run, it creates a file with the same name as `path` plus a `.hash` suffix, containing the file's sha256 hash.

# Example

```jldoctest
julia> filePath, _ = mktemp();

julia> @flow path=filePath begin
           println("This should only show once")
           write(filePath, "some random text")
       end
This should only show once
64

julia> @flow path=filePath begin
           println("This should only show once")
           write(filePath, "some random text")
       end
```
"""
macro flow(path::Expr, expr::Expr)
    if path.args[1] != :path
        error("Missing path keyword argument")
    end
    return flow(path, expr)
end
FlowLock
https://github.com/AtilaSaraiva/FlowLock.jl.git
[ "MIT" ]
0.1.0
7a3a220f1ebab80c72bd021a5263209352e6d014
code
399
using FlowLock
using Test

@testset "FlowLock.jl" begin
    counter = 1
    path = "this should not change"
    filePath, _ = mktemp()

    @flow path=filePath begin
        counter += 1
        write(path, "oi1")
    end

    @flow path=filePath begin
        counter += 1
    end

    rm(filePath)
    rm(filePath * ".hash")

    @test counter == 2
    @test path == "this should not change"
end
FlowLock
https://github.com/AtilaSaraiva/FlowLock.jl.git
[ "MIT" ]
0.1.0
7a3a220f1ebab80c72bd021a5263209352e6d014
docs
1563
# FlowLock

[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://AtilaSaraiva.github.io/FlowLock.jl/stable/)
[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://AtilaSaraiva.github.io/FlowLock.jl/dev/)
[![Build Status](https://github.com/AtilaSaraiva/FlowLock.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/AtilaSaraiva/FlowLock.jl/actions/workflows/CI.yml?query=branch%3Amain)
[![Coverage](https://codecov.io/gh/AtilaSaraiva/FlowLock.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/AtilaSaraiva/FlowLock.jl)

This package provides a macro called `@flow` that wraps a piece of code that creates a file, and only runs the code if the file is missing or changed. Here is a snippet of how it works:

```julia
julia> using FlowLock

julia> filePath, _ = mktemp();

julia> @flow path=filePath begin
           println("This should only show once")
           write(filePath, "some random text")
       end
This should only show once
64

julia> @flow path=filePath begin
           println("This should only show once")
           write(filePath, "some random text")
       end
```

## How it works

The macro `@flow` prepends some code that checks whether the file at `path` already exists and whether its sha256 sum matches the hash stored in another file. If it matches, the piece of code passed to the macro is not run. If it doesn't match, the code is run, and afterwards a file is created with the same path as the `path` file but with the suffix ".hash". This file contains the sha256 hash of the output file.
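For the curious, the check is roughly equivalent to this hand-written sketch, mirroring what `flow.jl` generates; `filePath` stands for whatever you pass as `path`:

```julia
using SHA

filePath = "output.dat"                # whatever was passed as `path`
hash_file = filePath * ".hash"

# Up to date only if both files exist and the stored hash matches.
uptodate = isfile(hash_file) && isfile(filePath) &&
    bytes2hex(open(sha256, filePath)) == read(hash_file, String)

if !uptodate
    # ... run the wrapped code, which (re)creates filePath ...
    write(hash_file, bytes2hex(open(sha256, filePath)))
end
```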
FlowLock
https://github.com/AtilaSaraiva/FlowLock.jl.git
[ "MIT" ]
0.1.0
7a3a220f1ebab80c72bd021a5263209352e6d014
docs
180
```@meta
CurrentModule = FlowLock
```

# FlowLock

Documentation for [FlowLock](https://github.com/AtilaSaraiva/FlowLock.jl).

```@index
```

```@autodocs
Modules = [FlowLock]
```
FlowLock
https://github.com/AtilaSaraiva/FlowLock.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
3994
""" Lets shift to using Regex to extract file information """ function OLDDataPathExtraction(path::String, calibration_file::String; verbose=false, extract_photons=true ) #The last line of the string should be the nd information if verbose println(path) end path_array = splitpath(path) #first we can split the file path into seperate strings nt = (Path=path,) #println(path_array) date_res = findmatch(path_array, date_regex; verbose=verbose) if !isnothing(date_res) nt = merge(nt, date_res) end animal_res = findmatch(path_array, animal_regex; verbose=verbose) if !isnothing(animal_res) nt = merge(nt, animal_res) end genotype_res = findmatch(path, genotype_regex; verbose = verbose) if !isnothing(genotype_res) nt = merge(nt, genotype_res) end age_res = findmatch(path, age_regex; verbose = verbose) if !isnothing(age_res) || parse(Int, age_res.Age) > 30 if age_res.Age == "Adult" nt = merge(nt, ("Age" => "30")) else nt = merge(nt, age_res) end end #now lets look for a condition in the possible conds cond_res = findmatch(path, cond_regex) if cond_res.Condition == "Drugs" cond = "BaCl_LAP4" elseif cond_res.Condition == "NoDrugs" || cond_res.Condition == "No drugs" cond = "BaCl" else cond = cond_res.Condition end nt = merge(nt, (Condition=cond,)) pc_res = findmatch(path, pc_regex) if !isnothing(pc_res) nt = merge(nt, pc_res) if pc_res.Photoreceptors == "Rods" #No further label is needed nt = merge(nt, (Wavelength = "520",)) else color_res = findmatch(path, color_regex) if color_res.Color == "Blue" || color_res.Color == "365" || color_res.Color == "365UV" nt = merge(nt, (Wavelength = "365",)) elseif color_res.Color == "Green" || color_res.Color == "525" || color_res.Color == "525Green" nt = merge(nt, (Wavelength="520",)) elseif color_res.Color == "520" || color_res.Color == "520Green" nt = merge(nt, (Wavelength="520",)) else println(color_res) end end end nd_res = findmatch(path, nd_regex; verbose = verbose) if !isnothing(nd_res) nt = merge(nt, nd_res) end percent_res = findmatch(path, percent_regex; verbose = verbose) if !isnothing(nd_res) nt = merge(nt, percent_res) end if extract_photons #extract the stimulus from the data stim_timestamps = extract_stimulus(path)[1].timestamps stim_time = round(Int64, (stim_timestamps[2] - stim_timestamps[1]) * 1000) #println(stim_time) nt = merge(nt, (Stim_Time=stim_time,)) #now lets extract photons if nd_res.ND == "0.5" #println(intensity_info.Percent) println(nd_res) println(nt.Wavelength) photons = photon_lookup( parse(Int64, nt.Wavelength), 0, parse(Int64, nd_res.Percent), calibration_file ) println(photons) photons = (photons*stim_time) / (10^0.5) nt = merge(nt, (Photons=photons,)) else #println(intensity_info.Percent) photons = photon_lookup( parse(Int64, nt.Wavelength), parse(Int64, nd_res.ND), parse(Int64, percent_res.Percent), calibration_file ) .* stim_time nt = merge(nt, (Photons=photons,)) end end #println(nt) #return nt |> clean_nt_numbers return nt |> parseNamedTuple end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
27762
""" """ function run_A_wave_analysis(all_files::DataFrame; measure_minima = false, a_cond = "BaCl_LAP4", t_pre = 1.0, t_post = 2.0, #Extend these to see the end of the a-wave peak_method = false, lb = [1.0, 1.0, 0.1], #Default rmin = 100, kmin = 0.1, nmin = 0.1 p0 = [500.0, 1000.0, 2.0], #Default r = 500.0, k = 200.0, n = 2.0 ub = [Inf, Inf, 10.0], #Default rmax = 2400, kmax = 800 verbose=false, ) a_files = all_files |> @filter(_.Condition == a_cond) |> DataFrame #Extract all A-wave responses if isnothing(a_files) return nothing else a_files[!, :Path] = string.(a_files[!, :Path]) #Make sure the path is a string uniqueData = a_files |> @unique({_.Year, _.Month, _.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype}) |> DataFrame #Pull out all unique files if verbose println("Completed data query") end qTrace = DataFrame() #Make empty dataframes for all traces qExperiment = DataFrame() #Make empty dataframe for all experiments for (idx, i) in enumerate(eachrow(uniqueData)) #Walk through each unique data qData = a_files |> @filter( (_.Year, _.Month, _.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype) == (i.Year, i.Month, i.Date, i.Number, i.Wavelength, i.Photoreceptor, i.Genotype) ) |> DataFrame #Pull out the data dataFile = readABF(qData.Path) if verbose println("Completeing A-wave analysis for $idx out of $(size(uniqueData,1))") println("Path: $(i.Path)") end for data in eachchannel(dataFile) #walk through each row of the data iterator #println(qData.Age) if isa(qData.Age[1], String) matches = match(r"P(?'Age'\d*|)", qData.Age[1]) isadult = match(r"Adult", qData.Age[1]) if !isnothing(matches) age = parse(Int, matches[:Age]) #Extract the age elseif !isnothing(isadult) age = 30 end else age = qData.Age[1] end ch = data.chNames[1] #Extract channel information gain = data.chTelegraph[1] #Extract the gain if gain == 1 if verbose println("Gain is a different") end data / 100.0 end #======================DATA ANALYSIS========================# if age <= 11 #If the data is below P11 calculate the response differently filt_data = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post) #This function is found in the filter pipeline responses = minimas = minimum(filt_data, dims=2)[:, 1, :] maximas = maximum(filt_data, dims=2)[:, 1, :] Resps = abs.(responses) rmaxes = minimum(responses, dims=1) else filt_data = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post) if peak_method #println(argmin(filt_data)) peak_time = filt_data.t[argmin(filt_data)[1][2]] #println(peak_time) truncate_data!(filt_data, t_pre = -peak_time) #Cut the trace before peak end if measure_minima responses = minimas = minimum(filt_data, dims=2)[:, 1, :] #println(minimas) else responses = saturated_response(filt_data) minimas = minimum(filt_data, dims=2)[:, 1, :] end maximas = maximum(filt_data, dims=2)[:, 1, :] Resps = abs.(responses) rmaxes = minimum(responses, dims=1) end Peak_Times = time_to_peak(filt_data) #Calculate the time to peak Integrated_Times = abs.(integral(filt_data)) #Calculate the area under the curve Percent_Recoveries = percent_recovery_interval(filt_data, rmaxes) #Calculate the 60% recovery #======================GLUING TOGETHER THE QUERY========================# #now we can walk through each one of the responses and add it to the qTrace for swp = 1:size(data, 1) #Walk through all sweep info, since sweeps will represent individual datafiles most of the time #inside_row = qData[idx, :Path] #We can break down each individual subsection by the inside row push!(qTrace, ( 
Path=qData[swp, :Path], Year=qData[swp, :Year], Month=qData[swp, :Month], Date=qData[swp, :Date], Age=qData[swp, :Age], Number=qData[swp, :Number], Genotype=qData[swp, :Genotype], Condition = qData[swp, :Condition], Photoreceptor=qData[swp, :Photoreceptor], Wavelength=qData[swp, :Wavelength], Photons=qData[swp, :Photons], Channel=ch, Gain=gain, Response=Resps[swp], Minima=minimas[swp], Maxima=maximas[swp], Peak_Time=Peak_Times[swp], Integrated_Time=Integrated_Times[swp], Percent_Recovery=Percent_Recoveries[swp] #Recovery_Tau=Recovery_Taus[swp], Tau_GOF=Tau_GOFs[swp], ) ) end #This section we need to extract Rdim responses. norm_resp = Resps ./ maximum(Resps) rdim_idxs = findall(0.20 .< norm_resp .< 0.50) if isempty(rdim_idxs) rdim_idx = argmin(Resps) else rdim_min = argmin(Resps[rdim_idxs]) rdim_idx = rdim_idxs[rdim_min] end if size(Resps,1) > 2 #if Resps |> vec p0 = [maximum(Resps), median(qData[:, :Photons]), 2.0] fit, rsq = ePhys.IRfit(qData[:, :Photons], Resps |> vec, p0 = p0 ) #Fitting each data trace to a IR curve push!(qExperiment, ( Year=qData[1, :Year], Month=qData[1, :Month], Date=qData[1, :Date], Age=qData[1, :Age], Number=qData[1, :Number], Genotype=qData[1, :Genotype], Channel = ch, Photoreceptor=qData[1, :Photoreceptor], Wavelength=qData[1, :Wavelength], rmax = maximum(Resps), RMAX_fit = fit.param[1], K_fit = fit.param[2], N_fit = fit.param[3], RSQ_fit = rsq, #, MSE_fit = mse_FIT, rdim=Resps[rdim_idx], integration_time=Integrated_Times[rdim_idx], time_to_peak=Peak_Times[rdim_idx], percent_recovery = mean(Percent_Recoveries) #really strange these usually are averaged #recovery_tau=Recovery_Taus[rdim_idx], )) else #Fitting each data trace to a IR curve push!(qExperiment, ( Year=qData[1, :Year], Month=qData[1, :Month], Date=qData[1, :Date], Age=qData[1, :Age], Number=qData[1, :Number], Genotype=qData[1, :Genotype], Channel = ch, Photoreceptor=qData[1, :Photoreceptor], Wavelength=qData[1, :Wavelength], rmax = maximum(Resps), RMAX_fit = 0.0, K_fit = 0.0, N_fit = 0.0, RSQ_fit = 0.0, #, MSE_fit = mse_FIT, rdim=Resps[rdim_idx], integration_time=Integrated_Times[rdim_idx], time_to_peak=Peak_Times[rdim_idx], percent_recovery = mean(Percent_Recoveries) #really strange these usually are averaged #recovery_tau=Recovery_Taus[rdim_idx], )) end end end qConditions = summarize_data(qTrace, qExperiment; lb = lb, p0 = p0, ub = ub) return qTrace, qExperiment, qConditions end end #We can update this with our updated channel analysis function run_B_wave_analysis(all_files::DataFrame; t_pre = 1.0, t_post = 2.0, #This can be very quick, the initiation of the b-wave is faster a_cond = "BaCl_LAP4", b_cond = "BaCl", verbose=false, ) trace_A = all_files |> @filter(_.Condition == a_cond) |> DataFrame trace_AB = all_files |> @filter(_.Condition == b_cond) |> DataFrame if isempty(trace_AB) return nothing elseif isempty(trace_A) #println("Here") #return nothing b_files = trace_AB |> @join(trace_AB, {_.Year, _.Month, _.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {_.Year, _.Month, _.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {__..., A_condition = _.Condition, A_Path = _.Path, }) |> DataFrame if verbose println("Completed data query") end else b_files = trace_A |> @join(trace_AB, {_.Year, _.Month, _.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {_.Year, _.Month, _.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {__..., A_condition = _.Condition, A_Path = _.Path, }) |> DataFrame if verbose println("Completed 
data query") end end b_files[!, :Path] = string.(b_files[!, :Path]) #XLSX.jl converts things into Vector{Any} b_files[!, :A_Path] = string.(b_files[!, :A_Path]) #XLSX.jl converts things into Vector{Any} uniqueData = b_files |> @unique({_.Year, _.Month, _.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype}) |> DataFrame qTrace = DataFrame() qExperiment = DataFrame() for (idx, i) in enumerate(eachrow(uniqueData)) #We ca if verbose println("Completeing B-wave analysis for $idx out of $(size(uniqueData,1))") println("Path: $(i.Path)") end qData = b_files |> @filter( (_.Year, _.Month, _.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype) == (i.Year, i.Month, i.Date, i.Number, i.Wavelength, i.Photoreceptor, i.Genotype) ) |> DataFrame data_AB = readABF(qData.Path) #Read the AB data filt_data_AB = data_filter(data_AB, avg_swp = false, t_pre = t_pre, t_post=t_post) data_A = readABF(qData.A_Path) filt_data_A = data_filter(data_A, avg_swp = false, t_pre = t_pre, t_post=t_post) #if we want to subtract we need to filter first sub_data = filt_data_AB - filt_data_A for (ch_idx, data_ch) in enumerate(eachchannel(sub_data)) #walk through each row of the data iterator unsubtracted_data_ch = getchannel(filt_data_AB, ch_idx) ch = data_ch.chNames[1] #Extract channel information gain = data_ch.chTelegraph[1] #Calculate the response based on the age if gain == 1 if verbose println("Gain is a different") end data_ch / 100 end #======================DATA ANALYSIS========================# #data from P11 doesn't always make sense, so we can invert it #if age == 11 # data_ch * -1 #end responses = Resps = maximas = maximum(data_ch, dims=2)[:, 1, :] rmaxes = maximum(responses, dims=1) Unsubtracted_Resp = abs.(minima_to_peak(unsubtracted_data_ch)) #This is the unsubtracted Response minimas = minimum(data_ch, dims=2)[:, 1, :] maximas = maximum(data_ch, dims=2)[:, 1, :] Peak_Times = time_to_peak(data_ch) Integrated_Times = integral(data_ch) Percent_Recoveries = percent_recovery_interval(data_ch, rmaxes) #======================GLUING TOGETHER THE QUERY========================# for swp = 1:size(data_ch, 1) #Walk through all sweep info, since sweeps will represent individual datafiles most of the time push!(qTrace, ( Path=qData[swp, :Path], A_Path=qData[swp, :A_Path], Year=qData[swp, :Year], Month=qData[swp, :Month], Date=qData[swp, :Date], Age=qData[swp, :Age], Number=qData[swp, :Number], Genotype=qData[swp, :Genotype], Condition = qData[swp, :Condition], Photoreceptor=qData[swp, :Photoreceptor], Wavelength=qData[swp, :Wavelength], Photons=qData[swp, :Photons], Channel=ch, Gain=gain, Response=Resps[swp], Unsubtracted_Response=Unsubtracted_Resp[swp], Minima=minimas[swp], Maxima=maximas[swp], Peak_Time=Peak_Times[swp], Integrated_Time=Integrated_Times[swp], Percent_Recovery=Percent_Recoveries[swp] ) ) end norm_resp = Resps ./ maximum(Resps) #println(norm_resp) rdim_idxs = findall(0.20 .< norm_resp .< 0.50) if isempty(rdim_idxs) rdim_idx = argmin(Resps) else rdim_min = argmin(Resps[rdim_idxs]) rdim_idx = rdim_idxs[rdim_min] end if size(Resps,1) > 2 p0 = [maximum(Resps), median(qData[:, :Photons]), 2.0] fit, rsq = ePhys.IRfit(qData[:, :Photons], Resps |> vec, p0 = p0 ) #println(fit, rsq) push!(qExperiment, ( Year=qData[1, :Year], Month=qData[1, :Month], Date=qData[1, :Date], Age=qData[1, :Age], Number=qData[1, :Number], Genotype=qData[1, :Genotype], Photoreceptor=qData[1, :Photoreceptor], Wavelength=qData[1, :Wavelength], Channel = ch, Photons=qData[1, :Photons], rmax=maximum(Resps), RMAX_fit = fit.param[1], K_fit 
= fit.param[2], N_fit = fit.param[3], RSQ_fit = rsq, #, MSE_fit = mse_FIT, unsubtracted_rmax=maximum(Unsubtracted_Resp), rdim=Resps[rdim_idx], integration_time=Integrated_Times[rdim_idx], time_to_peak=Peak_Times[rdim_idx], percent_recovery=maximum(Percent_Recoveries) #sum(Percent_Recoveries) / length(Percent_Recoveries) #really strange these usually are averaged )) else push!(qExperiment, ( Year=qData[1, :Year], Month=qData[1, :Month], Date=qData[1, :Date], Age=qData[1, :Age], Number=qData[1, :Number], Genotype=qData[1, :Genotype], Photoreceptor=qData[1, :Photoreceptor], Wavelength=qData[1, :Wavelength], Channel = ch, Photons=qData[1, :Photons], rmax=maximum(Resps), RMAX_fit = 0.0, K_fit = 0.0, N_fit = 0.0, RSQ_fit = 0.0, #, MSE_fit = mse_FIT, unsubtracted_rmax=maximum(Unsubtracted_Resp), rdim=Resps[rdim_idx], integration_time=Integrated_Times[rdim_idx], time_to_peak=Peak_Times[rdim_idx], percent_recovery=maximum(Percent_Recoveries) #sum(Percent_Recoveries) / length(Percent_Recoveries) #really strange these usually are averaged )) end end end qConditions = summarize_data(qTrace, qExperiment) return qTrace, qExperiment, qConditions end """ There is no version of G component analysis that is not subtractive """ function run_G_wave_analysis(all_files::DataFrame; t_pre = 1.0, t_post = 2.0, verbose=false, ) trace_ABG = all_files |> @filter(_.Condition == "NoDrugs") |> DataFrame trace_AB = all_files |> @filter(_.Condition == "BaCl") |> DataFrame if isempty(trace_ABG) return nothing elseif isempty(trace_AB) return nothing else g_files = trace_AB |> @join(trace_ABG, {_.Year, _.Month, _.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {_.Year, _.Month, _.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {__..., AB_condition = _.Condition, AB_Path = _.Path, }) |> DataFrame g_files[!, :Path] = string.(g_files[!, :Path]) #XLSX.jl converts things into Vector{Any} g_files[!, :AB_Path] = string.(g_files[!, :AB_Path]) #XLSX.jl converts things into Vector{Any} uniqueData = g_files |> @unique({_.Year, _.Month, _.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype}) |> DataFrame qTrace = DataFrame() qExperiment = DataFrame() for (idx, i) in enumerate(eachrow(uniqueData)) #We ca qData = g_files |> @filter( (_.Year, _.Month, _.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype) == (i.Year, i.Month, i.Date, i.Number, i.Wavelength, i.Photoreceptor, i.Genotype) ) |> DataFrame data_ABG = readABF(qData.Path) filt_data_ABG = data_filter(data_ABG, avg_swp = false, t_pre = t_pre, t_post = t_post) data_AB = readABF(qData.AB_Path) filt_data_AB = data_filter(data_AB, avg_swp = false, t_pre = t_pre, t_post = t_post) #if we want to subtract we need to filter first sub_data = filt_data_ABG - filt_data_AB if verbose println("Completeing Glial analysis for $idx out of $(size(uniqueData,1))") println("Path: $(i.Path)") end for (ch_idx, data_ch) in enumerate(eachchannel(sub_data)) #walk through each row of the data iterator ch = data_ch.chNames[1] #Extract channel information gain = data_ch.chTelegraph[1] if gain == 1 data_ch / 100.0 end #Calculate the response based on the age unsubtracted_data_ch = getchannel(filt_data_ABG, ch_idx) #======================DATA ANALYSIS========================# responses = minimas = minimum(data_ch, dims=2)[:, 1, :] maximas = maximum(data_ch, dims=2)[:, 1, :] rmaxes = minimum(responses, dims=1) Resps = abs.(responses) Unsubtracted_Resp = abs.(minimum(unsubtracted_data_ch, dims=2)[:, 1, :]) #This is the unsubtracted Response #minimas = 
minimum(data_ch, dims=2)[:, 1, :] Peak_Times = time_to_peak(data_ch) Integrated_Times = integral(data_ch) Percent_Recoveries = percent_recovery_interval(data_ch, rmaxes) #======================GLUING TOGETHER THE QUERY========================# #now we can walk through each one of the responses and add it to the qTrace for swp = 1:size(data_ch, 1) #Walk through all sweep info, since sweeps will represent individual datafiles most of the time #inside_row = qData[idx, :Path] #We can break down each individual subsection by the inside row push!(qTrace, ( Path=qData[swp, :Path], AB_Path=qData[swp, :AB_Path], Year=qData[swp, :Year], Month=qData[swp, :Month], Date=qData[swp, :Date], Age=qData[swp, :Age], Number=qData[swp, :Number], Genotype=qData[swp, :Genotype], Condition = qData[swp, :Condition], Photoreceptor=qData[swp, :Photoreceptor], Wavelength=qData[swp, :Wavelength], Photons=qData[swp, :Photons], Channel=ch, Gain=gain, Response=Resps[swp], Unsubtracted_Response=Unsubtracted_Resp[swp], Minima=minimas[swp], Maxima=maximas[swp], Peak_Time=Peak_Times[swp], Integrated_Time=Integrated_Times[swp], Percent_Recovery=Percent_Recoveries[swp]) ) end norm_resp = Resps ./ maximum(Resps) #println(norm_resp) rdim_idxs = findall(0.20 .< norm_resp .< 0.50) if isempty(rdim_idxs) rdim_idx = argmin(Resps) else rdim_min = argmin(Resps[rdim_idxs]) rdim_idx = rdim_idxs[rdim_min] end if size(Resps,1) > 2 #println(size(Resps)) #if Resps |> vec p0 = [maximum(Resps), median(qData[:, :Photons]), 2.0] fit, rsq = ePhys.IRfit(qData[:, :Photons], Resps |> vec, p0 = p0 ) push!(qExperiment, ( Year=qData[1, :Year], Month=qData[1, :Month], Date=qData[1, :Date], Age=qData[1, :Age], Number=qData[1, :Number], Genotype=qData[1, :Genotype], Channel = ch, Photoreceptor=qData[1, :Photoreceptor], Wavelength=qData[1, :Wavelength], Photons=qData[1, :Photons], rmax=maximum(Resps), RMAX_fit = fit.param[1], K_fit = fit.param[2], N_fit = fit.param[3], RSQ_fit = rsq, #, MSE_fit = mse_FIT, unsubtracted_rmax=maximum(Unsubtracted_Resp), rdim=Resps[rdim_idx], integration_time=Integrated_Times[rdim_idx], time_to_peak=Peak_Times[rdim_idx], percent_recovery=maximum(Percent_Recoveries) #This is maximum vs average )) else push!(qExperiment, ( Year=qData[1, :Year], Month=qData[1, :Month], Date=qData[1, :Date], Age=qData[1, :Age], Number=qData[1, :Number], Genotype=qData[1, :Genotype], Channel = ch, Photoreceptor=qData[1, :Photoreceptor], Wavelength=qData[1, :Wavelength], Photons=qData[1, :Photons], rmax=maximum(Resps), RMAX_fit = 0.0, K_fit = 0.0, N_fit = 0.0, RSQ_fit = 0.0, #, MSE_fit = mse_FIT, unsubtracted_rmax=maximum(Unsubtracted_Resp), rdim=Resps[rdim_idx], integration_time=Integrated_Times[rdim_idx], time_to_peak=Peak_Times[rdim_idx], percent_recovery=maximum(Percent_Recoveries) #This is maximum vs average )) end end end qConditions = summarize_data(qTrace, qExperiment) return qTrace, qExperiment, qConditions end end function runAnalysis(datasheet::DataFrame; measure_minima = false, verbose = false) resA = ePhys.run_A_wave_analysis(datasheet; measure_minima = measure_minima, verbose = verbose) resB = ePhys.run_B_wave_analysis(datasheet, verbose = verbose) resG = ePhys.run_G_wave_analysis(datasheet, verbose = verbose) return (datasheet, resA, resB, resG) end function add_analysis_sheets(results, save_file::String; append="A") trace, experiments, conditions = results dataset = openDatasheet(save_file; sheetName = "all", typeConvert = false) rewrite_sheets = ["trace_$append", "experiments_$append", "conditions_$append"] for sheet in 
rewrite_sheets #We need to add any new sheets to the current sheet. Kind of a waste but it works if sheet == "trace_$append" dataset[sheet] = trace elseif sheet == "experiments_$append" dataset[sheet] = experiments elseif sheet == "conditions_$append" dataset[sheet] = conditions end end XLSX.openxlsx(save_file, mode = "w") do xf #This will always add a sheet1 for sName in keys(dataset) #println("$sName") if sName == "All_Files" XLSX.rename!(xf[1], "All_Files") else XLSX.addsheet!(xf, sName) end XLSX.writetable!(xf[sName], dataset[sName]) end end end function runAnalysis(datafile::String; kwargs...) print("Opening datafile $(datafile)... ") datasheet = openDatasheet(datafile, sheetName = "All_Files") println("complete") datasheet, resA, resB, resG = runAnalysis(datasheet; kwargs...) if !isnothing(resA) add_analysis_sheets(resA, datafile; append="A") end if !isnothing(resB) add_analysis_sheets(resB, datafile; append="B") end if !isnothing(resG) add_analysis_sheets(resG, datafile; append="G") end end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
23603
#This file contains the calibration data function extract_categories(cond_df::DataFrame) categories = [] for row in eachrow(cond_df) a = [row.Age, row.Genotype, row.Photoreceptor, row.Wavelength] push!(categories, a) end categories end function extractIR(trace_datafile::DataFrame, category; measure = :Response, kwargs...) allIR = trace_datafile |> @filter(_.Age == category[1]) |> @filter(_.Genotype == category[2]) |> @filter(_.Photoreceptor == category[3]) |> @filter(_.Wavelength == category[4]) |> DataFrame intensity = allIR.Photons response = abs.(allIR[!, measure]) #generate the fits for the equation r = measure == :Minima ? abs(minimum(response)) : maximum(response) #println(intensity) fit = HILLfit(intensity, response; r = r, rmax = (r + 1000), kwargs...) model(I, p) = map(i -> p[1]*IR(i, p[2], p[3]), I) fitResponse = model(intensity, fit.param) allIR[!, :FitVariable] = fitResponse allIR[!, :Residual] = fit.resid select!(allIR, [ :Path, :Year, :Month, :Date, :Age, :Number, :Genotype, :Photoreceptor, :Wavelength, :Channel, :Gain, :Photons, measure, :FitVariable, :Residual ] ) return allIR, fit end function runTraceAnalysis(dataset::Dict{String, DataFrame}; a_cond = "BaCl_LAP4", b_cond = "BaCl", g_cond = "NoDrugs", sample_rate = 10_000.0, t_pre=1.0, t_post=1.0, measure_minima = false, measure_abs = false, subtraction = true, verbose = true, ) uniqueData = dataset["ALL_FILES"] |> @unique({_.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype, _.Condition}) |> DataFrame #Pull out all unique files if verbose println("Completed data query") end qTrace = DataFrame() #Make empty dataframes for all traces for (idx, i) in enumerate(eachrow(uniqueData)) #Walk through each unique data if verbose println("Completeing analysis for $idx out of $(size(uniqueData,1))") println("Path: $(i.Path)") end qData = dataset["ALL_FILES"] |> @filter( (_.Date, _.Number, _.Wavelength, _.Photoreceptor, _.Genotype) == (i.Date, i.Number, i.Wavelength, i.Photoreceptor, i.Genotype) ) |> DataFrame #Pull out the data #Determine whether or not a subtraction should be done if subtraction if i.Condition == a_cond qTRIALa = qTRIAL = qData |> @filter(_.Condition == a_cond) |> DataFrame qTRIAL[!, :SubPath] .= "NONE" #There is no subtraction #pull out only A-wave files data = readABF(qTRIALa.Path, sort_by_date = false) dataABF = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate) #This function is found in the filter pipeline #println(dataABF |> size) elseif i.Condition == b_cond #println("Analysis of B-wave file") qTRIALb = qData |> @filter(_.Condition == b_cond) |> DataFrame qTRIALa = qData |> @filter(_.Condition == a_cond) |> DataFrame qTRIAL = SubFiles = qTRIALa |> @join(qTRIALb, {_.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {_.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {__..., SubPath = _.Path, }) |> DataFrame if isempty(qTRIAL) println("\t Experiment $(i.Path) was incomplete") continue end #println(SubFiles.Path) data = readABF(SubFiles.Path, sort_by_date = false) #Read the AB data filt_data = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate,) dataSUB = readABF(SubFiles.SubPath, sort_by_date = false) filt_dataSUB = data_filter(dataSUB, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate,) #if we want to subtract we need to filter first dataABF = filt_data - filt_dataSUB #println(size(dataABF)) elseif i.Condition == g_cond #println("Analysis of Glial files") 
qTRIALb = qData |> @filter(_.Condition == b_cond) |> DataFrame qTRIALg = qData |> @filter(_.Condition == g_cond) |> DataFrame qTRIAL = SubFiles = qTRIALb |> @join(qTRIALg, {_.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {_.Date, _.Number, _.Photons, _.Wavelength, _.Photoreceptor, _.Genotype}, {__..., SubPath = _.Path, }) |> DataFrame if isempty(qTRIAL) println("\t Experiment $(i.Path) was incomplete") continue end #println(qTRIAL |> size) data = readABF(SubFiles.Path, sort_by_date = false) #Read the AB data filt_data = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate) dataSUB = readABF(SubFiles.SubPath, sort_by_date = false) filt_dataSUB = data_filter(dataSUB, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate) #if we want to subtract we need to filter first dataABF = filt_data - filt_dataSUB #println(size(dataABF)) end else if i.Condition == a_cond qTRIAL = qData |> @filter(_.Condition == a_cond) |> DataFrame qTRIAL[!, :SubPath] .= "NONE" #There is no subtraction #pull out only A-wave files data = readABF(qTRIAL.Path, sort_by_date = false) dataABF = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate) #This function is found in the filter pipeline #println(dataABF |> size) elseif i.Condition == b_cond #println("Analysis of B-wave file") qTRIAL = qData |> @filter(_.Condition == b_cond) |> DataFrame qTRIAL[!, :SubPath] .= "NONE" #There is no subtraction #println(SubFiles.Path) data = readABF(qTRIAL.Path, sort_by_date = false) #Read the AB data dataABF = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate) #This function is found in the filter pipeline #println(size(dataABF)) elseif i.Condition == g_cond #println("Analysis of Glial files") qTRIAL = qData |> @filter(_.Condition == g_cond) |> DataFrame qTRIAL[!, :SubPath] .= "NONE" #There is no subtraction #println(qTRIAL |> size) data = readABF(qTRIAL.Path, sort_by_date = false) #Read the AB data dataABF = data_filter(data, avg_swp = false, t_pre = t_pre, t_post=t_post, sample_rate = sample_rate) #This function is found in the filter pipeline end end #Conduct the analysis for each channel for (ch_idx, data_ch) in enumerate(eachchannel(dataABF)) #walk through each row of the data iterator gain = data.chTelegraph[ch_idx] if gain == 1 #Extract the gain == 1 if verbose println("Gain is a different") end data / 100.0 end minimas = minimum(data_ch, dims=2)[:, 1, :] maximas = maximum(data_ch, dims=2)[:, 1, :] if measure_abs responses = maximum(abs.(hcat(maximas, minimas)), dims = 2) |> vec else if i.Condition == a_cond if measure_minima || i.Photoreceptor == "Cones" responses = abs.(minimas) verbose ? println("Only minimas") : nothing else responses = abs.(saturated_response(data_ch)) end elseif i.Condition == b_cond responses = abs.(maximas) elseif i.Condition == g_cond responses = abs.(minimas) end end Peak_Times = time_to_peak(data_ch) Integrated_Times = integral(data_ch) rmax = maximum(responses, dims = 1) Percent_Recoveries = percent_recovery_interval(data_ch, rmax) for swp in axes(data_ch, 1) #Walk through all sweep info, since sweeps will represent individual datafiles most of the time #println(swp) #inside_row = qData[idx, :Path] #We can break down each individual subsection by the inside row push!(qTrace, ( Path=qTRIAL[swp, :Path], SubPath = qTRIAL[swp, :SubPath], #SubPath= isnothing(SubFiles) ? 
SubFiles[swp, :SubPath] : nothing, Date=qTRIAL[swp, :Date], Age=qTRIAL[swp, :Age], Number=qTRIAL[swp, :Number], Genotype=qTRIAL[swp, :Genotype], Condition = qTRIAL[swp, :Condition], Photoreceptor=qTRIAL[swp, :Photoreceptor], Wavelength=qTRIAL[swp, :Wavelength], Photons=qTRIAL[swp, :Photons], Channel=data_ch.chNames[1], Gain=gain, Response=responses[swp], Minima=minimas[swp], Maxima=maximas[swp], Peak_Time=Peak_Times[swp], Integrated_Time=Integrated_Times[swp], Percent_Recovery=Percent_Recoveries[swp] #Recovery_Tau=Recovery_Taus[swp], Tau_GOF=Tau_GOFs[swp], ) ) end end end dataset["TRACES"] = qTrace return dataset end function runExperimentAnalysis(dataset::Dict{String, DataFrame}; lb = [1.0, 1.0, 0.1], #Default rmin = 100, kmin = 0.1, nmin = 0.1 p0 = :determine, ub = [Inf, Inf, 10.0], #Default rmax = 2400, kmax = 800 verbose = false, ) EXPERIMENTS = dataset["TRACES"] |> @unique({_.Date, _.Age, _.Number, _.Genotype, _.Channel, _.Condition, _.Photoreceptor, _.Wavelength}) |> DataFrame #println(EXPERIMENTS) qExperiment = DataFrame() #Make empty dataframe for all experiments for exp in eachrow(EXPERIMENTS) INFO = ( Date = exp.Date, Age = exp.Age, Number = exp.Number, Genotype = exp.Genotype, Channel = exp.Channel, Photoreceptor = exp.Photoreceptor, Condition = exp.Condition, Wavelength = exp.Wavelength ) matched = matchExperiment(dataset["TRACES"], INFO) rdim_idx = findRDIM(matched.Response) #println(rdim_idx) if size(matched.Response,1) > 2 if p0 == :determine p0 = [maximum(matched.Response), median(matched.Photons), 2.0] end #Fitting each data trace to a IR curve fit, fit_RSQ = HILLfit(matched.Photons, matched.Response; p0 = p0, lb = lb, ub = ub) fit_RMAX = fit.param[1] fit_K = fit.param[2] fit_N = fit.param[3] if verbose println("Fit r-squared = $fit_RSQ") end else #Fitting each data trace to a IR curve fit_RMAX = 0.0 fit_K = 0.0 fit_N = 0.0 fit_RSQ = 0.0 #println("\t ONly $(length(responses)) responses. IR curve couldn't be fit") end frame = (;INFO... , rmax = maximum(matched.Response), rdim = matched.Response[rdim_idx], RMAX_fit = fit_RMAX, K_fit = fit_K, N_fit = fit_N, RSQ_fit = fit_RSQ, #, MSE_fit = mse_FIT, integration_time = matched.Integrated_Time[rdim_idx], time_to_peak = matched.Peak_Time[rdim_idx], percent_recovery = mean(matched.Percent_Recovery) #really strange these usually are averaged #recovery_tau=Recovery_Taus[rdim_idx], ) push!(qExperiment, frame) end dataset["EXPERIMENTS"] = qExperiment return dataset end function runConditionsAnalysis(dataset::Dict{String, DataFrame}; verbose = false, kwargs...) 
#filter out all flags qExps = dataset["EXPERIMENTS"] qTraces = matchExperiment(dataset["TRACES"], qExps) qConditions = qExps |> @groupby({_.Age, _.Genotype, _.Photoreceptor, _.Wavelength, _.Condition}) |> @map({ Age = _.Age[1], Genotype = _.Genotype[1], Photoreceptor = _.Photoreceptor[1], Wavelength = _.Wavelength[1], Condition = _.Condition[1], N = length(_), Rmax = mean(_.rmax), Rmax_sem = sem(_.rmax), Rdim = mean(_.rdim), Rdim_sem = sem(_.rdim), Integrated_Time = mean(_.integration_time), Integrated_Time_sem = sem(_.integration_time), Time_to_peak = mean(_.time_to_peak), Time_To_Peak_sem = sem(_.time_to_peak), Percent_Recovery = mean(_.percent_recovery), Percent_Recovery_sem = sem(_.percent_recovery), K_IR = mean(_.K_fit), K_SEM_IR = sem(_.K_fit), RMAX_COLL = 0.0, K_COLL = 0.0, N_COLL = 0.0, RSQ_COLL = 0.0 #Recovery_Tau = mean(_.recovery_tau), Recovery_Tau_sem = sem(_.recovery_tau), }) |> DataFrame #iterate through each conditon and generate a collated IR curve for (idx, cond) in enumerate(eachrow(qConditions)) qIND_COND = qTraces |> @filter(_.Age == cond.Age) |> @filter(_.Genotype == cond.Genotype) |> @filter(_.Condition == cond.Condition) |> @filter(_.Photoreceptor == cond.Photoreceptor) |> @filter(_.Wavelength == cond.Wavelength) |> DataFrame if verbose println("Summarizing conditions $cond") end if size(qIND_COND, 1) > 2 fit, rsq = HILLfit(qIND_COND.Photons, qIND_COND.Response; kwargs...) qConditions[idx, :RMAX_COLL] = fit.param[1] qConditions[idx, :K_COLL] = fit.param[2] qConditions[idx, :N_COLL] = fit.param[3] qConditions[idx, :RSQ_COLL] = rsq end end dataset["CONDITIONS"] = qConditions return dataset end sem(x) = std(x)/sqrt(length(x)) function runStatsAnalysis(dataset; control = "WT", stat_metrics = [:rmax, :rdim, :K_fit, :time_to_peak, :percent_recovery, :integration_time], verbose = true, ) qEXP = dataset["EXPERIMENTS"] #unflagged_exps = qEXP |> @filter(_.INCLUDE == true) |> DataFrame exps = DataFrame[] for stat in stat_metrics if verbose println("Running $stat") end res_stat = qEXP |> @groupby({_.Genotype, _.Age, _.Condition, _.Photoreceptor}) |> @map({Genotype = key(_)[1], Age = key(_)[2], Condition = key(_)[3], Photoreceptor = key(_)[4], N = length(_), METRIC = string(stat), AVG = 0.0, STD = 0.0, SEM = 0.0, CI = 0.0, LOWER = 0.0, UPPER = 0.0, P = 0.0, SIGN = "-" })|> DataFrame for (idx, info) in enumerate(eachrow(res_stat)) #first load the control data ctrl_data = qEXP |> @filter(_.Genotype == control && _.Age == info.Age && _.Condition == info.Condition && _.Photoreceptor == info.Photoreceptor) |> DataFrame exp_data = qEXP |> @filter(_.Genotype == info.Genotype && _.Age == info.Age && _.Condition == info.Condition && _.Photoreceptor == info.Photoreceptor) |> DataFrame res_stat[idx, :AVG] = mean(exp_data[:, stat]) res_stat[idx, :STD] = std(exp_data[:, stat]) res_stat[idx, :SEM] = sem(exp_data[:, stat]) res_stat[idx, :CI] = CI = 1.96*sem(exp_data[:, stat]) res_stat[idx, :LOWER] = mean(exp_data[:, stat]) - CI res_stat[idx, :UPPER] = mean(exp_data[:, stat]) + CI verbose ? println(stat) : nothing verbose ? println("Size data = $(size(exp_data,1))") : nothing verbose ? println("Size control data = $(size(ctrl_data,1))") : nothing verbose ? println(ctrl_data[:, stat]) : nothing verbose ? 
println(exp_data[:, stat]) : nothing if size(exp_data,1) > 1 && size(ctrl_data,1) > 1 && sum(ctrl_data[:, stat]) != sum(exp_data[:, stat]) try res_stat[idx, :P] = P = UnequalVarianceTTest(ctrl_data[:, stat], exp_data[:, stat]) |> pvalue res_stat[idx, :SIGN] = "*" catch #error res_stat[idx, :P] = 1.0 res_stat[idx, :SIGN] = "-" end else res_stat[idx, :P] = 1.0 res_stat[idx, :SIGN] = "-" end end push!(exps, res_stat) end stats = vcat(exps...) dataset["STATS"] = stats return dataset end function runDataAnalysis(filenames::Vector{String}; #Options for the createDataset seperate_dates = false, #Options for runTraceAnalysis a_cond = "BaCl_LAP4", b_cond = "BaCl", g_cond = "NoDrugs", sample_rate = 10_000.0, t_pre=1.0, t_post=1.0, measure_minima = false, measure_abs = false, subtraction = true, #Options for runExperimentAnalysis lb = [1.0, 1.0, 0.1], #Default rmin = 100, kmin = 0.1, nmin = 0.1 p0 = :determine, ub = [Inf, Inf, 10.0], #Default rmax = 2400, kmax = 800 #Options for runStatsAnalysis control = "WT", stat_metrics = [:rmax, :rdim, :K_fit, :time_to_peak, :percent_recovery, :integration_time], #General options debug::Bool = false, verbose = 1, #3 modes -> 0: nothing, 1: only shows progress, 2: shows progress and inside of functions ) now = Dates.now() verbose > 0 ? println("[$now]: Begin analyzing data: ") : nothing dataset = createDataset(filenames; seperate_dates = seperate_dates, verbose = verbose==2, debug = debug); verbose > 0 ? println("\t [$(Dates.now() - now)] Files") : nothing now = Dates.now() dataset = runTraceAnalysis(dataset, a_cond = a_cond, b_cond = b_cond, g_cond = g_cond, sample_rate = sample_rate, t_pre = t_pre, t_post = t_post, measure_minima = measure_minima, measure_abs = measure_abs, subtraction = subtraction, verbose = verbose==2, ); verbose > 0 ? println("\t [$(Dates.now() - now)] Traces") : nothing now = Dates.now() dataset = runExperimentAnalysis(dataset, lb = lb, p0 = p0, ub = ub, verbose = verbose==2); verbose > 0 ? println("\t [$(Dates.now() - now)] Experiments") : nothing now = Dates.now() dataset = runConditionsAnalysis(dataset, verbose = verbose==2); verbose > 0 ? println("\t [$(Dates.now() - now)] Conditions") : nothing now = Dates.now() dataset = runStatsAnalysis(dataset, control = control, stat_metrics = stat_metrics, verbose = verbose==2); verbose > 0 ? println("\t [$(Dates.now() - now)] Stats ") : nothing return dataset end function runDataAnalysis(data::Experiment; verbose = false, subtraction = true) filenames = joinpath(splitpath(data.HeaderDict["abfPath"])[1:end-1]...) 
|> parseABF
    return runDataAnalysis(filenames; verbose = verbose, subtraction = subtraction)
end

function runDataAnalysis(fileroot::String; verbose = false, subtraction = true)
    filenames = fileroot |> parseABF
    return runDataAnalysis(filenames; verbose = verbose, subtraction = subtraction)
end

#This can be used for IR and STF, but not for Tau or LP model
function GenerateFitFrame(df_TRACE, xData, yData;
    MODEL = HILL_MODEL, #The model function to fit
    lb = (100.0, 1.0, 0.1), #Default rmin = 100.0, kmin = 1.0, nmin = 0.1
    p0 = (500.0, 200.0, 2.0), #Default r = 500.0, k = 200.0, n = 2.0
    ub = (2400, 400, 4.0), #Default rmax = 2400, kmax = 400, nmax = 4.0
    verbose = true
)
    df_EXP = df_TRACE |>
        @unique({_.Year, _.Month, _.Date, _.Number, _.Channel}) |>
        @map({
            _.Year, _.Month, _.Date, _.Number, _.Channel,
            RMAX = 0.0, K = 0.0, N = 0.0, RSQ = 0.0,
            rmin = lb[1], r = p0[1], rmax = ub[1], #The highest b-wave ever seen is (2400)
            kmin = lb[2], k = p0[2], kmax = ub[2], #Half of the highest a-wave ever seen (800)
            nmin = lb[3], n = p0[3], nmax = ub[3],
            MSE = 0.0, SS_Resid = 0.0, SS_Total = 0.0,
        }) |>
        DataFrame
    for (idx, exp) in enumerate(eachrow(df_EXP))
        #println(exp)
        YEAR = exp.Year
        MONTH = exp.Month
        DATE = exp.Date
        NUMBER = exp.Number
        CHANNEL = exp.Channel
        if verbose
            print("Fitting exp $idx: $(YEAR)_$(MONTH)_$(DATE)_$(NUMBER)_$(CHANNEL)")
        end
        exp_traces = df_TRACE |>
            @filter((_.Year, _.Month, _.Date, _.Number, _.Channel) == (exp.Year, exp.Month, exp.Date, exp.Number, exp.Channel)) |>
            DataFrame
        #Conduct the fitting
        p0 = [exp.r, exp.k, exp.n]
        ub = [exp.rmax, exp.kmax, exp.nmax]
        lb = [exp.rmin, exp.kmin, exp.nmin]
        exp_STF = curve_fit(MODEL, exp_traces[:, xData], exp_traces[:, yData], p0, lower = lb, upper = ub)
        df_EXP[idx, :RMAX] = exp_STF.param[1]
        df_EXP[idx, :K] = exp_STF.param[2]
        df_EXP[idx, :N] = exp_STF.param[3]
        df_EXP[idx, :SS_Resid] = ss_resid = sum(exp_STF.resid .^ 2)
        df_EXP[idx, :MSE] = ss_resid / size(exp_traces, 1)
        yTrue = exp_traces[:, yData]
        yMean = mean(yTrue)
        df_EXP[idx, :SS_Total] = ss_total = sum((yTrue .- yMean) .^ 2)
        df_EXP[idx, :RSQ] = 1 - (ss_resid / ss_total)
        if verbose
            println("RSQ is at $(df_EXP[idx, :RSQ])")
        end
    end
    return df_EXP
end
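The intensity-response fits above (HILLfit in runExperimentAnalysis, curve_fit in GenerateFitFrame) reduce to an ordinary LsqFit call. Here is a minimal sketch with synthetic numbers, assuming HILL_MODEL (defined in Fitting/Models.jl, not shown here) takes the usual Hill form R(I) = RMAX * I^N / (I^N + K^N) with parameters p = [RMAX, K, N]:

```julia
using LsqFit

# Assumed Hill form; the package's HILL_MODEL is expected to be equivalent,
# with parameters p = [RMAX, K, N].
@. hill(x, p) = p[1] * x^p[3] / (x^p[3] + p[2]^p[3])

photons  = [1e2, 1e3, 1e4, 1e5, 1e6]          # synthetic flash strengths
response = [10.0, 80.0, 300.0, 420.0, 450.0]  # synthetic response amplitudes

p0 = [maximum(response), 1e4, 2.0]  # same heuristic as the :determine branch
fit = curve_fit(hill, photons, response, p0,
    lower = [1.0, 1.0, 0.1], upper = [Inf, Inf, 10.0])
RMAX, K, N = fit.param
```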
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
9076
""" # Example ```julia-repl dataset["EXPERIMENTS"] test = convertDate_inFrame!(dataset["EXPERIMENTS"]) dataset["EXPERIMENTS"] saveDataset(dataset, save_file) ``` """ function convertDate_inFrame!(df::DataFrame) df[!, :Date] = Date.(parse.(Int64, df[!, :Year]), parse.(Int64, df[!, :Month]), parse.(Int64, df[!, :Date])) select!(df, Not(:Year)) select!(df, Not(:Month)) return df end """ This function cleans the data out of a dataframe if the dataframe is already open """ function cleanDatasheet!(xf::XLSX.XLSXFile, sheetname::String) if sheetname ∈ XLSX.sheetnames(xf) sheet = xf[sheetname] nrows, ncols = size(sheet[:]) names = map(col -> sheet[1, col], 1:ncols) eraser = [] for name in names eraser_col = (name, fill("", nrows - 1)...) push!(eraser, eraser_col) end XLSX.writetable!(xf[sheetname], fill(Tuple(fill("", nrows)), ncols), names ) else println("Sheetname not in sheets") println("Choose from one of these sheets:") for sn in XLSX.sheetnames(xf) println("-> $sn") end end end """ This function cleans the data out of a dataframe and saves it """ function cleanDatasheet!(filename::String, sheetname::String) XLSX.openxlsx(filename, mode="rw") do xf cleanDataFrame!(xf, sheetname) end end """ dataset = createDataset(files::Vector{String}[; verbose = false, run_analysis = true]) This function creates a dataset from a group of files. The best thing to do is to point to your datafiles root and then use the parseABF function. - If run_analysis is selected, the function runTraceAnalysis will automatically be run on the dataset # Examples ```julia-repl data_root = "\\user\\myroot" data_files = parseABF(data_root) dataset = createDataset(data_files) ``` """ function createDataset(all_files::Vector{String}; seperate_dates = false, verbose::Bool = false, debug::Bool = false, ) dataframe = DataFrame() for (idx, file) in enumerate(all_files) verbose ? print("Analyzing file $idx of $(size(all_files, 1)): $file ...") : nothing try entry = DataPathExtraction(file) if isnothing(entry) && verbose #Throw this in the case that the entry cannot be fit println("Failed") #elseif length(entry) != size(dataframe, 2) # println("Entry does not match dataframe size. Probably an extra category") else verbose ? println("Success") : nothing push!(dataframe, entry) end catch error verbose ? println("$file \n $error") : nothing if debug throw(error) end end end if !(seperate_dates) convertDate_inFrame!(dataframe) end return Dict("ALL_FILES" => dataframe) end createDataset(file_root::String; verbose = false, run_analysis = true, kwargs...) = createDataset(file_root |> parseABF; verbose = verbose, run_analysis = run_analysis, kwargs...) """ dataset = openDataset(datafile::String, [; typeConvert = true, sheetnames::Union{String, Vector{String}} = ["ALL_FILES", "TRACES", "EXPERIMENTS", "CONDITIONS", "STATS"] ]) This function opens a saved dataset as an excel file. 
# Example ```julia-repl datafile = "\\user\\myroot\\datafile.xlsx" dataset = openDataset(datafile) ``` """ function openDataset(datafile::String; typeConvert=true, sheetnames=nothing, verbose = true, debug = false ) if isnothing(sheetnames) xf = XLSX.readxlsx(datafile) df_set = Dict{String, DataFrame}() for sn in XLSX.sheetnames(xf) df_set[sn] = openDataset(datafile; typeConvert = typeConvert, sheetnames = sn) end return df_set elseif !isnothing(sheetnames) && isa(sheetnames, String) try df = DataFrame(XLSX.readtable(datafile, sheetnames)) if typeConvert df = safe_convert(df) #This converts the categories to a type in the first position end return df catch error if debug throw(error) end if verbose println("Table doesn't exist yet") end return DataFrame() end end end """ This function will read and update the datasheet with the new files """ function updateDataset(data_file::String, all_files::Vector{String}; reset::Bool=false, savefile::Bool=true) if reset #If this is selected, completely reset the analysis else df = openDatasheet(data_file; sheetName = "ALL_FILES") #First, open the old datasheet nrows, ncols = size(df) println("Searching for files that need to be added and removed") old_files = df[:, :Path] #Pull out a list of old files #This searches for files that occur in all files, but not in old files, indicating they need added to the analysis files_to_add = findall(isnothing, indexin(all_files, old_files)) #This searches for files that are in old files, but not not in all files, indicating they may need deleted files_to_remove = findall(isnothing, indexin(old_files, all_files)) duplicate_files = findall(nonunique(df)) if !isempty(files_to_add) #There are no files to add println("Adding $(length(files_to_add)) files") new_files = all_files[files_to_add] df_new = createDatasheet(new_files) df = vcat(df, df_new) #Add the new datasheet to the old one end if !isempty(files_to_remove) println("Removing $(length(files_to_add)) files") deleteat!(df, files_to_remove) end if !isempty(duplicate_files) println("Removing $(length(duplicate_files)) duplicated files") deleteat!(df, duplicate_files) end #Sort the dataframe df = df |> @orderby(_.Year) |> @thenby(_.Month) |> @thenby(_.Date) |> @thenby(_.Animal) |> @thenby(_.Genotype) |> @thenby(_.Condition) |> @thenby(_.Wavelength) |> @thenby(_.Photons) |> DataFrame if savefile println("Saving file... 
") XLSX.openxlsx(data_file, mode="rw") do xf print("Erasing file") cleanDatasheet!(xf, "All_Files") #This cleans all data from All_Files #This re-writes it XLSX.writetable!(xf["All_Files"], collect(DataFrames.eachcol(df)), DataFrames.names(df) ) end println("Complete") end return df end end function copyDataset(datafile::String, sheetname = "all", backup = true) root = joinpath(splitpath(datafile)[1:end-1]) if isfile(datafile) #we should only do these things if the datafile exists new_fn = "$root\\temp.xlsx" XLSX.openxlsx(new_fn, mode = "w") do xf sheet1 = xf[1] #Sheet 1 should be renamed XLSX.rename!(sheet1, "ALL_FILES") XLSX.addsheet!(xf, "TRACES") XLSX.addsheet!(xf, "EXPERIMENTS") XLSX.addsheet!(xf, "CONDITIONS") #XLSX.addsheet!(xf, "STATISTICS") #XLSX.addsheet!(xf, "FITTING") #XLSX.addsheet!(xf, "SYNAPTIC TRANSFER FUNCTION") end end end """ This function quicksaves the datafile you are working with """ function backupDataset(datafile::String) date = now() root = joinpath(splitpath(datafile)[1:end-1]) filename = splitpath(datafile)[end][1:end-5] backup_file = "$(root)\\$(year(date))_$(month(date))_$(day(date))_$(filename)_BACKUP_$(hour(date))_$(minute(date))_$(second(date)).xlsx" cp(datafile, backup_file) end function saveDataset(dataset::Dict{String, DataFrame}, filename::String; categories = [ "ALL_FILES", "TRACES", "EXPERIMENTS", "CONDITIONS", "STATS"] ) XLSX.openxlsx(filename, mode = "w") do xf sheet_ALL = xf[1] #Sheet 1 should be renamed XLSX.rename!(sheet_ALL, categories[1]) XLSX.writetable!(sheet_ALL, dataset[categories[1]]) for i in eachindex(categories)[2:end] XLSX.addsheet!(xf, categories[i]) if haskey(dataset, categories[i]) sheet_TRACES = xf[i] if !isempty(dataset[categories[i]]) XLSX.writetable!(sheet_TRACES, dataset[categories[i]]) end end end end end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
11627
""" photon_lookup(wavelength, nd, percent, stim_time, calibration_file[,sheet_name]) Uses the calibration file or datasheet to look up the photon density. The Photon datasheet should be either """ function photon_lookup(wavelength::Real, nd::Real, percent::Real; path = :default, sheet_name::String="Current_Test") try if path == :default df = DataFrame(XLSX.readtable(calibration_path(), sheet_name)) else df = DataFrame(XLSX.readtable(path, sheet_name)) end Qi = df |> @filter(_.Wavelength == wavelength) |> @filter(_.ND == nd) |> @filter(_.Intensity == percent) |> #@filter(_.stim_time == stim_time) |> @map(_.Photons) |> DataFrame #%% if !isempty(Qi) #Only return f an entry exists return Qi.value[1] end catch error if isa(error, AssertionError) println("Calibration File Incorrectly specified") end throw(error) end end function photon_lookup(photon::Real; path = :default, sheet_name::String="Current_Test") try if path == :default df = DataFrame(XLSX.readtable(calibration_path(), sheet_name)) else df = DataFrame(XLSX.readtable(path, sheet_name)) end Qi = df |> @filter(_.Photons == photon) |> @map({Wavelength = _.Wavelength, ND = _.ND, Intensity = _.Intensity}) |> DataFrame if !isempty(Qi) #Only return f an entry exists return (Qi.Wavelength[1], Qi.ND[1], Qi.Intensity[1]) end catch error if isa(error, AssertionError) println("Calibration File Incorrectly specified") end throw(error) end end """ This function converts a dataframe of Any to one matching each row type. catchNaN allows it to catch NaN errors from excel """ function safe_convert(dataframe::DataFrame; verbose = false) new_obj = DataFrame(dataframe) for (idx, col) in enumerate(eachcol(dataframe)) #println(names(dataframe)[idx]) typ = typeof(col[1]) #Check if there are #We will try to convert each row. If it does not work, we can remove the NaN #println(col) if ("NaN" ∈ col) #Check if there exists a word NaN in the row (excel will call these strings) #print("Is NaN") #debugging statements whereNaN = findall(col .== "NaN") #println("At position $whereNaN") for idxNaN in whereNaN if verbose println("Indexes where a NaN is: $idxNaN") end col[idxNaN] = NaN #Instead use a NaN Floating point objects end new_obj[:, idx] = convert.(typ, col) elseif !all(isa.(col, typ))#if col[1] #This is for if there is a Int to Float64 error whereNotSame = findall(map(!, isa.(col, typ))) irregular_type = col[whereNotSame[1]] |> typeof if verbose println(col[whereNotSame[1]]) println("Column type: $typ") println("Irregular type: $(irregular_type)") end if irregular_type == Int64 && typ == Float64 #This can cause an error new_obj[!, idx] = convert.(typ, col) #Convert all values to Float64 else new_obj[!, idx] = convert.(irregular_type, col) #Convert all values to Float64 end else #conv = convert.(typ, col) new_obj[!, idx] = convert.(typ, col) end end return new_obj end function parseColumn!(T::Type, dataframe::DataFrame, col::Symbol) if all(isa.(dataframe[!, col], T)) println("Already converted") else dataframe[! 
, col] = parse.(T, dataframe[:, col]) end end parseColumn!(dataframe::DataFrame, col::Symbol) = parseColumn!(Int64, dataframe, col) #============================================================================================= These fuctions help extract experiments =============================================================================================# """ If you pass either a named tuple or a dataframe row, this will pull out all the related """ function matchExperiment(trace::DataFrame, info::NamedTuple) return_traces = copy(trace) if haskey(info, :Date) return_traces = return_traces |> @filter(_.Date == info.Date) |> DataFrame end if haskey(info, :Number) return_traces = return_traces |> @filter(_.Number == info.Number) |> DataFrame end if haskey(info,:Photoreceptor) return_traces = return_traces |> @filter(_.Photoreceptor == info.Photoreceptor) |> DataFrame end if haskey(info, :Condition) return_traces = return_traces |> @filter(_.Condition == info.Condition) |> DataFrame end if haskey(info, :Channel) return_traces = return_traces |> @filter(_.Channel == info.Channel) |> DataFrame end if haskey(info, :Genotype) return_traces = return_traces |> @filter(_.Genotype == info.Genotype) |> DataFrame end if haskey(info, :Age) return_traces = return_traces |> @filter(_.Age == info.Age) |> DataFrame end return return_traces end matchExperiment(trace::DataFrame, row::DataFrameRow) = matchExperiment(trace, NamedTuple(row)) function matchExperiment(trace::DataFrame, rows::DataFrame) return_dataset = DataFrame() for row in eachrow(rows) dataset_i = matchExperiment(trace, row) return_dataset = vcat(return_dataset, dataset_i) end return return_dataset end function excludeExperiment(trace::DataFrame, info::NamedTuple) data_opposite_match = matchExperiment(trace, info) excluded_experiment = DataFrame() for row in eachrow(trace) if row ∉ eachrow(data_opposite_match) push!(excluded_experiment, row) end end return excluded_experiment end excludeExperiment(trace::DataFrame, row::DataFrameRow) = excludeExperiment(trace, NamedTuple(row)) function excludeExperiment(trace::DataFrame, rows::DataFrame) return_dataset = DataFrame() for row in eachrow(rows) dataset_i = excludeExperiment(trace, row) return_dataset = vcat(return_dataset, dataset_i) end return return_dataset end function match_excludeExperiment(trace::DataFrame, match_rows, exclude_rows) matched = matchExperiment(trace, match_rows) excluded = excludeExperiment(matched, exclude_rows) return excluded end """ This function takes the whole dataset and then returns it as a partition of that dataset """ function matchDataset(dataset::Dict{String, DataFrame}, info) new_dataset = Dict{String, DataFrame}() new_dataset["ALL_FILES"] = matchExperiment(dataset["ALL_FILES"], info) new_dataset["TRACES"] = matchExperiment(dataset["TRACES"], info) new_dataset["EXPERIMENTS"] = matchExperiment(dataset["EXPERIMENTS"], info) new_dataset = runConditionsAnalysis(new_dataset) new_dataset = runStatsAnalysis(new_dataset) return new_dataset end function excludeDataset(dataset::Dict{String, DataFrame}, info) new_dataset = Dict{String, DataFrame}() new_dataset["ALL_FILES"] = excludeExperiment(dataset["ALL_FILES"], info) new_dataset["TRACES"] = excludeExperiment(dataset["TRACES"], info) new_dataset["EXPERIMENTS"] = excludeExperiment(dataset["EXPERIMENTS"], info) new_dataset = runConditionsAnalysis(new_dataset) new_dataset = runStatsAnalysis(new_dataset) return new_dataset end function concatDatasets(dataset1::Dict{String, DataFrame}, dataset2::Dict{String, DataFrame}) 
dataset1["ALL_FILES"] = vcat(dataset1["ALL_FILES"], dataset2["ALL_FILES"]) dataset1["TRACES"] = vcat(dataset1["TRACES"], dataset2["TRACES"]) dataset1["EXPERIMENTS"] = vcat(dataset1["EXPERIMENTS"], dataset2["EXPERIMENTS"]) dataset1["CONDITIONS"] = vcat(dataset1["CONDITIONS"], dataset2["CONDITIONS"]) dataset1["STATS"] = vcat(dataset1["STATS"], dataset2["STATS"]) return dataset1 end """ This function matches the experiments in info and then switches the include flag to false """ function flagExperiment(trace::DataFrame, info) return_traces = copy(trace) matched = matchExperiment(trace, info) matched_indexes = indexin(eachrow(matched), eachrow(trace)) return_traces[matched_indexes, :INCLUDE] .= false return return_traces end function flagExperiment!(trace::DataFrame, info) matched = matchExperiment(trace, info) matched_indexes = indexin(eachrow(matched), eachrow(trace)) trace[matched_indexes, :INCLUDE] .= false end function unflagALL!(dataset) dataset["TRACES"][:, :INCLUDE] .= true dataset["EXPERIMENTS"][:, :INCLUDE] .= true end """ This function won't work right until we can open data from XLSX files """ function analyzeXLSX(filename::String, data::Experiment; verbose = false) filenames = joinpath(splitpath(data.HeaderDict["abfPath"])[1:end-1]...) |> parseABF verbose ? print("Analyzing data for $filename \n Begin...") : nothing dataset = createDataset(filenames, verbose = verbose) verbose ? print("Files, ") : nothing dataset = runTraceAnalysis(dataset, verbose = verbose) verbose ? print("Traces, ") : nothing dataset = runExperimentAnalysis(dataset, verbose = verbose) verbose ? print("Experiments. Completed ") : nothing dataset = runConditionsAnalysis(dataset, verbose = verbose) verbose ? print("Conditions, ") : nothing dataset = runStatsAnalysis(dataset, verbose = verbose) verbose ? println("Stats. Completed.") : nothing XLSX.openxlsx(filename, mode = "rw") do xf for key in keys(dataset) XLSX.addsheet!(xf, key) sheet = xf[key] XLSX.writetable!(sheet, dataset[key]) end end return dataset end #Extend the writeXLSX import ElectroPhysiology.writeXLSX #we need to do this or it will get caught in a recursion import ElectroPhysiology.Experiment function writeXLSX(filename::String, data::Experiment, mode::Symbol; verbose = true, kwargs...) println("Editing the function") if mode == :analysis dataset = runDataAnalysis(data; verbose = false) #we need to change the photons of the experiment here qPhotons = dataset["TRACES"] |> @unique(_.Photons) |> DataFrame @assert length(qPhotons.Photons) == size(data, 1) setIntensity(data.stimulus_protocol, qPhotons.Photons) ##Set the intensity of the flash intensities println(dataset["TRACES"]) verbose ? print("Saving excel file... ") : nothing writeXLSX(filename, data; verbose = verbose, kwargs...) verbose ? println("Completed") : nothing XLSX.openxlsx(filename, mode = "rw") do xf for key in keys(dataset) XLSX.addsheet!(xf, key) sheet = xf[key] XLSX.writetable!(sheet, dataset[key]) end end else verbose ? print("Saving excel file... ") : nothing writeXLSX(filename, data; verbose = verbose, kwargs...) verbose ? println("Completed") : nothing end end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
3539
import Base.NamedTuple date_regex = r"(?'Year'\d{2,4})_(?'Month'\d{1,2})_(?'Date'\d{1,2})_(?'Description'.+)" #animal_file_regex = r"(?'Animal'\D+)(?'Number'\d)_(?'Age'.+)_(?'Genotype'.+)" nd_file_regex = r"nd(?'ND'.{1,3})_(?'Percent'\d{1,3})p_.+abf" animal_regex = r"(_m|(?'Animal'Mouse|Zebrafish|Organoid)|m)(?'Number'\d)" age_regex = r"_P(?'Age'\d*|)" adult_regex = r"(?'IsAdult'(?i)adult)" genotype_regex = r"_(?'Genotype'WT|DR|R141C|RS1KO|C59S|MelKO|GNAT-KO|GNAT-HT)" cond_regex = r"(?'Condition'Drugs|NoDrugs|BaCl_LAP4|BaCl|No drugs|No Drugs)" pc_regex = r"(?'Photoreceptors'Cones|Rods)" color_regex = r"(?'Color'blue|green|Blue|Green|UV|365|365UV|520|520Green|525|525Green)" avg_regex = r"(?'Correct'Average|average)" background_regex = r"(?'Background'withback|noback)" percent_regex = r"(?'Percent'\d{1,3})(%|p)" nd_regex = r"nd(?'ND'\d{1,3})" #nd_regex = r"nd(?'ND'.{1,3})_(?'Percent'\d{1,3})p" NamedTuple(m::RegexMatch) = NamedTuple{Symbol.(Tuple(keys(m)))}(values(m.captures)) function findmatch(str_array::Vector{String}, reg_format::Regex; verbose=false, first=true) matches = map(r -> match(reg_format, r), str_array) #println(matches) #println("Revise is working") #println(any(.! isnothing.(matches))) if any(.! isnothing.(matches)) if verbose println("We found a format") end if first return matches[findall(!isnothing, matches)][1] |> NamedTuple else return matches[findall(!isnothing, matches)] .|> NamedTuple end else if verbose println("We did not find a format") end end end function findmatch(path::String, reg_format::Regex; kwargs...) str_array = splitpath(path) return findmatch(str_array, reg_format; kwargs...) end function find_condition(str_array; possible_conds=["BaCl", "BaCl_LAP4", "NoDrugs"]) for cond in possible_conds val = findall(str_array .== cond) if !isempty(val) cond = str_array[val] return cond[1] end end end """ These functions keep only things parsed as the object T, which defaults to Int64 """ filter_string(::Type{T}, str::String) where {T} = filter(!isnothing, map(c -> tryparse(T, c), split(str, ""))) |> join filter_string(::Type{T}, str::SubString{String}) where {T} = filter_string(T, str |> string) filter_string(str::String) = filter(!isnothing, map(c -> tryparse(Int64, c), split(str, ""))) |> join filter_string(::Type{T}, ::Nothing) where {T} = nothing """ This function takes a named tuple that contains numbers and cleans those numbers """ function parseNamedTuple(::Type{T}, nt::NamedTuple{keys}) where {T<:Real,keys} nt_vals = [values(nt)...] #Fill nt_vals with the values of the NamedTuple new_vals = [] for itm in nt_vals #println(itm |> typeof) if isa(itm, String) || isa(itm, SubString{String})#If the item is not nothing #We only want to parse the item if all of it is a number #println(itm) val = tryparse(T, itm) #Try to parse the item as a Int64 if !isnothing(val) push!(new_vals, val) else #We should make sure that we at least convert these to a string push!(new_vals, String(itm)) end else push!(new_vals, itm) end end NamedTuple{keys}(new_vals) end #If no type is provided the namedtuple is parsed as a Int64 parseNamedTuple(nt::NamedTuple{keys}) where {keys} = parseNamedTuple(Int64, nt)
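The NamedTuple(::RegexMatch) extension above is what turns named capture groups into fields; a quick self-contained check with date_regex:

```julia
m  = match(date_regex, "2022_5_12_Mouse2_P13_WT")
nt = NamedTuple(m)   # (Year = "2022", Month = "5", Date = "12", Description = "Mouse2_P13_WT")
parseNamedTuple(nt)  # numeric captures become Int64; Description stays a String
```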
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
1708
""" A simple stimulus """ function stimulus_callback(step_begin, step_end, level) condition_fn1(u, t, integrator) = t == step_begin affect_fn1!(integrator) = integrator.u[1] = level cb_fn1 = DiscreteCallback(condition_fn1, affect_fn1!) condition_fn2(u, t, integrator) = t == step_end affect_fn2!(integrator) = integrator.u[1] = 0.0 cb_fn2 = DiscreteCallback(condition_fn2, affect_fn2!) return CallbackSet(cb_fn1, cb_fn2) end #u = [ui, uc] ui = stimulus, uc = artifact response #P = [A, C] A = Amplitude, C = Capacitance RC(du, u, p, t) = du[2] = ((u[1] - u[2]) / (p[2])) function CapModel(du, u, p, t) end function RCArtifact(data::Experiment, p0; model = :RC) u0 = [0.0, 0.0] tstops = data.stim_protocol[1].timestamps tspan = (data.t[1], data.t[end]) stim = stimulus_callback(tstops[1], tstops[2], p0[1]) prob = ODEProblem(RC, u0, tspan, p0) sol = solve(prob, callback=stim, tstops=tstops) artifact = sol(data.t, idxs=1) - sol(data.t, idxs=2) #The second phase will be fitting the artifact to the data return artifact end function fitArtifact(data::Experiment; p0 = [150.0, 1.0e-3], swp = 1, ch = 2) println(size(data)) min_func(x) = MeanSquaredError(RCArtifact(data, x), data.data_array[swp,:,ch]) results = optimize(min_func, p0) pOPT = Optim.minimizer(results) return pOPT end function removeArtifact(exp::Experiment) data = deepcopy(exp) for swp in 1:size(data, 1), ch in 1:size(data,3) pOPT = fitArtifact(data) artifact = RCArtifact(data_unfilt, pOPT) artifact_removed = artifact .- data_unfilt.data_array[swp, :, ch] end return data end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
288
using Documenter, PhysiologyAnalysis makedocs( sitename = "PhysiologyAnalysis.jl", #modules = [ PhysiologyAnalysis ], #pages = [ # "Home" => "index.md" #] ) #deploydocs( #repo = "github.com/mattar13/PhysiologyAnalysis.jl.git", #target = "build" #)
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
5156
module PhysiologyAnalysis using Requires # The top level is the ElectroPhysiology package. These are not imported into the workspace using Dates using ElectroPhysiology import ElectroPhysiology: Experiment, readABF, parseABF import ElectroPhysiology: eachtrial, eachchannel import ElectroPhysiology: now, year, month, day, hour, minute, second import ElectroPhysiology: TWO_PHOTON import ElectroPhysiology: readABFInfo, getABF_datetime #= Packages used for fitting data ====================================# using LsqFit #Used for fitting amplification, Intensity Response, and Resistance Capacitance models #= Packages used for Analyzing data ==================================# import Polynomials as PN #used for fitting and stats using StatsBase #Used for mean, std, and SEM functions. using Statistics using HypothesisTests import Statistics.mean #= Packages not yet uses using Distributions using Statistics, StatsBase #These functions use R functions as well as StatsBase #export some basic functions from ============================================================# export readABF, parseABF export plt #This package does 3 things: #1) Fitting ============================================================================# include("Fitting/Models.jl") export HILL_MODEL, HILLfit, STFfit export AMP, AMPfit export curve_fit #curve fitting from LsqFit #2) Data anlysis ========================================================================# include("Analysis/ERGAnalysis/ERGAnalysis.jl") #export calculate_basic_stats export saturated_response, dim_response export minima_to_peak, time_to_peak export percent_recovery_interval #This finds the dominant time constant export recovery_time_constant #This finds the recovery time constant export integral #This finds the integration time export get_response export calculate_threshold #using JLD2 #Maybe this should be added to Requires.jl include("Analysis/WaveAnalysis/thresholding.jl") export calculate_threshold using Peaks #Use this as a better way to find peaks include("Analysis/WaveAnalysis/TimescaleAnalysis.jl") export get_timestamps, extract_interval export max_interval_algorithim, timeseries_analysis export findmaxima export crosscor, crosscor! include("Analysis/WholeCellAnalysis/passive_analysis.jl") export calculate_baseline, calculate_peak export calculate_resistance, calculate_capacitance export extract_timepoint # These functions are used by the base #This file contains things like extraction and convienance functions function set_calibration_path(pathname::String ;path = "$(homepath)/Datasheets/calibration.txt") open(path, "w") do file write(file, pathname) end end using FileIO, Images include("Analysis/ImagingAnalysis/PixelExtraction.jl") export zProject, frameAverage export normalize, binarize export findROIcentroid using ImageFiltering include("Analysis/ImagingAnalysis/deltaF.jl") export deltaF, deltaF_F, roll_mean include("Analysis/ImagingAnalysis/ROIAnalysis.jl") export findROIcentroids homepath = joinpath(splitpath(pathof(PhysiologyAnalysis))[1:end-1]...) 
calibration_path() = read("$(homepath)/Datasheets/calibration.txt", String) #3) Import all Datasheet tools ===========================================================# function __init__() @require OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" begin using .OrdinaryDiffEq @require ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" begin println("Loading differential equations and nose fitting") using .ModelingToolkit include("Fitting/NoseModel.jl") export findNosePeak end end #Eventually we should massively restructure this @require XLSX = "fdbf4ff8-1666-58a4-91e7-1b58723a45e0" begin #println(XLSX exported) println("Loading dataframes and file opening") using .XLSX @require DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" begin using .DataFrames @require Query = "1a8c2f83-1ff3-5112-b086-8aa67b057ba1" begin using .Query include("Datasheets/FilePathExtraction.jl") export traverse_root export parse_cell_details include("Datasheets/DataSheetCreation.jl") export create2PDataSheet export save2PDataSheet, open2PDataSheet include("Datasheets/DataSheetModify.jl") export expand_dates include("Datasheets/DataSheetAnalysis.jl") export pair_experiments!, IV_analysis! end end end @require PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" begin using .PyCall @require Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d" begin using .Conda include("Analysis/ImagingAnalysis/CellPose_port.jl") println("CellPose port loaded") export cellpose_model end end #We want to add something for PyCall end end
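Because __init__ relies on Requires, the datasheet and imaging extras only become available after their dependencies are loaded in the user's session; a typical sequence might be:

```julia
using PhysiologyAnalysis
using XLSX, DataFrames, Query  # triggers the nested @require block above
# From here on create2PDataSheet, save2PDataSheet, pair_experiments!, etc. are defined.
```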
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
620
""" This function is for computing the R-squared of a polynomial """ function RSQ(poly::PN.Polynomial, x, y) ŷ = poly.(x) ȳ = sum(ŷ) / length(ŷ) SSE = sum((y - ŷ) .^ 2) SST = sum((y .- ȳ) .^ 2) 1 - SSE / SST end function RSQ(ŷ::Array{T}, y::Array{T}) where {T<:Real} ȳ = sum(ŷ) / length(ŷ) SSE = sum((y - ŷ) .^ 2) SST = sum((y .- ȳ) .^ 2) 1 - SSE / SST end function sig_symbol(val) if val <= 0.001 return "***" elseif val <= 0.005 return "**" elseif val <= 0.05 return "*" else return "-" end end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
8383
""" This is useful for finding sequences of items. Requires a bitvector """ function findsequential(sequence::BitVector; seq_to_find=:all) #first we need to do a normal findall sequences = Vector{Int64}[] #Save all sequences in a vector of sequences current = Int64[] for (idx, itm) in enumerate(sequence) if itm #If an item is true, push it to the current sequence push!(current, idx) elseif !isempty(current) && !itm #If the current sequence is not empty and the item is false push!(current, idx) #push the item to the current sequence if seq_to_find == :first #if we only want to find the first sequence, then return return current end push!(sequences, current) #push the current sequence to all sequences current = Int64[] #clear the current sequence end end if seq_to_find == :last return sequences[end] else return sequences end end function gaussian_saturation(data::Experiment{T}; p0=[-250.0, 0.5, 80.0], ub=[0.0, 1.0, 10e3] ) where {T<:Real} rmaxes = zeros(size(data, 1), size(data, 3)) f(xs, p) = map(x -> p[1] * exp((-(x - p[2])^2) / 2 * p[3]), xs) #model(xs, p) = map(x -> f(x, p), xs) t = data.t for swp = axes(data, 1), ch = axes(data, 3) ȳ = data.data_array[swp, :, ch] fit = curve_fit(f, t, ȳ, p0, upper=ub) y = f(t, fit.param) #Generate the model fit idx_min = argmin(y) resp_val = ȳ[idx_min] rmaxes[swp, ch] = resp_val end return rmaxes end function histogram_saturation(data::Experiment{T}; precision::Int64=100) where {T<:Real} norm_factor = minimum(data) rmaxes = zeros(size(data, 1), size(data, 3)) minima = minimum(data, dims=2)[:, 1, :] for swp = axes(data, 1), ch = axes(data, 3) #Lets try to quickly zero any positive results #y_data = data[swp, :, ch] #y_data *= y_data[swp, y_data .==] y_data = data[swp, :, ch] ./ norm_factor hfit = Distributions.fit(Histogram, y_data, LinRange(0.15, 1.0, precision)) weights = hfit.weights / maximum(hfit.weights) edges = collect(hfit.edges[1])[1:length(weights)] resp = edges[argmax(weights)] #println(minimum(edges)) if resp == minimum(edges) #println("No Nose") #println(minima[swp, ch]) rmaxes[swp, ch] = minima[swp, ch] else #println("Nose") #println(resp) rmaxes[swp, ch] = resp * norm_factor end end rmaxes end """ This function uses a histogram method to find the saturation point. - In ERG datas, a short nose component is usually present in saturated values - Does this same function work for the Rmax of nonsaturated responses? - Setting the saturated threshold to infinity will completely disregard the histogram method """ function saturated_response(data::Experiment{T}; mode = :Gaussian, kwargs...) where {T<:Real} #We want to pick the region to analyze first if mode == :Gaussian #This mode is better for data that has rmaxes = gaussian_saturation(data; kwargs...) elseif mode == :Histogram rmaxes = histogram_saturation(data; kwargs...) elseif mode == :Logistic println("Not implemented") #= rmaxes = zeros(size(data,1), size(data,3)) nose_peak = findNosePeak(data; kwargs...) 
#First use the logistic function to fit out the nose resp = minimum(data, dims = 2)[:, 1, :] #Then find the minimum for i in axes(nose_peak,1) vals = (resp[:, i] .>= nose_peak[i]) .* resp[:,i] sats = (resp[:, i] .< nose_peak[i]) .* nose_peak[i] rmaxes[:, i] .= (vals .+ sats) end #println(rmaxes) return rmaxes =# end end function minima_to_peak(data::Experiment; verbose=false) #We need to exclude the area resp = zeros(size(data, 1), size(data, 3)) for swp = axes(data, 1), ch = axes(data, 3) past_stim = findall(data.t .> 0.0) data_section = data[swp, past_stim, ch] #Isolate all the items past the stim # cutoff the analysis at the maximum (A-wave is before the B-wave) cutoff_idx = argmax(data_section) max_val = maximum(data_section) data_section = data_section[1:cutoff_idx] #Measure the minimum betweent the first value and the maximum min_val = minimum(data_section) if verbose println("Minimum: $min_val") println("Maximum: $max_val") println(max_val - min_val) end resp[swp, ch] = max_val - min_val end resp end function findRDIM(responses::Vector{T}, rng = (0.1, 0.4)) where T <: Real #This section we need to extract Rdim responses. normalized_responses = abs.(responses) ./ maximum(abs.(responses)) #println(normalized_responses) rdim_idxs = findall(rng[1] .< normalized_responses .< rng[2]) #Basically the rdim will now be any response under the half saturation point if isempty(rdim_idxs) rdim_idx = argmin(responses) else rdim_min = argmax(responses[rdim_idxs]) rdim_idx = rdim_idxs[rdim_min] end rdim_idx end """ This function calculates the time to peak using the dim response properties of the concatenated file """ function time_to_peak(data::Experiment{T}) where {T<:Real} over_stim = findall(data.t .> 0.0) #We only want to extract time points after the stim lowest_val = map(x -> x[2], argmin(data[:, over_stim, :], dims=2))[:, 1, :] lowest_val .+= over_stim[1] - 1 data.t[lowest_val] .* 1000 end """ This function is the amount of time that a certain trace spends in a particular bandwith. I think it will be similar to the pepperburg, So this may become that function The "criterion" is the percent. This function will measure how long a response takes to return to a specific criterion amount (iᵣ) -By default iᵣ is set to 0.60. An intial problem is the tendancy for the function to pick up drift and other packets. We can eliminate non-sequential packets For more information on this function see Pepperburg & Cornwall et al. 
Light-dependent delay in the falling phase of the retinal rod photoresponse Use: >>> rmaxes = saturated_response(data1_testA) >>> Tᵣ = percent_recovery_interval(data1_testA, rmaxes) """ function percent_recovery_interval(data::Experiment{T}, rmaxes::AbstractArray{T}; iᵣ::T=0.50) where {T<:Real} #first we can normalize the data to a range @assert size(data,3) == size(rmaxes, 2) #rmax data matches data channels #Tᵣ = fill(NaN, size(data,1), size(data,3)) Tᵣ = zeros(size(data,1), size(data,3)) for swp in axes(data, 1), ch in axes(data, 3) data_percent = data.data_array[swp, :, ch] ./ rmaxes[ch] recovery_seqs = findsequential(data_percent .> iᵣ, seq_to_find=:all) #we have to eliminate all datavalues under data.t = 0.0 after_stim = findall(map(seq -> all(data.t[seq] .> 0.0), recovery_seqs)) #this returns false anywhere where the data.t is less than 0.0 recovery_seqs = recovery_seqs[after_stim] #this eliminates all sequences with less than 0.0 time if !isempty(recovery_seqs) #if there are no sequences then return 0.0 long_seq = argmax(length.(recovery_seqs)) recovery_idx = recovery_seqs[long_seq][end] Tᵣ[swp, ch] = data.t[recovery_idx] end end return Tᵣ end """ The integration time is fit by integrating the dim flash response and dividing it by the dim flash response amplitude - A key to note here is that the exact f(x) of the ERG data is not completely known - The integral is therefore a defininte integral and a sum of the area under the curve - This equation is according to Baylor DA, Hodgkin AL (1973) Detection and resolution of visual stimuli by turtle photoreceptors. J Physiol 234:163–198. """ function integral(data::Experiment{T}) where {T<:Real} #we want this to be equal to any response after the stimuli data_section = data[:, data.t.>0.0, :] data_section = abs.(data_section) data_section ./= maximum(data_section, dims=2) return sum(data_section, dims=2) * data.dt end
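For a concrete sense of findRDIM above, here is a worked call on synthetic response amplitudes:

```julia
responses = [12.0, 48.0, 95.0, 230.0, 410.0]
# normalized: ≈ [0.03, 0.12, 0.23, 0.56, 1.00]; the default (0.1, 0.4) window keeps 48 and 95
idx = findRDIM(responses)
responses[idx]  # 95.0, the largest response still inside the dim window
```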
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
1187
""" In order to run this, we need to have python included in the path """ function build_cellpose(;python_env = "") ENV["PYTHON"] = python_env Pkg.build("PyCall") #This is necessary to run cellpose from new Conda.pip_interop(true) Conda.pip("install", "numpy") Conda.pip("install --user", "opencv-python") Conda.pip("install", "cellpose") end function cellpose_model(;model_type="cyto", flow_threshold = 0.4, cellprob_threshold = 0.0, relative_path_loc = "Analysis\\ImagingAnalysis\\CellPoseModels\\") #╔═╡Set up the python environment to play nice with julia path_loc = joinpath(splitpath(pathof(PhysiologyAnalysis))[1:end-1]..., relative_path_loc) try py""" import os os.environ["CELLPOSE_LOCAL_MODELS_PATH"] = $path_loc import cellpose from cellpose import models """ #╔═╡Import and create the models cellpose = pyimport("cellpose") model = cellpose.models.Cellpose(model_type=model_type) return model catch error #If the error is with python, I want to build_cellpose from PyCall throw(error) end end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
944
collapse_dims(a) = dropdims(a, dims = (findall(size(a) .== 1)...,))

function zProject(exp::Experiment{TWO_PHOTON, T}) where T <: Real
    px, py = exp.HeaderDict["framesize"]
    z_proj = (sum(exp, dims = 2) / size(exp, 2))
    return reshape(z_proj, (px, py))
end

frameAverage(data::Experiment{TWO_PHOTON, T}) where T <: Real = sum(data, dims = 1) / size(data, 1)

function binarize(img, threshold = 0.5)
    return img .> threshold
end

function findROIcentroid(data, ROI_id::Int64)
    ROI_mask = getROImask(data, ROI_id)
    ROI_mask_coords = map(coord -> [coord[1], coord[2]], findall(ROI_mask .!= 0))
    ROI_coords_arr = hcat(ROI_mask_coords...)
    ROI_centroid = Tuple(mean(ROI_coords_arr, dims = 2)') #Find the centroid by taking the mean
    return ROI_centroid
end

function findROIcentroid(data)
    ROI_mask = getROImask(data)
    ROI_centroids = map(i -> findROIcentroid(data, i), 1:maximum(ROI_mask))
    return ROI_centroids
end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
1620
function findROIcentroids(array::Array{Int,2}, xlim::AbstractRange, ylim::AbstractRange) # Ensure the array is square size_x, size_y = size(array) @assert size_x == size_y "The input array must be square." # Calculate the number of elements along each axis n = size_x # Calculate the physical spacing between pixels dx = (xlim[2] - xlim[1]) dy = (ylim[2] - ylim[1]) # Find unique labels excluding 0 (assumed to be background) labels = setdiff(unique(array), [0]) # Dictionary to store centroids: label => (x_centroid, y_centroid) centroids = Dict{Int, Tuple{Float64, Float64}}() for label in labels # Find indices where array equals the current label inds = findall(array .== label) if isempty(inds) continue # Skip if no indices found for the label end # Initialize sums for x and y positions sum_x = 0.0 sum_y = 0.0 for I in inds i, j = Tuple(I) # Map array indices to physical coordinates x_pos = xlim[1] + (j - 0.5) * dx y_pos = ylim[1] + (i - 0.5) * dy sum_x += x_pos sum_y += y_pos end # Compute the centroid by averaging positions num_points = length(inds) centroid_x = sum_x / num_points centroid_y = sum_y / num_points # Store the centroid in the dictionary centroids[label] = (centroid_x, centroid_y) end return centroids end findROIcentroids(exp) = findROIcentroids(getROImask(exp), exp.HeaderDict["xrng"], exp.HeaderDict["yrng"])
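A small deterministic check of the coordinate mapping above; with unit spacing the centroid of the 2x2 block of 1s lands at its geometric center:

```julia
labels = [0 1 1;
          0 1 1;
          2 0 0]
centroids = findROIcentroids(labels, 0.0:1.0:3.0, 0.0:1.0:3.0)
centroids[1]  # (2.0, 1.0): mean of the pixel centers carrying label 1
```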
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
710
function roll_mean(img; voxel_x = 1, voxel_y = 1, voxel_z = 100, boundary_mode = "replicate")
    if voxel_x == 0
        voxel_x = size(img, 1)
    end
    if voxel_y == 0
        voxel_y = size(img, 2) #assignment, not comparison
    end
    kernel = centered(ones(voxel_x, voxel_y, voxel_z) / (voxel_x * voxel_y * voxel_z))
    imfilter(img, kernel, boundary_mode)
end

function deltaF(img; mode = :mean, kwargs...) #mode is accepted for symmetry with deltaF_F; the rolling mean is always used here
    background = roll_mean(img; kwargs...)
    img .- background
end

function deltaF_F(img; mode = :mean, kwargs...)
    if mode == :rolling_mean
        background = roll_mean(img; kwargs...)
    elseif mode == :mean
        background = mean(img, dims = 3)[:, :, 1]
    end
    (img .- background) ./ background
end
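Both normalizations accept a 3-D stack (x, y, t); a minimal synthetic example using the whole-stack time-mean as the baseline:

```julia
img = rand(Float32, 16, 16, 200)   # synthetic x-by-y-by-t stack
dff = deltaF_F(img; mode = :mean)  # (F - F̄) / F̄ against the time-mean image
size(dff)                          # (16, 16, 200)
```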
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
12753
""" This function returns all the time stamps in a spike or burst array The same function exists in RetinalChaos """ function get_timestamps(tseries::Vector{T}, spike_array::BitArray{1}) where {T<:Real} diff_vals = map(i -> (spike_array[i] - spike_array[i+1]), 1:length(spike_array)-1) diff_starts = findall(x -> x == -1, diff_vals) #This is a list of all the starting points in the array diff_ends = findall(x -> x == 1, diff_vals) #This is a list of all the ending points in the array #If there is one more end than start than we have to remove the last point from the diff_starts if spike_array[1] #This means we start out in a spike and will most likely end o #println("We started out in a spike, the first value will be an end spike") diff_ends = diff_ends[2:end] end if length(diff_starts) > length(diff_ends) #This happens because an end point was cutoff diff_starts = diff_starts[1:length(diff_ends)] elseif length(diff_starts) < length(diff_ends) #This happens because a start point was cutoff diff_ends = diff_ends[2:end] end return zip(tseries[diff_starts], tseries[diff_ends]) |> collect end function get_timestamps(tseries::Vector{T}, spike_array::BitArray{3}) where {T<:Real} n_trials, n_data, n_channels = size(spike_array) tstamps = Array{Vector{Tuple{T,T}}}(undef, (size(spike_array,1), size(spike_array,3))) for trial in 1:n_trials #focus the analysis on the last data channel for channel in 1:n_channels tstamps[trial, channel] = get_timestamps(tseries, spike_array[trial, :, channel]) end end return tstamps end function get_timestamps(exp::Experiment; Z = 4.0) thresholds = calculate_threshold(exp, Z = Z) spike_array = exp.data_array .> thresholds get_timestamps(exp.t, spike_array) end function extract_interval(timestamps::Vector{Tuple{T,T}}, max_duration=10e5, max_interval=10e5, min_duration=0.0, min_interval=0.0 ) where T <: Real durations = map(ts -> (ts[2]-ts[1]), timestamps) intervals = map(i -> timestamps[i][1] - timestamps[i-1][2], 2:length(timestamps)) return durations[min_duration.<durations.<max_duration], intervals[min_interval.<intervals.<max_interval] end function extract_interval(timestamps::Matrix{Vector{Tuple{T,T}}}; kwargs...) where {T<:Real} durations = Array{Vector{T}}(undef, size(timestamps)) intervals = Array{Vector{T}}(undef, size(timestamps)) for trial in axes(timestamps,1) for channel in axes(timestamps, 2) tstamps = timestamps[trial, channel] if !isempty(tstamps) duration_I, interval_I = extract_interval(tstamps; kwargs...) durations[trial, channel] = duration_I intervals[trial, channel] = interval_I end end end return durations, intervals end """ This function uses the Maximum Interval Sorting method to sort bursts in a single trace. It takes in timestamps and returns the burst durations and the spikes per burst """ function max_interval_algorithim(timestamps::Vector{Tuple{T,T}}; ISIstart::T=0.5, ISIend::T=0.5, IBImin::T=1.0, DURmin::T=0.05, SPBmin::Int64=4, verbose=false ) where {T<:Real} burst_timestamps = Tuple[] SPB_list = Float64[] #Lets organize the spipkes into intervals spikes and not spikes durations, intervals = extract_interval(timestamps) bursting = false burst_start_list = T[] burst_end_list = T[] burst_start = 0.0 burst_end = 0.0 SPB = 0 idx = 1 for i in eachindex(intervals) if verbose println("Spike $i") println(burst_start) println(timestamps[i][2]) end #println("Are the spikes part of the burst? $bursting") #println("Are the intervals longer than the minimum? 
$(intervals[i] <= ISIstart)") if bursting == false && intervals[i] <= ISIstart #If the cell does not start as bursting and the interval is under ISI start bursting = true #Begin the burst burst_start = timestamps[i][1] #Record the burst start if verbose println("\t Interval begins") end elseif bursting == true && intervals[i] >= ISIend || i == length(intervals) #If the cell is bursting, and the interval to the next spike is greater than ISI thresh bursting = false #The bursting can stop burst_end = timestamps[i][2] #The burst end can be recorded if verbose println("Interval $(intervals[i]) >= IBImin $(IBImin) $(intervals[i] >= IBImin)") println("Duration $((burst_end - burst_start)) >= $DURmin $((burst_end - burst_start) >= DURmin)") println("Spike per burst $(SPB) >= $SPBmin $(SPB >= SPBmin)") end if intervals[i] >= IBImin && (burst_end - burst_start) >= DURmin && SPB >= SPBmin #If the burst meets all the correct qualifications if verbose println(" Burst #$idx successfully added at timestamp : $burst_start -> $burst_end Duration: $(burst_end - burst_start) > $DURmin Spikes per burst: $SPB > $SPBmin IBI to burst #$(idx+1): $(intervals[i]) " ) end push!(burst_start_list, burst_start) push!(burst_end_list, burst_end) push!(SPB_list, SPB) SPB = 0 idx += 1 elseif i == length(intervals) && (burst_end - burst_start) >= DURmin && SPB >= SPBmin #a weird caveat, bursting has finished but interval has never cleared the ISIend if verbose println(" Burst #$idx successfully added at timestamp : $burst_start -> $burst_end Duration: $(burst_end - burst_start) > $DURmin Spikes per burst: $SPB > $SPBmin " ) end push!(burst_start_list, burst_start) push!(burst_end_list, burst_end) #push!(burst_timestamps, (burst_start, burst_end)) #Record it push!(SPB_list, SPB) SPB = 0 idx += 1 else if verbose println("Burst did not fit recommended qualities") println("Timestamp $idx: $burst_start -> $burst_end") println("DUR $idx: $(burst_end - burst_start) $(<) $DURmin") println("SPB $idx: $SPB < $SPBmin") println("IBI $idx: $(intervals[i])") end end end if bursting == true SPB += 1 end end if length(burst_start_list) > length(burst_end_list) #This algorithim usually leaves one last burst off because it has no end point. We can add this push!(burst_end_list, burst_start_list[end] + intervals[end]) end burst_timestamps = hcat(burst_start_list, burst_end_list) return burst_timestamps, SPB_list end function max_interval_algorithim(timestamp_arr::VecOrMat{Matrix{T}}; reshape_back=true, kwargs...) where {T<:Real} n_sizes = size(timestamp_arr) n_flat = *(n_sizes...) bursts = Vector{Matrix{T}}(undef, n_flat) spd = Vector{Vector{T}}(undef, n_flat) if length(n_sizes) > 1 resize_arr = reshape(timestamp_arr, n_flat) else #We don't need to reshape the array back since it is only 1D reshape_back = false resize_arr = timestamp_arr end for idx in 1:length(timestamp_arr) if isassigned(timestamp_arr, idx) result = max_interval_algorithim(resize_arr[idx]; kwargs...) bursts[idx] = result[1] spd[idx] = (result[2]) end end if reshape_back return reshape(bursts, n_sizes...), reshape(spd, n_sizes...) else return bursts, spd end end function timeseries_analysis(t, vm_array; timestamps_only=false, Z::Float64=4.0, max_spike_duration::Float64=50.0, max_spike_interval=100, max_burst_duration::Float64=10e5, max_burst_interval=10e5, verbose=false ) #where {T, N} N = length(size(vm_array)) #println(N) if verbose print("[$(now())]: Extracting the thresholds... 
") end if N == 1 thresholds = calculate_threshold(vm_array, Z=Z) elseif N == 2 || N == 3 thresholds = calculate_threshold(vm_array, Z=Z, dims=2) end spike_array = Array(vm_array .> thresholds) #println(spike_array |> typeof) if verbose println("Completed") end spikes = get_timestamps(t, spike_array) res = max_interval_algorithim(spikes) if isnothing(res) bursts = spb = nothing else bursts, spb = res end timestamps = Dict( "Spikes" => spikes, "Bursts" => bursts ) if timestamps_only return timestamps else if verbose print("[$(now())]: Extracting the Data... ") end #println(spikes) spike_durs, isi = extract_interval(spikes, max_duration=max_spike_duration, max_interval=max_spike_interval) spike_dur_avg = sum(spike_durs) / length(spike_durs) spike_dur_sem = std(spike_durs) / sqrt(length(spike_durs)) isi_avg = sum(isi) / length(isi) isi_sem = std(isi) / sqrt(length(isi)) if !isnothing(bursts) burst_durs, ibi = extract_interval(bursts, max_duration=max_burst_duration, max_interval=max_burst_interval) burst_dur_avg = sum(burst_durs) / length(burst_durs) burst_dur_sem = std(burst_durs) / sqrt(length(burst_durs)) ibi_avg = sum(ibi) / length(ibi) ibi_sem = std(ibi) / sqrt(length(ibi)) else burst_durs = [] ibi = [] burst_dur_avg = NaN burst_dur_sem = NaN ibi_avg = NaN ibi_sem = NaN end data = Dict( "Time" => t, "DataArray" => vm_array, "Thresholds" => thresholds, "SpikeDurs" => spike_durs, "SpikeDurAvg" => spike_dur_avg, "SpikeDurSEM" => spike_dur_sem, "ISIs" => isi, "ISIAvg" => isi_avg, "ISISEM" => isi_sem, "BurstDurs" => burst_durs, "BurstDurAvg" => burst_dur_avg, "BurstDurSEM" => burst_dur_sem, "IBIs" => ibi, "IBIAvg" => ibi_avg, "IBISEM" => ibi_sem, "SpikesPerBurst" => spb ) if verbose println("Complete") end return timestamps, data end end #Eventually I will add a @require loop here function timeseries_analysis(t::AbstractArray{T}, vm_array::Array{T,N}, save_file::String; tstamps_name="timestamps", data_name="data", verbose=false, kwargs... ) where {T,N} timestamps, data = timeseries_analysis(t, vm_array; kwargs...) if verbose print("[$(now())]: Saving data... ") end #Uncomment to use BSON file format #bson("$(save_file)\\timestamps.bson", timestamps) #bson("$(save_file)\\data.bson", data) #Uncomment to use JLD2 to save the packages save("$(save_file)/$(tstamps_name).jld2", timestamps) save("$(save_file)/$(data_name).jld2", data) if verbose println("Complete") end return timestamps, data end timeseries_analysis(exp::Experiment; kwargs...) = timeseries_analysis(exp.t, exp.data_array; kwargs...) timeseries_analysis(exp::Experiment, save_file::String; kwargs...) = timeseries_analysis(exp.t, exp.data_array, save_file; kwargs...) #=================================Import this for the experiment objects=================================# function get_timestamps(exp::Experiment{T}, threshold::AbstractArray{T}, rng::Tuple; dt=:exp) where {T<:Real} if dt == :exp dt = exp.dt end tseries = collect(rng[1]:dt:rng[2]-dt) tidxs = round.(Int64, (tseries ./ dt) .+ 1) spike_array = exp.data_array[:, tidxs, :] .> threshold get_timestamps(spike_array, tseries) end get_timestamps(exp::Experiment{T}, threshold::AbstractArray{T}; dt=:exp) where {T<:Real} = get_timestamps(exp, threshold, (exp.t[1], exp.t[end]), dt=dt) get_timestamps(exp::Experiment{T}, rng::Tuple; dt=:exp, kwargs...) where {T<:Real} = get_timestamps(exp, calculate_threshold(exp; kwargs...), rng, dt=dt) get_timestamps(exp::Experiment{T}; dt=:exp, kwargs...) 
where {T<:Real} = get_timestamps(exp, calculate_threshold(exp; kwargs...), (exp.t[1], exp.t[end]), dt=dt)
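The timestamp extraction also works on plain vectors, which makes it easy to sanity-check: two synthetic spikes in, two (start, end) tuples out.

```julia
t = collect(0.0:0.001:1.0)
v = zeros(length(t))
v[100:110] .= 1.0  # first synthetic spike
v[500:520] .= 1.0  # second synthetic spike
stamps = get_timestamps(t, v .> 0.5)
length(stamps)     # 2
```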
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
731
""" calculate_threshold(vm_arr::AbstractArray; Z = 4, dims = -1) Finds the threshold of a trace by calculating the average and then adding the 4x the standard deviation. If using a differential solution, make sure dt is set, otherwise the standard deviation will be unevenly sampled """ function calculate_threshold(x::AbstractArray{T}; Z = 4.0, dims = 2) where {T <: Real} if dims == -1 return [mean(x) + Z*std(x)] else avg = mean(x, dims = dims) dev = Z * std(x, dims = dims) return avg + dev #We want these all to come out as vectors vs matrices end end calculate_threshold(data::Experiment{F, T}; kwargs...) where {F, T<:Real} = calculate_threshold(data.data_array; kwargs...)
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
2382
function calculate_baseline(exp::Experiment{E, T}; channel = 1) where {E, T <: Real}
    baselines = Vector{Float64}()
    for (i, trial) in enumerate(eachtrial(exp))
        epoch1_idx = exp.HeaderDict["EpochTableByChannel"][channel].epochWaveformBytrial[i].p2s[2] #epoch boundary index pulled from the ABF header
        avg_base = mean(trial[1, 1:epoch1_idx, 2])
        push!(baselines, avg_base)
    end
    baselines
end

function calculate_peak(exp::Experiment{E, T}; channel = 1, digital_cmd = "Cmd 0") where {E, T <: Real}
    if size(exp, 3) != 3 #We need to add a new channel
        create_signal_waveform!(exp, digital_cmd)
    end
    cmd_v = exp[:, findfirst(exp[1, :, 3] .!= 0.0), 3]
    I_CM = zeros(cmd_v |> size)
    for (i, tr) in enumerate(eachtrial(exp))
        if cmd_v[i] - 10 < 0.0 #I don't know why we have to subtract 10. Maybe this isn't every time
            I_CM[i] = minimum(tr, dims = 2)[1, 1, channel]
        else
            I_CM[i] = maximum(tr, dims = 2)[1, 1, channel]
        end
    end
    I_CM
end

extract_timepoint(exp; timepoint = 0.5, channel = 1) = exp[:, findfirst(exp.t .>= timepoint), channel]

function calculate_resistance(exp)
    V_HOLD = extract_timepoint(exp; channel = 2)
    I_CM = calculate_peak(exp)
    I_RIN = extract_timepoint(exp)
    @. lin_model(x, p) = p[1] * x + p[2]
    Rs_fit = curve_fit(lin_model, I_CM, V_HOLD, [1.0, 0.0])
    Rin_fit = curve_fit(lin_model, I_RIN[1:4], V_HOLD[1:4], [1.0, 0.0]) #only the first four sweeps; the higher command steps are excluded
    Rs = Rs_fit.param[1] * 1e3
    Rin = Rin_fit.param[1] * 1e3
    V_M = Rin_fit.param[2]
    V_HOLD = Rs_fit.param[2] #reuse the name for the fitted holding-voltage intercept
    return Rs, Rin, V_M, V_HOLD
end

function calculate_capacitance(exp; channel = 1)
    V_HOLD = extract_timepoint(exp; channel = 2)
    I_RIN = extract_timepoint(exp) #Find the steady-state current
    epoch_idx1 = exp.HeaderDict["EpochTableByChannel"][channel].epochWaveformBytrial[1].p1s[3] #epoch boundary index pulled from the ABF header
    epoch_idx2 = exp.HeaderDict["EpochTableByChannel"][channel].epochWaveformBytrial[1].p2s[3] #epoch boundary index pulled from the ABF header
    data_section = exp[:, epoch_idx1:epoch_idx2, 1] .- I_RIN
    Q = sum(data_section[4, :]) * (exp.dt / 1e3) * 1e-9 #This is the charge in Amp*ms
    V = V_HOLD[4, 1] * 1e-3 #This is the holding voltage in V
    abs.((Q ./ V) * 1e12) #This is the capacitance in picoFarads
end
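The resistance estimates above come down to straight-line fits of voltage against current. A self-contained sketch of that step with synthetic numbers (the *1e3 scaling mirrors calculate_resistance, but the values and units are made up):

```julia
using LsqFit

@. lin_model(x, p) = p[1] * x + p[2]

I_test = [-100.0, -50.0, 0.0, 50.0, 100.0]  # synthetic currents
V_test = 0.012 .* I_test .+ (-65.0)         # synthetic voltages: slope 0.012, intercept -65

fit = curve_fit(lin_model, I_test, V_test, [1.0, 0.0])
R  = fit.param[1] * 1e3  # same scaling as calculate_resistance
Vm = fit.param[2]        # intercept: the resting/holding estimate
```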
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
2383
function pair_experiments!(dataset::Dict{String, DataFrame}; vmin = 5.30, vmax = 6.30)
    patch_datasheet = dataset["ABF Files"]
    img_datasheet = dataset["TIF Files"]
    paired_experiments = DataFrame(
        "ABF Experiment" => String[],
        "TIF Experiment" => String[],
        "ABF Date" => DateTime[],
        "TIF Date" => DateTime[]
    )
    for (i, row) in enumerate(eachrow(patch_datasheet))
        if occursin("5mins", row.protocols)
            date = row.date_created
            elapsed_dates = (Dates.value.(abs.(date .- img_datasheet[:, "date_created"])) ./ 1000) ./ 60
            idx = findlast(vmin .< elapsed_dates .<= vmax)
            if !isnothing(idx)
                push!(paired_experiments, (patch_datasheet[i, "filename"], img_datasheet[idx, "filename"], date, img_datasheet[idx, "date_created"]))
            end
        end
    end
    dataset["Paired Files"] = paired_experiments
    dataset
end

function IV_analysis!(dataset::Dict{String, DataFrame})
    patch_datasheet = dataset["ABF Files"]
    patch_datasheet[!, "Rs"] = fill(NaN, size(patch_datasheet, 1))
    patch_datasheet[!, "Ri"] = fill(NaN, size(patch_datasheet, 1))
    patch_datasheet[!, "Vh"] = fill(NaN, size(patch_datasheet, 1))
    patch_datasheet[!, "Vm"] = fill(NaN, size(patch_datasheet, 1))
    patch_datasheet[!, "Cm"] = fill(NaN, size(patch_datasheet, 1))
    patch_datasheet[!, "τ"] = fill(NaN, size(patch_datasheet, 1))
    patch_datasheet[!, "Quality"] = fill(NaN, size(patch_datasheet, 1))
    for (i, row) in enumerate(eachrow(patch_datasheet))
        if occursin("IV", row.protocols)
            println(i)
            exp = readABF(row.filename)
            if size(exp, 3) >= 2 && size(exp, 1) == 9
                println(row.protocols)
                println(row.filename)
                Rs, Rin, V_M, V_HOLD = calculate_resistance(exp)
                patch_datasheet[i, "Rs"] = Rs
                patch_datasheet[i, "Ri"] = Rin
                patch_datasheet[i, "Vh"] = V_HOLD
                patch_datasheet[i, "Vm"] = V_M
                patch_datasheet[i, "Cm"] = Cm = calculate_capacitance(exp)
                patch_datasheet[i, "τ"] = Rs * Cm
                patch_datasheet[i, "Quality"] = Rs / Rin * 100
            end
        end
    end
    dataset["ABF Files"] = patch_datasheet
    return dataset
end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
2723
function create2PDataSheet(img_dir, patch_dir; verbose = false)
    all_img_files = traverse_root(img_dir)
    img_files = [] #This is the list of all files actually analyzed
    img_allDates = []
    for (idx, img_file) in enumerate(all_img_files)
        if verbose
            println(img_file)
        end
        try
            img_test = readImage(img_file)
            push!(img_files, img_file)
            push!(img_allDates, img_test.HeaderDict["FileStartDateTime"])
            if verbose
                println("File $idx out of $(length(all_img_files))")
            end
        catch error
            if verbose
                println("File didn't load")
            end
        end
    end
    img_datasheet = DataFrame(
        filename = img_files,
        date_created = img_allDates,
        filetype = "TIF",
        protocols = "Image"
    )
    patch_files = traverse_root(patch_dir)
    patch_allDates = []
    protocols = []
    for (idx, patch_file) in enumerate(patch_files)
        if verbose
            println(patch_file)
        end
        HeaderDict = ElectroPhysiology.readABFInfo(patch_file)
        push!(patch_allDates, getABF_datetime(patch_file))
        push!(protocols, HeaderDict["ProtocolPath"])
        if verbose
            println("File $idx out of $(length(patch_files))")
        end
    end
    patch_datasheet = DataFrame(filename = patch_files, date_created = patch_allDates, filetype = "ABF", protocols = protocols)
    all_files = [img_datasheet; patch_datasheet] |> @orderby(_.date_created) |> DataFrame
    return Dict{String, DataFrame}(
        "All Files" => all_files,
        "TIF Files" => img_datasheet,
        "ABF Files" => patch_datasheet
    )
end

function update2PDataSheet(img_dir, patch_dir, img_datasheet, patch_datasheet)
    all_img_files = traverse_root(img_dir) #Walk through each img file and check to see if it is in the datasheet
    patch_files = traverse_root(patch_dir)
end

function save2PDataSheet(filename::String, dataset::Dict{String, DataFrame})
    XLSX.openxlsx(filename, mode = "w") do xf
        for (k, v) in dataset
            if k == "All Files" #This one we want to rename sheet 1 (always first)
                sheet_K = xf[1] #Sheet 1 should be renamed
                XLSX.rename!(sheet_K, "All Files")
            else
                sheet_K = XLSX.addsheet!(xf, k)
            end
            XLSX.writetable!(sheet_K, v)
        end
    end
    println("Datasheet saved")
end

function open2PDataSheet(filename)
    xf = XLSX.readxlsx(filename)
    df_set = Dict{String, DataFrame}()
    for sn in XLSX.sheetnames(xf)
        df_set[sn] = DataFrame(XLSX.readtable(filename, sn))
    end
    return df_set
end
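# A hypothetical end-to-end sketch (all paths are assumptions): build the datasheet,
# pair patch and imaging files, run the IV analysis defined earlier, and round-trip
# the result through Excel.
dataset = create2PDataSheet("G:/Data/TwoPhoton", "G:/Data/Patching"; verbose = true)
pair_experiments!(dataset)                          # adds the "Paired Files" table
IV_analysis!(dataset)                               # fills Rs/Ri/Vh/Vm/Cm/τ/Quality columns
save2PDataSheet("G:/Data/2P_datasheet.xlsx", dataset)
dataset2 = open2PDataSheet("G:/Data/2P_datasheet.xlsx")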
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
200
function expand_dates(datasheet)
    datasheet |>
    @mutate(
        Date = Date(_.date_created), #use each row's timestamp; the flattened original indexed row 1 for every row
        Time = Time(_.date_created)
    ) |>
    DataFrame
end
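# A minimal usage sketch (not from the source), assuming DataFrames, Dates, and
# Query are loaded as the pipeline above requires.
ds = DataFrame(date_created = [DateTime(2024, 9, 3, 14, 30), DateTime(2024, 9, 4, 9, 15)])
expand_dates(ds) # adds a Date and a Time column derived from each row's DateTime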
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
1762
function traverse_root(root_path)
    path_list = String[]
    for (root, dirs, files) in walkdir(root_path)
        for file in files
            full_path = joinpath(root, file) # Create the full path to the file
            push!(path_list, full_path)
        end
    end
    return path_list
end

#%% Lets get a filename and extract all of the characteristics
date_age_regex = r"(?'Year'\d{2,4})_(?'Month'\d{1,2})_(?'Date'\d{1,2})_(?'Genotype'.+)_P(?'Age'\d+)" #One regex will have the age
date_regex = r"(?'Year'\d{2,4})_(?'Month'\d{1,2})_(?'Date'\d{1,2})_(?'Genotype'.+)" #One regex won't have the age
cell_patch_regex = r"Cell(?'Cell'\d+)"
ca_catch_regex = r"ca_img(?'Cell'\d)"
cell_catch_regex = r"cell_img(?'Cell'\d)"

function parse_cell_details(filename)
    date_age_match = match(date_age_regex, filename)
    date_match = match(date_regex, filename)
    if !isnothing(date_age_match)
        genotype = date_age_match["Genotype"]
        age = parse(Int64, date_age_match["Age"])
    elseif !isnothing(date_match) #We failed to notate the age
        genotype = date_match["Genotype"]
        age = 30
    else
        genotype = "WT"
        age = 30
    end
    cell_match = match(cell_patch_regex, filename)
    ca_catch_match = match(ca_catch_regex, filename)
    cell_catch_match = match(cell_catch_regex, filename)
    if !isnothing(cell_match)
        cell_n = parse(Int64, cell_match["Cell"])
    elseif !isnothing(ca_catch_match)
        cell_n = parse(Int64, ca_catch_match["Cell"])
    elseif !isnothing(cell_catch_match)
        cell_n = parse(Int64, cell_catch_match["Cell"])
    else
        cell_n = 0
    end
    println(age, genotype, cell_n)
    return age, genotype, cell_n
end
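# A minimal usage sketch (the filename is an assumption built to match the
# regexes above):
age, genotype, cell_n = parse_cell_details("2024_09_03_WT_P14_Cell2.abf")
# -> age = 14, genotype = "WT", cell_n = 2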
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
9557
""" This function is used to calculate the photon density based on the photon energy from the calibrator The equation for this function is as follows E = Photon Energy C = speed of Light E/(C) """ photons(E::Float64; λ::Int64 = 525) = ( E / (6.626e-34 * 3e8/(λ*10e-9)))*10e-8 """ This function is used to calculate the transferrance from the optical density (D) """ Transferrance(D) = 10^-D #For the next equation we have 3 fit variables previously determined """ This function is the relationship between: Independent Variables: Transmittance (T) LED Percent (I) Stimulus Time (t_stim) Dependent Variable Photons (P) x[1] = T x[2] = I x[3] = t_stim """ stimulus_model(x::Array{T,1}, p::Array{Float64,1}) where T <:Real = x[1]*(p[1]*x[2]^2 + p[2]*x[2] + p[3])*x[3] stimulus_model(x::Array{T,2}, p::Array{Float64,1}) where T <:Real = [stimulus_model(x[i,:], p) for i in 1:size(x,1)] stimulus_model(x::Array{T,1}) where T <:Real = stimulus_model(x, [25352.59, 43857.01, 929.56]) #Green stimuli stimulus_model(x::Array{T,2}) where T <:Real = stimulus_model(x, [25352.59, 43857.01, 929.56]) f_I(ND::Float64, P::Float64, t_stim::Float64) = stimulus_model([Transferrance(ND), P, t_stim]) f_I(ND::Int64, P::Int64, t_stim::Int64) = stimulus_model([Transferrance(ND|>Float64), P|>Float64, t_stim|>Float64]) ##############################These are the IR and Amplification models############# """ # Adult Intensity-Response models ## The relationship is demonstrated by \$R = f(I)\$ \$f(I) = R_{max}\\frac{I^n}{I^n_{1/2}+I^n}\$ if Response values are normalized to 1, then \$R_{max}\$ = 1 and can be cancelled out to form the equations ### Variables: - R: The response amplitude is the dependent variable - I: The stimulus light intensity (I) is the independent variable ### Parameters: - R_max: Maximum saturating value(\$R_{max}\$) - Ih: The flash strength required to elicit half of \$R_{max}\$: (\$I_{1/2}\$) - n: The power of the equation ### Function usage [IN 1]: IR(I, Ih, n) [OUT 1]: Response """ HILL(x, rmax, k, n) = rmax * (x^n / (k^n + x^n)) #This is the basic form of the model HILL_MODEL(X, p) = map(x -> HILL(x, p[1], p[2], p[3]), X) #This is used for fitting a larger dataset """ # Developmental Intensity response (>P14) ## The relationship is demonstrated by \$R = f(I)\$ where \$f(I) =R_{max}\\left(\\alpha(1 - e^{SI}) + (1-\\alpha)\\frac{I^n}{Ih^n + S}\$ if Response values are normalized to 1, then \$R_{max}\$ = 1 and can be cancelled out to form the equations ### Variables: - R: The response amplitude is the dependent variable - I: The stimulus light intensity (I) is the independent variable ### Parameters: - R_max: Maximum saturating value(\$R_{max}\$) - Ih: The flash strength required to elicit half of \$R_{max}\$: (\$I_{1/2}\$) - n: The power of the equation - (\$\\alpha\$): The temperature-dependent weighting coefficient: - S: he fractional sensitivity ### Function usage [IN 1]: IR_dev(I, rmax, k, n, α, S) [OUT 1]: Response_dev """ modHILL(x, rmax, k, n, α, S) = rmax * (α*(1-exp(S*x)) + (1-α)*(x^n / (k^n + S))) modHILL_MODEL(X, p) = map(x -> modHILL(x, p[1], p[2], p[3], p[4], p[5]), X) #This is used for fitting a larger dataset """ # Amplification Amplification is a time series, therefore it is a function of time ## The relationship is demonstrated by \$R = f(t)\$ \$f(t) = R_{max}(1-e^{-\\alpha(t-t_{eff})^2})\$ ### Variables - R: The response is the dependent variable - t: Time is the independent variable. 
### Parameters - (\$t_{eff}\$): The effective time delay is a short delay between stimulus onset and response onset indicative of the biomolecuar diffusion rates - (\$\\alpha\$): The amplification coefficient represents the rate of the response increases from the biomolecular processes. ### Function usage [IN 1]: AMP(t, α, t_eff, rmax) [OUT 1]: Response """ AMP(t, α, t_eff, rmax) = t > t_eff ? rmax * (1 - exp(-α*(t-t_eff)^2)) : 0.0 """ # Recovery Time Constant (τRec) This function is a single exponential. ### Function usage [IN 1]: Recovery(t, V⁰, τRec) [OUT 1]: Response """ REC(t, V⁰, τRec) = V⁰ * exp(-t/τRec) """ Weber Contrast sensitivity The """ WEBER(I_Feature::T, I_Background::T) where T <: Real = (I_Feature - I_Background)/I_Background """ Michelson Contrast """ MICHELSON(I_Min::T, I_Max::T) where T <: Real = (I_Max - I_Min)/(I_Max + I_Min) MICHELSON(I::Array{T,2}) where T <: Real = (maximum(I) - minimum(I))/(maximum(I) + minimum(I)) """ Root Mean Squared Contrast This is the contrast of an image when the image is M x N in size. The image has i and j features (equal to 1->M-1 and 1->N-1) """ RMS_Contrast(I::Array{T, 2}; normalized = true) where T <: Real = (1/(size(I,1)*size(I,2))) .* sum((I.-sum(I)/length(I))^2) #%% Lets write some fitting equations into the #=========================== The below functions are created by fitting a model ===========================# """ function IRfit(intensity::AbstractArray{T}, response::AbstractArray{T}; lb::AbstractArray{T} = [1.0, 1.0, 0.1], #Default rmin = 100, kmin = 0.1, nmin = 0.1 p0::AbstractArray{T} = [500.0, 1000.0, 2.0], #Default r = 500.0, k = 200.0, n = 2.0 ub::AbstractArray{T} = [Inf, Inf, 10.0], #Default rmax = 2400, kmax = 800 ) where {T<:Real} This function takes X and Y values and fits them according to a HILL type fit """ function HILLfit(intensity::AbstractArray{T}, response::AbstractArray{T}; lb::AbstractArray{T} = [1.0, 1.0, 0.1], #Default rmin = 100, kmin = 0.1, nmin = 0.1 p0::AbstractArray{T} = [500.0, 1000.0, 2.0], #Default r = 500.0, k = 200.0, n = 2.0 ub::AbstractArray{T} = [Inf, Inf, 10.0], #Default rmax = 2400, kmax = 800 ) where {T<:Real} fit = curve_fit(HILL_MODEL, intensity, response, p0, lower=lb, upper=ub) #Calculate the R-squared ss_resid = sum(fit.resid.^2) ss_total = sum((response .- mean(response)).^2) RSQ = 1 - ss_resid/ss_total return fit, RSQ end function STFfit(a_wave::AbstractArray{T}, b_wave::AbstractArray{T}; lb::AbstractArray{T} = [0.001, 0.001, 0.1], p0::AbstractArray{T} = [1.0, 10.0, 2.0], ub::AbstractArray{T} = [Inf, Inf, 10.0], ) where {T <: Real} fit = curve_fit(HILL_MODEL, a_wave, b_wave, p0, lower=lb, upper=ub) #for some reason this works the best when b is in log units #Calculate the r squared ss_resid = sum(fit.resid.^2) ss_total = sum((b_wave .- mean(b_wave)).^2) RSQ = 1 - ss_resid/ss_total return fit, RSQ end """ The recovery time constant is calculated by fitting the normalized Rdim with the response recovery equation """ function TAUfit(data::Experiment{T}; τRec::T=1.0 ) where {T<:Real} #Make sure the sizes are the same #@assert size(resp) == (size(data, 1), size(data,3)) trec = zeros(T, size(data, 1), size(data, 3)) gofs = zeros(T, size(data, 1), size(data, 3)) #This function uses the recovery model and takes t as a independent variable model(x, p) = map(t -> REC(t, -1.0, p[2]), x) for swp in axes(data, 1), ch in axes(data, 3) # println(dim_idx[ch]) xdata = data.t ydata = data[swp, :, ch] #Test both scenarios to ensure that ydata ./= minimum(ydata) #Normalize the Rdim to the minimum value 
#ydata ./= resp #Normalize the Rdim to the saturated response #cutoff all points below -0.5 and above -1.0 over_1 = findall(ydata .>= 1.0) if !isempty(over_1) begin_rng = over_1[end] xdata = xdata[begin_rng:end] ydata = ydata[begin_rng:end] cutoff = findall(ydata .< 0.5) if isempty(cutoff) #println("Exception") end_rng = length(ydata) else end_rng = cutoff[1] end xdata = xdata[1:end_rng] .- xdata[1] ydata = -ydata[1:end_rng] p0 = [ydata[1], τRec] fit = curve_fit(model, xdata, ydata, p0) #report the goodness of fit SSE = sum(fit.resid .^ 2) ȳ = sum(model(xdata, fit.param)) / length(xdata) SST = sum((ydata .- ȳ) .^ 2) GOF = 1 - SSE / SST trec[swp, ch] = fit.param[2] gofs[swp, ch] = GOF end end return trec, gofs end function AMPfit(data::Experiment{T}, resp::Union{T,Matrix{T}}; #This argument should be offloaded to a single value time_cutoff=0.1, lb::Vector{T}=(0.0, 0.001), p0::Vector{T}=(200.0, 0.002), ub::Vector{T}=(Inf, 0.040) ) where {T<:Real} #@assert size(resp) == (size(data, 1), size(data,3)) amp = zeros(2, size(data, 1), size(data, 3)) gofs = zeros(T, size(data, 1), size(data, 3)) for swp = axes(data, 1), ch = axes(data, 3) if isa(resp, Matrix{T}) resp_0 = resp[swp, ch] else resp_0 = resp end model(x, p) = map(t -> AMP(t, p[1], p[2], resp_0), x) idx_end = findall(data.t .>= time_cutoff)[1] xdata = data.t[1:idx_end] ydata = data[swp, 1:idx_end, ch] fit = curve_fit(model, xdata, ydata, p0, lower=lb, upper=ub) #Check Goodness of fit SSE = sum(fit.resid .^ 2) ȳ = sum(model(xdata, fit.param)) / length(xdata) SST = sum((ydata .- ȳ) .^ 2) GOF = 1 - SSE / SST amp[1, swp, ch] = fit.param[1] #Alpha amp value amp[2, swp, ch] = fit.param[2] #Effective time value gofs[swp, ch] = GOF end return amp, gofs end
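# A minimal fitting sketch (not from the source): recovering HILL parameters from
# synthetic intensity-response data. Assumes LsqFit's curve_fit, as used above.
I = 10 .^ range(0, 4, length = 12)                       # flash strengths
R = HILL_MODEL(I, [800.0, 150.0, 1.5]) .+ 5 .* randn(12) # noisy responses
fit, RSQ = HILLfit(I, R)
fit.param # approximately [rmax, k, n]; RSQ close to 1 for a good fit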
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
2292
function DoubleLogistic(tsteps; R1 = 100.0, K1 = -300, R2 = 5.0, K2 = -200.0)
    @parameters r1 r2 k1 k2
    @variables t N1(t) N2(t) X(t)
    D = Differential(t)
    eqs = [D(N1) ~ r1 * (k1 - N1), D(N2) ~ r2 * (k2 - k1 - N2)]
    @named sys = ODESystem(eqs)
    sys = structural_simplify(sys)
    u0 = [N1 => 0.0, N2 => 0.0, X => 0.0]
    p = [r1 => R1, r2 => R2, k1 => K1, k2 => K2]
    prob = ODEProblem(sys, u0, (tsteps[1], tsteps[end]), p)
    sol_arr = solve(prob, Tsit5(), saveat = tsteps) |> Array
    return sol_arr[1, :] # .+ sol_arr[2,:]
end

function SingleLogistic(tsteps; R1 = 100.0, K1 = 30)
    @parameters r1 k1
    @variables t N1(t)
    D = Differential(t)
    eqs = [D(N1) ~ r1 * (k1 - N1)]
    @named sys = ODESystem(eqs)
    sys = structural_simplify(sys)
    u0 = [N1 => 0.0]
    p = [r1 => R1, k1 => K1]
    prob = ODEProblem(sys, u0, (tsteps[1], tsteps[end]), p)
    sol_arr = solve(prob, Tsit5(), saveat = tsteps) |> Array
    return sol_arr[1, :] # .+ sol_arr[2,:]
end

NOSEMODEL1(x, p; init_y = -300.0) = init_y .+ SingleLogistic(x; R1 = p[1], K1 = p[2])
NOSEMODEL2(x, p) = DoubleLogistic(x; R1 = p[1], K1 = p[2], R2 = p[3], K2 = p[4])

function NOSEfit(time, response;
    #ub = [],
    p0 = [10.0, 30.0],
    #lb = [],
)
    MODEL(x, p) = NOSEMODEL1(x, p; init_y = minimum(response))
    fit = curve_fit(MODEL, time, response, p0) #fit the closure so init_y is used; the flattened original passed NOSEMODEL1 here, leaving MODEL unused
    ss_resid = sum(fit.resid .^ 2)
    ss_total = sum((response .- mean(response)) .^ 2)
    RSQ = 1 - ss_resid / ss_total
    return fit, RSQ
end

function findNosePeak(data::Experiment; tmax = 0.75, kwargs...)
    t = data.t
    resp = zeros(size(data, 3))
    for ch in axes(data, 3)
        ŷ = minimum(data, dims = 1)[1, :, ch]
        if t[argmin(ŷ)] < tmax - data.dt
            tidxs = findall(t[argmin(ŷ)] .< t .< tmax)
            fit, RSQ = NOSEfit(t[tidxs], ŷ[tidxs]; kwargs...)
            y = NOSEMODEL1(t[tidxs], fit.param; init_y = minimum(ŷ))
            if minimum(ŷ) < y[end] < 0.0
                resp[ch] = y[end]
            else
                resp[ch] = minimum(ŷ)
            end
        else
            resp[ch] = minimum(ŷ)
        end
    end
    return resp
end
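# A minimal usage sketch (assumes ModelingToolkit and an OrdinaryDiffEq solver are
# loaded, as the @parameters/ODEProblem/Tsit5 calls above require):
tsteps = 0.0:0.01:1.0
y = SingleLogistic(tsteps; R1 = 10.0, K1 = -300.0)       # relaxes from 0 toward K1
curve = NOSEMODEL1(tsteps, [10.0, -300.0]; init_y = -300.0) # offset by the trough value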
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
2182
using Dates, Revise
using ElectroPhysiology, PhysiologyAnalysis
using Pkg; Pkg.activate("test")

#%% ╔═╡This task is for extraction of points, centroids, and ROIs using cellpose
using GLMakie, PhysiologyPlotting

file_loc = "G:/Data/Two Photon"
data2P_fn = "$(file_loc)/2024_09_03_SWCNT_VGGC6/swcntBATH_kpuff_nomf_20um001.tif"

#╔═╡Extract the image
data2P = readImage(data2P_fn);
xlims = data2P.HeaderDict["xrng"]
ylims = data2P.HeaderDict["yrng"]
deinterleave!(data2P) #This separates the movie into two separate movies
#Lets adjust the delta f/f calculation to take the mean

# ╔═╡Separate the red and green channels
img_arr = get_all_frames(data2P)
red_zstack = img_arr[:, :, :, 2]
grn_zstack = img_arr[:, :, :, 1]
red_zproj = project(data2P, dims = (3))[:, :, 1, 2]
grn_zproj = project(data2P, dims = (3))[:, :, 1, 1]
red_trace = project(data2P, dims = (1, 2))[1, 1, :, 2]
grn_trace = project(data2P, dims = (1, 2))[1, 1, :, 1]
delta_f_f_red_zstack = deltaF_F(red_zstack)
delta_f_f_grn_zstack = deltaF_F(grn_zstack)
delta_f_f_red_trace = mean(delta_f_f_red_zstack, dims = (1, 2))[1, 1, :]
delta_f_f_grn_trace = mean(delta_f_f_grn_zstack, dims = (1, 2))[1, 1, :]

#%% ╔═╡ Lets use CellPose to label cells
#Pkg.build("PyCall")
using PyCall, Conda
model = cellpose_model()
mask, flow, style, diam = model.eval(grn_zproj)
data2P.HeaderDict["ROIs"] .= vec(mask)

# Can we find out the centroid of each img from ROIs?
mask = getROImask(data2P)
centroids = findROIcentroids(data2P)

# ╔═╡Plot the figure
fig2 = Figure(size = (1200, 500))
ax1 = GLMakie.Axis(fig2[1, 1], title = "Green Channel", aspect = 1.0)
ax2 = GLMakie.Axis(fig2[1, 2], title = "Fluor")
hm1 = heatmap!(ax1, xlims, ylims, grn_zproj, colormap = :viridis, colorrange = (0.0, maximum(grn_zproj) / 25))
hm2 = contour!(ax1, xlims, ylims, mask, color = :red)
lines!(ax2, data2P.t, grn_trace)
for idx in 1:maximum(data2P.HeaderDict["ROIs"])
    println(idx)
    ROI_data = data2P[getROIindexes(data2P, idx), :, :]
    ROI_trace = mean(ROI_data, dims = 1)[1, :, 1]
    lines!(ax2, data2P.t, ROI_trace)
end
for (k, v) in centroids
    println("$k - $v")
    scatter!(ax1, (v[2], v[1]))
end
fig2

#%%
for (k, v) in data2P.HeaderDict
    println(k)
end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
207
using ElectroPhysiology
using PhysiologyAnalysis
using Test

test_file = raw"to_analyze.abf"
data = readABF(test_file) |> data_filter

@testset "Testing ERG analysis" begin
    include("testAnalysis.jl")
end
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
590
#finding the saturated response
print("Testing the saturated response... ")
resps = saturated_response(data);
@test size(resps, 1) == size(data, 1)
@test size(resps, 2) == size(data, 3)
#rmaxes = maximum(resps, dims=1);

#print("Finding the minima to peak... ")
#@time m2p = minima_to_peak(data);

#print("Finding the time to peak")
#@time t2p = time_to_peak(data);

#print("Finding the dominant time constant... ")
#@time t_dom = percent_recovery_interval(data, rmaxes);

#print("Finding the integration time")
#@time int_time = integral(data);

#println("Extracting some timescale data")
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
213
using ePhys

#The most important functionality of the datasheet opener is to read through a file directory and parse files
#test the functionality to open datasheets
#test the functionality to update datasheets
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git
[ "MIT" ]
0.6.30
d1abcd4b0b02e036f6a7630f6b563b576e9a8561
code
929
println("Testing baseline adjust... ") print("Inplace completed: ") @time baseline_adjust!(data); print("Function completed: ") @time data_baseline = baseline_adjust(data); println("Completed! ") print("Testing Truncate functions") @time data_trunc = truncate_data(data); print("Inplace: ") @time truncate_data!(data); println("Testing filtering functions.") print("Lowpass filter inplace:") @time lowpass_filter!(data); print("Lowpass filter: ") @time filt_data = lowpass_filter(data); print("Lowpass filter (choosing freq): ") @time lopass_data = lowpass_filter(data, 300.0); print("Highpass filter inplace:") @time highpass_filter!(data); print("Highpass filter: ") @time hipass_data = highpass_filter(data); print("Highpass filter (choosing freq): ") @time hipass_data = highpass_filter(data, 60.0); print("Notch filter inplace:") @time notch_filter!(data); print("Notch filter: ") @time hipass_data = notch_filter(data);
PhysiologyAnalysis
https://github.com/mattar13/PhysiologyAnalysis.jl.git