licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 1083 | using DelayDiffEq
using Test
# Regression test: parameterized delayed logistic equation solved with the
# method of steps, in both in-place and scalar (out-of-place) formulations.
# Test parameterized delayed logistic equation
# delayed logistic equation
f_inplace(du, u, h, p, t) = (du[1] = p[1] * u[1] * (1 - h(p, t - 1; idxs = 1)))
f_scalar(u, h, p, t) = p[1] * u * (1 - h(p, t - 1))
# simple history function
# (constant history 0.1 for t ≤ 0; `idxs` keyword mirrors the solver's history API)
h(p, t; idxs = nothing) = 0.1
@testset for inplace in (true, false)
# define problem
# we specify order_discontinuity_t0 = 1 to indicate that the discontinuity at
# t = 0 is of first order
prob = DDEProblem(inplace ? f_inplace : f_scalar,
inplace ? [0.1] : 0.1,
h, (0.0, 50.0), [0.3];
constant_lags = [1],
order_discontinuity_t0 = 1)
# solve problem with initial parameter:
sol1 = solve(prob, MethodOfSteps(Tsit5()))
# NOTE(review): step counts and values below are regression references from a
# previous run — confirm when updating solver versions
@test length(sol1) == 21
@test first(sol1(12))≈0.884 atol=1e-4
@test first(sol1.u[end])≈1 atol=1e-5
# solve problem with updated parameter
prob.p[1] = 1.4
sol2 = solve(prob, MethodOfSteps(Tsit5()))
@test length(sol2) == 47
@test first(sol2(12))≈1.125 atol=5e-4
@test first(sol2.u[end])≈0.994 atol=2e-5
end
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 3921 | using DelayDiffEq
using Test
# Tests for the `save_idxs` option: saving only selected solution components.
# out-of-place problem
function f_notinplace(u, h, p, t)
[-h(p, t - 1 / 5)[1] + u[1]; -h(p, t - 1 / 3)[2] - h(p, t - 1 / 5)[2]]
end
const prob_notinplace = DDEProblem(f_notinplace, ones(2), (p, t) -> zeros(2), (0.0, 100.0),
constant_lags = [1 / 5, 1 / 3])
# in-place problem
function f_inplace(du, u, h, p, t)
du[1] = -h(p, t - 1 / 5)[1] + u[1]
du[2] = -h(p, t - 1 / 3)[2] - h(p, t - 1 / 5)[2]
end
const prob_inplace = DDEProblem(f_inplace, ones(2), (p, t) -> zeros(2), (0.0, 100.0),
constant_lags = [1 / 5, 1 / 3])
const alg = MethodOfSteps(BS3())
@testset for prob in (prob_notinplace, prob_inplace)
# reference solution with all components saved
dde_int = init(prob, alg)
sol = solve!(dde_int)
# save all components
@testset "all components" begin
# without keyword argument
@testset "without keyword" begin
# solution, solution of DDE integrator, and solution of ODE integrator
# contain all components
@test length(sol.u[1]) == 2
@test length(dde_int.sol.u[1]) == 2
@test length(dde_int.integrator.sol.u[1]) == 2
# solution and solution of DDE integrator are equal
@test sol.t == dde_int.sol.t
@test sol.u == dde_int.sol.u
## interpolation
@test sol(25:100, idxs = 2) ≈ [u[1] for u in sol(25:100, idxs = [2])]
end
# with keyword argument
@testset "with keyword" begin
  dde_int2 = init(prob, alg; save_idxs = [1, 2])
  sol2 = solve!(dde_int2)
  # solution, solution of DDE integrator, and solution of ODE integrator
  # contain all components
  @test length(sol2.u[1]) == 2
  @test length(dde_int2.sol.u[1]) == 2
  @test length(dde_int2.integrator.sol.u[1]) == 2
  # solution and solution of DDE integrator are equal
  # (fixed: previously compared the unrelated reference `sol`/`dde_int`
  # instead of the `sol2`/`dde_int2` created in this testset)
  @test sol2.t == dde_int2.sol.t
  @test sol2.u == dde_int2.sol.u
  # interpolation
  @test sol2[2, :] ≈ dde_int2.integrator.sol(sol2.t, idxs = 2)
end
end
# save only second component
@testset "second component" begin
# array index
@testset "array index" begin
dde_int2 = init(prob, alg; save_idxs = [2])
sol2 = solve!(dde_int2)
# solution and solution of DDE integrator contain only second component
@test length(sol2.u[1]) == 1
@test length(dde_int2.sol.u[1]) == 1
# solution of ODE integrator contains both components
@test length(dde_int2.integrator.sol.u[1]) == 2
# solution and solution of DDE integrator are equal
@test sol2.t == dde_int2.sol.t
@test sol2.u == dde_int2.sol.u
# interpolation
# (component 2 of the full solution matches component 1 of the reduced one)
@test sol(25:100, idxs = 2) ≈ sol2(25:100, idxs = 1)
@test sol(25:100, idxs = [2]) ≈ sol2(25:100, idxs = [1])
end
# scalar index
@testset "scalar index" begin
dde_int2 = init(prob, alg; save_idxs = 2)
sol2 = solve!(dde_int2)
# solution and solution of DDE integrator is only vector of floats
@test typeof(sol2.u) === Vector{Float64}
@test typeof(dde_int2.sol.u) === Vector{Float64}
# solution of ODE integrator contains both components
@test length(dde_int2.integrator.sol.u[1]) == 2
# solution and solution of DDE integrator are equal
@test sol2.t == dde_int2.sol.t
@test sol2.u == dde_int2.sol.u
# solution equals second component of complete solution
@test sol.t ≈ sol2.t && sol[2, :] ≈ sol2.u
# interpolation of solution equals second component of
# interpolation of complete solution
@test sol(25:100, idxs = 2) ≈ sol2(25:100, idxs = 1)
end
end
end
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 5103 | using DelayDiffEq, DDEProblemLibrary
using Test
const prob = prob_dde_constant_1delay_long_ip
const alg = MethodOfSteps(Tsit5())
# reference integrator and solution
const dde_int = init(prob, alg)
const sol = solve!(dde_int)
@testset "reference" begin
# solution equals solution of DDE integrator
@test sol.t == dde_int.sol.t
@test sol.u == dde_int.sol.u
# solution equals solution of ODE integrator
@test sol.t == dde_int.integrator.sol.t
@test sol.u == dde_int.integrator.sol.u
end
# do not save every step
@testset "not every step (save_start=$save_start)" for save_start in (false, true)
# for time(s) as scalar (implicitly adds end point as well!) and vectors
for saveat in (25.0, [25.0, 50.0, 75.0])
dde_int2 = init(prob, alg; saveat = saveat, save_start = save_start)
# end point is saved if saveat is a scalar
@test dde_int2.opts.save_end == (saveat isa Number)
sol2 = solve!(dde_int2)
# solution is equal to solution of DDE integrator
@test sol2.t == dde_int2.sol.t
@test sol2.u == dde_int2.sol.u
# time point of solution
if saveat isa Number
@test sol2.t ==
(save_start ? [0.0, 25.0, 50.0, 75.0, 100.0] : [25.0, 50.0, 75.0, 100.0])
else
@test sol2.t == (save_start ? [0.0, 25.0, 50.0, 75.0] : [25.0, 50.0, 75.0])
end
# history is equal to solution above
@test sol.t == dde_int2.integrator.sol.t
@test sol.u == dde_int2.integrator.sol.u
end
end
# do not save every step
@testset "not every step (save_end=$save_end)" for save_end in (false, true)
# for time(s) as scalar (implicitly adds end point as well!) and vectors
for saveat in (25.0, [25.0, 50.0, 75.0])
dde_int2 = init(prob, alg; saveat = saveat, save_end = save_end)
# start point is saved if saveat is a scalar
@test dde_int2.opts.save_start == (saveat isa Number)
sol2 = solve!(dde_int2)
# solution is equal to solution of DDE integrator
@test sol2.t == dde_int2.sol.t
@test sol2.u == dde_int2.sol.u
# time point of solution
if saveat isa Number
@test sol2.t ==
(save_end ? [0.0, 25.0, 50.0, 75.0, 100.0] : [0.0, 25.0, 50.0, 75.0])
else
@test sol2.t == (save_end ? [25.0, 50.0, 75.0, 100.0] : [25.0, 50.0, 75.0])
end
# history is equal to solution above
@test sol.t == dde_int2.integrator.sol.t
@test sol.u == dde_int2.integrator.sol.u
end
end
# save every step
@testset "every step (save_start=$save_start)" for save_start in (false, true)
for saveat in (25.0, [25.0, 50.0, 75.0])
dde_int2 = init(prob, alg; saveat = saveat, save_everystep = true,
save_start = save_start)
# end point is saved implicitly
@test dde_int2.opts.save_end
sol2 = solve!(dde_int2)
# solution is equal to solution of DDE integrator
@test sol2.t == dde_int2.sol.t
@test sol2.u == dde_int2.sol.u
# time points of solution
@test symdiff(sol.t, sol2.t) ==
(save_start ? [25.0, 50.0, 75.0] : [0.0, 25.0, 50.0, 75.0])
# history is equal to solution above
@test sol.t == dde_int2.integrator.sol.t
@test sol.u == dde_int2.integrator.sol.u
end
end
# save every step
@testset "every step (save_end=$save_end)" for save_end in (false, true)
for saveat in (25.0, [25.0, 50.0, 75.0])
dde_int2 = init(prob, alg; saveat = saveat, save_everystep = true,
save_end = save_end)
# start point is saved implicitly
@test dde_int2.opts.save_start
sol2 = solve!(dde_int2)
# solution is equal to solution of DDE integrator
@test sol2.t == dde_int2.sol.t
@test sol2.u == dde_int2.sol.u
# time points of solution
@test symdiff(sol.t, sol2.t) ==
(save_end ? [25.0, 50.0, 75.0] : [100.0, 25.0, 50.0, 75.0])
# history is equal to solution above
@test sol.t == dde_int2.integrator.sol.t
@test sol.u == dde_int2.integrator.sol.u
end
end
@testset "not matching end time point" begin
sol = solve(prob, alg; saveat = 40)
@test sol.t == [0.0, 40, 80, 100]
end
@testset "changing end time point saveat" begin
_saveat = [0.0, 0.25, 0.5, 1.0]
integ = init(DDEProblem((u, h, p, t) -> u, 0.0, (p, t) -> 0.0, (0.0, 1.0)),
MethodOfSteps(Tsit5()), saveat = _saveat)
add_tstop!(integ, 2.0)
solve!(integ)
@test integ.sol.t == _saveat
integ = init(DDEProblem((u, h, p, t) -> u, 0.0, (p, t) -> 0.0, (0.0, 1.0)),
MethodOfSteps(Tsit5()), saveat = _saveat, save_end = true)
add_tstop!(integ, 2.0)
solve!(integ)
@test integ.sol.t == [0.0, 0.25, 0.5, 1.0, 2.0]
integ = init(DDEProblem((u, h, p, t) -> u, 0.0, (p, t) -> 0.0, (0.0, 1.0)),
MethodOfSteps(Tsit5()), saveat = _saveat, save_end = false)
add_tstop!(integ, 2.0)
solve!(integ)
@test integ.sol.t == [0.0, 0.25, 0.5]
end
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 4154 | using DelayDiffEq, DDEProblemLibrary
using Test
# Check that numerical solutions approximate analytical solutions,
# independent of problem structure
@testset "standard history" begin
# standard algorithm
alg = MethodOfSteps(BS3(); constrained = false)
alg1 = MethodOfSteps(Tsit5(); constrained = false,
fpsolve = NLFunctional(; max_iter = 100))
alg2 = MethodOfSteps(DP8(); constrained = false,
fpsolve = NLFunctional(; max_iter = 10))
alg3 = MethodOfSteps(Tsit5(); constrained = true)
alg4 = MethodOfSteps(DP5(); constrained = false,
fpsolve = NLFunctional(; max_iter = 100))
## Single constant delay
@testset "single constant delay" begin
@testset "short time span" begin
### Scalar function
sol_scalar = solve(prob_dde_constant_1delay_scalar, alg)
@test sol_scalar.errors[:l∞] < 3.0e-5
@test sol_scalar.errors[:final] < 2.1e-5
@test sol_scalar.errors[:l2] < 1.2e-5
### Out-of-place function
sol_oop = solve(prob_dde_constant_1delay_oop, alg)
@test sol_scalar.t ≈ sol_oop.t && sol_scalar.u ≈ sol_oop[1, :]
### In-place function
sol_ip = solve(prob_dde_constant_1delay_ip, alg)
@test sol_scalar.t ≈ sol_ip.t && sol_scalar.u ≈ sol_ip[1, :]
end
@testset "long time span" begin
prob = prob_dde_constant_1delay_long_scalar
sol1 = solve(prob, alg1; abstol = 1e-12, reltol = 1e-12)
sol2 = solve(prob, alg2; abstol = 1e-8, reltol = 1e-10)
sol3 = solve(prob, alg3; abstol = 1e-8, reltol = 1e-10)
sol4 = solve(prob, alg4; abstol = 1e-12, reltol = 1e-12)
# relaxed tests to prevent floating point issues
@test abs(sol1.u[end] - sol2.u[end]) < 2.5e-8
@test abs(sol1.u[end] - sol3.u[end]) < 3.7e-8
@test abs(sol1.u[end] - sol4.u[end]) < 9.0e-11 # 9.0e-13
end
end
## Two constant delays
@testset "two constant delays" begin
@testset "short time span" begin
### Scalar function
sol_scalar = solve(prob_dde_constant_2delays_scalar, alg)
@test sol_scalar.errors[:l∞] < 2.4e-6
@test sol_scalar.errors[:final] < 2.1e-6
@test sol_scalar.errors[:l2] < 1.2e-6
### Out-of-place function
sol_oop = solve(prob_dde_constant_2delays_oop, alg)
@test sol_scalar.t ≈ sol_oop.t && sol_scalar.u ≈ sol_oop[1, :]
### In-place function
sol_ip = solve(prob_dde_constant_2delays_ip, alg)
@test sol_scalar.t ≈ sol_ip.t && sol_scalar.u ≈ sol_ip[1, :]
end
@testset "long time span" begin
prob = prob_dde_constant_2delays_long_scalar
sol1 = solve(prob, alg1; abstol = 1e-12, reltol = 1e-12)
sol2 = solve(prob, alg2; abstol = 1e-8, reltol = 1e-10)
sol3 = solve(prob, alg3; abstol = 1e-8, reltol = 1e-10)
sol4 = solve(prob, alg4; abstol = 1e-12, reltol = 1e-12)
# relaxed tests to prevent floating point issues
@test abs(sol1.u[end] - sol3.u[end]) < 1.2e-13 # 1.2e-15
@test abs(sol1.u[end] - sol4.u[end]) < 3.1e-13 # 3.1e-15
end
end
end
## Non-standard history functions
@testset "non-standard history" begin
alg = MethodOfSteps(Tsit5(); constrained = false,
fpsolve = NLFunctional(; max_iter = 100))
@testset "idxs" begin
function f(du, u, h, p, t)
du[1] = -h(p, t - 0.2; idxs = 1) + u[1]
end
h(p, t; idxs = nothing) = idxs isa Number ? 0.0 : [0.0]
prob = DDEProblem(f, [1.0], h, (0.0, 100.0); constant_lags = [0.2])
solve(prob, alg; abstol = 1e-12, reltol = 1e-12)
end
@testset "in-place" begin
function f(du, u, h, p, t)
h(du, p, t - 0.2)
du[1] = -du[1] + u[1]
end
h(val, p, t) = (val .= 0.0)
prob = DDEProblem(f, [1.0], h, (0.0, 100.0); constant_lags = [0.2])
solve(prob, alg; abstol = 1e-12, reltol = 1e-12)
end
end
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 1677 | using DelayDiffEq, DDEProblemLibrary, Unitful
using Test
using DDEProblemLibrary: remake_dde_constant_u0_tType
# Problems with Unitful states (Newtons) and times (seconds),
# keyed by whether the problem is in-place.
const probs = Dict(
true => remake_dde_constant_u0_tType(prob_dde_constant_1delay_long_ip,
[1.0u"N"],
typeof(1.0u"s")),
false => remake_dde_constant_u0_tType(prob_dde_constant_1delay_long_scalar,
1.0u"N",
typeof(1.0u"s")))
# we test the current handling of units for regressions
# however, it is broken upstream: https://github.com/JuliaDiffEq/OrdinaryDiffEq.jl/issues/828
@testset for inplace in (true, false)
prob = probs[inplace]
alg = MethodOfSteps(Tsit5(); constrained = false,
fpsolve = NLFunctional(; max_iter = 100))
# default
sol1 = solve(prob, alg)
# without units
# (in-place problems currently error on unitless tolerances,
# scalar problems accept them and match the default solve)
if inplace
@test_throws Unitful.DimensionError solve(prob, alg;
abstol = 1e-6, reltol = 1e-3)
else
sol2 = solve(prob, alg; abstol = 1e-6, reltol = 1e-3)
@test sol1.t == sol2.t
@test sol1.u == sol2.u
end
# with correct units
sol3 = solve(prob, alg; abstol = 1e-6u"N", reltol = 1e-3u"N")
@test sol1.t == sol3.t
@test sol1.u == sol3.u
# with correct units as vectors
if inplace
sol4 = solve(prob, alg; abstol = [1e-6u"N"], reltol = [1e-3u"N"])
@test sol1.t == sol4.t
@test sol1.u == sol4.u
end
# with incorrect units for absolute tolerance
@test_throws Unitful.DimensionError solve(prob, alg;
abstol = 1e-6u"s", reltol = 1e-3u"N")
# with incorrect units for relative tolerance
@test_throws Unitful.DimensionError solve(prob, alg;
abstol = 1e-6u"N", reltol = 1e-3u"s")
end
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 975 | using DelayDiffEq, DDEProblemLibrary
using Test
# Type-inference tests for `init` and internal helpers.
@testset "init" begin
prob = prob_dde_constant_1delay_ip
prob_scalar = prob_dde_constant_1delay_scalar
# explicit algorithms
inferred = [BS3(), Tsit5(), RK4(), Vern6()]
for alg in inferred
ddealg = MethodOfSteps(alg)
# NOTE(review): marked broken for both groups, including the "inferred"
# one — confirm whether these are expected to pass upstream
@test_broken @inferred init(prob, ddealg)
@test_broken @inferred init(prob_scalar, ddealg)
end
# implicit algorithms
notinferred = [SDIRK2(), TRBDF2(), KenCarp4(), Rosenbrock23(), Rodas4()]
for alg in notinferred
ddealg = MethodOfSteps(alg)
@test_broken @inferred init(prob, ddealg)
@test_broken @inferred init(prob_scalar, ddealg)
end
end
@testset "discontinuity_time" begin
prob_inplace = prob_dde_constant_1delay_ip
prob_scalar = prob_dde_constant_1delay_scalar
for prob in (prob_inplace, prob_scalar)
int = init(prob, MethodOfSteps(Tsit5()))
# computation of propagated discontinuity times must be inferrable
@inferred DelayDiffEq.discontinuity_time(int, (u, p, t) -> 1.0, 0.0, (0.5, 1.5))
end
end
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | code | 1341 | using DelayDiffEq, DDEProblemLibrary
using LinearAlgebra, Test, LinearSolve
# Stiff Waltman problem from the RADAR5 test set, solved with different
# linear solver configurations; all must reproduce the RADAR5 reference.
const PROB_WALTMAN = DDEProblemLibrary.prob_dde_RADAR5_waltman_5
const PROB_KWARGS = (reltol = 1e-7, abstol = [1e-21, 1e-21, 1e-21, 1e-21, 1e-9, 1e-9])
# solution at final time point T = 300 obtained from RADAR5
# with relative tolerance 1e-6 and componentwise absolute tolerances
# 1e-21, 1e-21, 1e-21, 1e-21, 1e-9, and 1e-9
const RADAR5_SOL = [6.154488183e-16, 3.377120916e-7, 4.22140331e-7,
2.142554562e-6, 299.9999999, 299.6430338]
# check that a solution matches the RADAR5 reference at the final time point
function test_waltman_sol(sol)
@test sol.retcode == ReturnCode.Success
@test sol.t[end] == 300
# compare solution at the final time point with RADAR5
for i in 1:6
@test sol.u[end][i]≈RADAR5_SOL[i] rtol=1e-3 atol=1e-17
end
end
# standard factorization
sol1 = solve(PROB_WALTMAN, MethodOfSteps(Rosenbrock23()); PROB_KWARGS...)
test_waltman_sol(sol1)
# in-place LU factorization
sol2 = solve(PROB_WALTMAN,
MethodOfSteps(Rosenbrock23(linsolve = GenericFactorization(lu!)));
PROB_KWARGS...)
test_waltman_sol(sol2)
# out-of-place LU factorization
sol3 = solve(
PROB_WALTMAN, MethodOfSteps(Rosenbrock23(linsolve = GenericFactorization(lu)));
PROB_KWARGS...)
test_waltman_sol(sol3)
# compare in-place and out-of-place LU factorization
# (both should take identical steps and produce identical values)
@test sol2.t == sol3.t
@test sol2.u == sol3.u
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 5.48.1 | 066f60231c1b0ae2905ffd2651e207accd91f627 | docs | 3496 | # DelayDiffEq.jl
[](https://github.com/SciML/DelayDiffEq.jl/actions?query=workflow%3ACI%20branch%3Amaster)
[](https://coveralls.io/github/SciML/DelayDiffEq.jl?branch=master)
[](https://codecov.io/gh/SciML/DelayDiffEq.jl)
DelayDiffEq.jl is a component package in the DifferentialEquations ecosystem. It holds the
delay differential equation solvers and utilities. It is built on top of OrdinaryDiffEq
to extend those solvers for delay differential equations. While completely independent
and usable on its own, users interested in using this
functionality should check out [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl).
## API
DelayDiffEq.jl is part of the SciML common interface, but can be used independently of DifferentialEquations.jl. The only requirement is that the user passes a DelayDiffEq.jl algorithm to `solve`. For example, we can solve the [DDE tutorial from the documentation](https://diffeq.sciml.ai/stable/tutorials/dde_example/) using the `MethodOfSteps(Tsit5())` algorithm:
```julia
using DelayDiffEq
const p0 = 0.2; const q0 = 0.3; const v0 = 1; const d0 = 5
const p1 = 0.2; const q1 = 0.3; const v1 = 1; const d1 = 1
const d2 = 1; const beta0 = 1; const beta1 = 1; const tau = 1
function bc_model(du,u,h,p,t)
du[1] = (v0/(1+beta0*(h(p, t-tau)[3]^2))) * (p0 - q0)*u[1] - d0*u[1]
du[2] = (v0/(1+beta0*(h(p, t-tau)[3]^2))) * (1 - p0 + q0)*u[1] +
(v1/(1+beta1*(h(p, t-tau)[3]^2))) * (p1 - q1)*u[2] - d1*u[2]
du[3] = (v1/(1+beta1*(h(p, t-tau)[3]^2))) * (1 - p1 + q1)*u[2] - d2*u[3]
end
lags = [tau]
h(p, t) = ones(3)
tspan = (0.0,10.0)
u0 = [1.0,1.0,1.0]
prob = DDEProblem(bc_model,u0,h,tspan,constant_lags = lags)
alg = MethodOfSteps(Tsit5())
sol = solve(prob,alg)
using Plots; plot(sol)
```
Both constant and state-dependent lags are supported. Interfacing with OrdinaryDiffEq.jl for implicit methods for stiff equations is also supported.
## Available Solvers
For the list of available solvers, please refer to the [DifferentialEquations.jl DDE Solvers page](https://diffeq.sciml.ai/stable/solvers/dde_solve/). For options for the `solve` command, see the [common solver options page](https://diffeq.sciml.ai/stable/basics/common_solver_opts/).
## Citing
If you use DelayDiffEq.jl in your work, please cite the following:
```bib
@article{DifferentialEquations.jl-2017,
author = {Rackauckas, Christopher and Nie, Qing},
doi = {10.5334/jors.151},
journal = {The Journal of Open Research Software},
keywords = {Applied Mathematics},
note = {Exported from https://app.dimensions.ai on 2019/05/05},
number = {1},
pages = {},
title = {DifferentialEquations.jl – A Performant and Feature-Rich Ecosystem for Solving Differential Equations in Julia},
url = {https://app.dimensions.ai/details/publication/pub.1085583166 and http://openresearchsoftware.metajnl.com/articles/10.5334/jors.151/galley/245/download/},
volume = {5},
year = {2017}
}
@article{widmann2022delaydiffeq,
title={DelayDiffEq: Generating Delay Differential Equation Solvers via Recursive Embedding of Ordinary Differential Equation Solvers},
author={Widmann, David and Rackauckas, Chris},
journal={arXiv preprint arXiv:2208.12879},
year={2022}
}
```
| DelayDiffEq | https://github.com/SciML/DelayDiffEq.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | code | 763 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
module DrillHoles
using Meshes
using Unitful
using GeoTables
using DataFrames
using DataScienceTraits
using TableTransforms
using LinearAlgebra
import Tables
import Interpolations
# scientific type aliases used for dispatch during aggregation
const Continuous = DataScienceTraits.Continuous
const Categorical = DataScienceTraits.Categorical
# linear interpolation with linear extrapolation beyond the boundary knots
const LinearItp = Interpolations.linear_interpolation
const LinearBC = Interpolations.Line
include("units.jl")
include("tables.jl")
include("desurvey.jl")
include("composite.jl")
export
# types
MiningTable,
Survey,
Collar,
Interval,
# functions
desurvey
end
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | code | 2636 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
composite(itable, len)
Composite interval table `itable` to a given length `len`.
## References
* Abzalov, M. 2018. [Applied Mining Geology](https://www.springer.com/gp/book/9783319392639)
"""
function composite(itable, len)
# initialize rows
rows = []
# process each drillhole separately
for dh in groupby(itable, :HOLEID)
# skip if hole has no data
isempty(dh) && continue
# retrieve depth columns
FROM, TO = dh.FROM, dh.TO
# number of composite intervals
N = ceil(Int, (TO[end] - FROM[begin]) / len)
# split original intervals into sub-intervals
# that fit perfectly within composite intervals
j = 1
id = Int[]
df = similar(dh, 0)
for i in 1:(N - 1)
# current interface
AT = FROM[begin] + i * len
# copy all intervals before interface
while TO[j] < AT
push!(df, dh[j, :])
push!(id, i)
j += 1
end
# make sure this is not a gap
if FROM[j] < AT
# first sub-interval at interface
push!(df, dh[j, :])
push!(id, i)
df.TO[end] = AT
# second sub-interval at interface
push!(df, dh[j, :])
push!(id, i + 1)
df.FROM[end] = AT
end
end
# last composite interval (i = N)
while j < size(dh, 1)
j += 1
push!(df, dh[j, :])
push!(id, N)
end
# composite id and interval lengths
df[!, :ID_] = id
df[!, :LEN_] = df.TO - df.FROM
# variables of interest
allcols = propertynames(df)
discard = [:FROM, :TO, :ID_, :LEN_]
allvars = setdiff(allcols, discard)
# perform aggregation
for d in groupby(df, :ID_)
row = Dict{Symbol,Any}()
row[:SOURCE] = :INTERVAL
row[:HOLEID] = dh.HOLEID[begin]
row[:FROM] = d.FROM[begin]
row[:TO] = d.TO[end]
for var in allvars
x = d[!, var]
l = d[!, :LEN_]
x̄ = aggregate(x, l)
row[var] = x̄
end
push!(rows, row)
end
end
# return table with all composites
DataFrame(rows)
end
# helper function to aggregate a vector of values `x` weighted by the
# corresponding interval lengths `l`; returns `missing` when no value
# can be aggregated
function aggregate(x, l)
  # indices of entries with known (non-missing) values
  known = findall(!ismissing, x)
  vals = x[known]
  lens = l[known]
  # nothing to aggregate without a positive total length
  if isempty(lens) || iszero(sum(lens))
    return missing
  end
  # dispatch on the scientific type of the values
  _aggregate(elscitype(x), vals, lens)
end
# length-weighted average for continuous variables
function _aggregate(::Type{Continuous}, x, l)
  (x ⋅ l) / sum(l)
end

# value of the longest sub-interval for categorical variables
function _aggregate(::Type{Categorical}, x, l)
  x[argmax(l)]
end
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | code | 10906 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
desurvey(collar, survey, intervals;
step=:arc, indip=:auto, outdip=:down,
inunit=u"m", outunit=inunit, len=nothing,
geom=:point, radius=1.0u"m")
Desurvey drill holes based on `collar`, `survey` and `intervals` tables.
Optionally, specify a `step` method, an input dip angle convention `indip`,
an output dip angle convention `outdip`, an input unit `inunit` and
an output unit in length units.
The option `len` can be used to composite samples to a given length, and
the option `geom` can be used to specify the geometry of each sample.
In the case of `:cylinder` geometry, the option `radius` can be used to
specify the radius of each cylinder.
## Step methods
* `:arc` - spherical arc step
* `:tan` - simple tanget step
See https://help.seequent.com/Geo/2.1/en-GB/Content/drillholes/desurveying.htm
## Dip conventions
### Input dip angle
* `:auto` - most frequent dip sign points downwards
* `:down` - positive dip points downwards
* `:up` - positive dip points upwards
### Output dip angle
* `:down` - positive dip points downwards
* `:up` - positive dip points upwards
## Output geometries
* `:cylinder` - geospatial data with cylinders
* `:point` - geospatial data with points
* `:none` - data frame with usual columns
"""
function desurvey(
collar,
survey,
intervals;
step=:arc,
indip=:auto,
outdip=:down,
inunit=u"m",
outunit=inunit,
len=nothing,
geom=:point,
radius=1.0u"m"
)
# sanity checks
@assert step ∈ [:arc, :tan] "invalid step method"
@assert indip ∈ [:auto, :down, :up] "invalid input dip convention"
@assert outdip ∈ [:down, :up] "invalid output dip convention"
@assert dimension(inunit) == u"𝐋" "invalid input unit"
@assert dimension(outunit) == u"𝐋" "invalid output unit"
# pre-process input tables
ctable, stable, itables = preprocess(collar, survey, intervals, indip, inunit)
# combine all intervals into single table and
# assign values to sub-intervals when possible
itable = interleave(itables)
# composite samples to a specified length
ltable = isnothing(len) ? itable : composite(itable, aslen(len, u"m"))
# combine composites with survey table and
# interpolate AZM and DIP angles
ftable = position(ltable, stable)
# combine samples with collar table and
# compute Cartesian coordinates X, Y and Z
result = locate(ftable, ctable, step)
# post-process output table
postprocess(result, outdip, outunit, geom, aslen(radius, u"m"))
end
# standardize the raw collar, survey, and interval tables: rename columns
# to HOLEID/X/Y/Z, HOLEID/AT/AZM/DIP, HOLEID/FROM/TO, coerce to continuous
# values, and attach input units; returns (ctable, stable, itables)
function preprocess(collar, survey, intervals, indip, inunit)
withunit(x) = aslen(x, inunit)
# select relevant columns of collar table and
# standardize column names to HOLEID, X, Y, Z
ctable = let
f1 = Rename(collar.holeid => :HOLEID, collar.x => :X, collar.y => :Y, collar.z => :Z)
f2 = Select(:HOLEID, :X, :Y, :Z)
f3 = DropMissing()
f4 = Coerce(:X => Continuous, :Y => Continuous, :Z => Continuous)
f5 = Functional(:X => withunit, :Y => withunit, :Z => withunit)
DataFrame(collar.table) |> (f1 → f2 → f3 → f4 → f5)
end
# select relevant columns of survey table and
# standardize column names to HOLEID, AT, AZM, DIP
stable = let
f1 = Rename(survey.holeid => :HOLEID, survey.at => :AT, survey.azm => :AZM, survey.dip => :DIP)
f2 = Select(:HOLEID, :AT, :AZM, :DIP)
f3 = DropMissing()
f4 = Coerce(:AT => Continuous, :AZM => Continuous, :DIP => Continuous)
f5 = Functional(:AT => withunit, :AZM => asdeg, :DIP => asdeg)
DataFrame(survey.table) |> (f1 → f2 → f3 → f4 → f5)
end
# flip sign of dip angle if necessary
# (internally positive dip points upwards; see `arcstep`/`tanstep`)
indip == :auto && (indip = dipguess(stable))
indip == :down && (stable.DIP *= -1)
# duplicate rows if hole id has a single row
# (a second survey row one unit deeper gives interpolation two knots)
singles = []
for hole in groupby(stable, :HOLEID)
if size(hole, 1) == 1
single = copy(hole)
single.AT .+= oneunit(eltype(single.AT))
push!(singles, single)
end
end
stable = vcat(stable, singles...)
# select all columns of interval tables and
# standardize column names to HOLEID, FROM, TO
itables = map(intervals) do interval
f1 = Rename(interval.holeid => :HOLEID, interval.from => :FROM, interval.to => :TO)
f2 = Functional(:FROM => withunit, :TO => withunit)
DataFrame(interval.table) |> (f1 → f2)
end
ctable, stable, itables
end
# guess the input dip convention: if the majority of dip values are
# positive, then positive dip is assumed to point downwards
function dipguess(stable)
  netsign = sum(sign, stable.DIP)
  netsign > 0 ? :down : :up
end
# finalize the desurveyed table: apply the output dip convention and units,
# drop auxiliary rows/columns, and optionally wrap each hole's samples in a
# geospatial domain of points or cylinders
function postprocess(table, outdip, outunit, geom, radius)
# flip sign of dip angle if necessary
# (internally positive dip points upwards; see `arcstep`/`tanstep`)
outdip == :down && (table.DIP *= -1)
# fix output units
fixunit(x) = uconvert(outunit, x)
fixunits = Functional(:FROM => fixunit, :TO => fixunit, :AT => fixunit, :X => fixunit, :Y => fixunit, :Z => fixunit)
# discard auxiliary SOURCE information
# (keep only rows originating from interval tables)
samples = view(table, table.SOURCE .== :INTERVAL, Not(:SOURCE))
# sort columns for clarity
samples = select(samples, sort(names(samples)))
# place actual variables at the end
cols = [:HOLEID, :FROM, :TO, :AT, :AZM, :DIP, :X, :Y, :Z]
holes = select(samples, cols, Not(cols)) |> fixunits
# return data frame if no geometry is specified
geom == :none && return holes
# initialize result
geotables = []
# process each drillhole separately
for dh in groupby(holes, :HOLEID)
# skip if hole has no data
isempty(dh) && continue
# columns with data
values = select(dh, Not(cols[2:end]))
# coordinates of centroids
coords = collect(zip(dh.X, dh.Y, dh.Z))
# centroids as points
points = Point.(coords)
# geometry elements along hole
domain = if geom == :cylinder
CylindricalTrajectory(points, radius)
else
PointSet(points)
end
push!(geotables, georef(values, domain))
end
reduce(vcat, geotables)
end
# combine interval tables into a single table of non-overlapping
# sub-intervals delimited by all FROM/TO depths; each sub-interval
# inherits values from every interval that fully contains it
# (first non-missing value wins, assuming homogeneity)
function interleave(itables)
# stack tables in order to see all variables
table = vcat(itables..., cols=:union)
# initialize rows of result table
rows = []
# process each drillhole separately
for hole in groupby(table, :HOLEID)
# save hole id for later
holeid = first(hole.HOLEID)
# find all possible depths
depths = [hole.FROM; hole.TO] |> unique |> sort
# loop over all sub-intervals
for i in 2:length(depths)
# current sub-interval
from, to = depths[i - 1], depths[i]
# initialize row with metadata
row = Dict{Symbol,Any}(:HOLEID => holeid, :FROM => from, :TO => to)
# find all intervals which contain sub-interval
samples = filter(I -> I.FROM ≤ from && to ≤ I.TO, hole, view=true)
# fill values when that is possible assuming homogeneity
props = select(samples, Not([:HOLEID, :FROM, :TO]))
for name in propertynames(props)
ind = findfirst(!ismissing, props[!, name])
val = isnothing(ind) ? missing : props[ind, name]
row[name] = val
end
# save row and continue
push!(rows, row)
end
end
# concatenate rows
DataFrame(rows)
end
# merge interval samples with survey stations and interpolate the AZM and
# DIP angles at each sample depth (AT = interval midpoint)
# NOTE(review): `position` shadows `Base.position` inside this module —
# intentional here since `Base.position` is never used
function position(itable, stable)
# copy table to avoid mutation
interv = copy(itable)
# depth equals to middle of interval
interv[!, :AT] = (interv.FROM .+ interv.TO) ./ 2
# register source of data for interval table
interv[!, :SOURCE] .= :INTERVAL
# join attributes and trajectory
table = outerjoin(interv, stable, on=[:HOLEID, :AT])
# register source of data for survey table
table.SOURCE = coalesce.(table.SOURCE, :SURVEY)
# initialize drillholes
drillholes = []
# process each drillhole separately
for hole in groupby(table, :HOLEID)
dh = sort(hole, :AT)
# interpolate azm and dip angles
interpolate!(dh, :AT, :AZM)
interpolate!(dh, :AT, :DIP)
push!(drillholes, dh)
end
# concatenate all drillholes
attrib = reduce(vcat, drillholes)
# fill FROM and TO of survey table
# with AT (degenerate interval)
for row in eachrow(attrib)
ismissing(row.FROM) && (row.FROM = row.AT)
ismissing(row.TO) && (row.TO = row.AT)
end
# drop missing type from complete columns
dropmissing!(attrib, [:FROM, :TO, :AZM, :DIP])
end
# interpolate ycol from xcol in place, assuming table is sorted by xcol;
# missing entries of ycol are filled by linear interpolation between the
# known entries (with linear extrapolation beyond them); a table with no
# known entries is left untouched
function interpolate!(table, xcol, ycol)
  xs = table[!, xcol]
  ys = table[!, ycol]
  # indices with known (non-missing) values
  is = findall(!ismissing, ys)
  isempty(is) && return nothing
  itp = LinearItp(xs[is], ys[is], extrapolation_bc=LinearBC())
  # `eachindex` instead of `1:length(xs)` (idiomatic and provably in bounds)
  @inbounds Threads.@threads for i in eachindex(xs, ys)
    ys[i] = itp(xs[i])
  end
  nothing
end
# attach collar coordinates at depth 0 and integrate the survey angles
# down-hole with the chosen step method (:arc or :tan) to obtain X, Y, Z
# at every depth; coordinates at interval depths are linearly interpolated
function locate(attrib, ctable, method)
# collar coordinates are at depth 0
ctableat = copy(ctable)
ctableat[!, :AT] .= zero(eltype(attrib.AT))
# join tables on hole id and depth
table = leftjoin(attrib, ctableat, on=[:HOLEID, :AT])
# choose a step method
step = method == :arc ? arcstep : tanstep
# initialize drillholes
drillholes = []
# process each drillhole separately
for hole in groupby(table, :HOLEID)
# sort intervals by depth
dh = sort(hole, :AT)
# view rows from survey table
survey = view(dh, dh.SOURCE .== :SURVEY, :)
# cannot interpolate a single row
# NOTE(review): such holes are skipped entirely and are absent from the result
size(survey, 1) > 1 || continue
# use step method to calculate coordinates on survey
at, azm, dip = survey.AT, survey.AZM, survey.DIP
x, y, z = survey.X, survey.Y, survey.Z
@inbounds for i in 2:size(survey, 1)
# compute increments dx, dy, dz
az1, dp1 = azm[i - 1], dip[i - 1]
az2, dp2 = azm[i], dip[i]
d12 = at[i] - at[i - 1]
dx, dy, dz = step(az1, dp1, az2, dp2, d12)
# add increments to x, y, z
x[i] = x[i - 1] + dx
y[i] = y[i - 1] + dy
z[i] = z[i - 1] + dz
end
# interpolate coordinates linearly on intervals
interpolate!(dh, :AT, :X)
interpolate!(dh, :AT, :Y)
interpolate!(dh, :AT, :Z)
push!(drillholes, dh)
end
# concatenate drillhole trajectories
result = reduce(vcat, drillholes)
# drop missing type from complete columns
dropmissing!(result, [:X, :Y, :Z])
end
# -------------
# STEP METHODS
# -------------
# assumes positive dip points upwards
#
# Minimum-curvature ("arc") step: returns the (dx, dy, dz) increment for a
# drill-hole segment of length d12 between two survey stations with
# azimuths az1/az2 and dips dp1/dp2.
function arcstep(az1, dp1, az2, dp2, d12)
  # convert dips (positive up) to inclinations measured from vertical
  θ1 = 90.0u"°" - dp1
  θ2 = 90.0u"°" - dp2
  sinθ1, cosθ1 = sincos(θ1)
  sinθ2, cosθ2 = sincos(θ2)
  sinφ1, cosφ1 = sincos(az1)
  sinφ2, cosφ2 = sincos(az2)
  # dogleg angle between the two survey directions
  ψ = acos(cos(θ2 - θ1) - sinθ1 * sinθ2 * (1 - cos(az2 - az1)))
  # ratio factor; straight segments (ψ ≈ 0) use the limit value 1
  F = ψ ≈ 0.0 ? 1.0 : 2 * tan(ψ / 2) / ψ
  Δx = 0.5 * d12 * (sinθ1 * sinφ1 + sinθ2 * sinφ2) * F
  Δy = 0.5 * d12 * (sinθ1 * cosφ1 + sinθ2 * cosφ2) * F
  Δz = 0.5 * d12 * (cosθ1 + cosθ2) * F
  Δx, Δy, Δz
end
# assumes positive dip points upwards
#
# Tangential step: uses only the *first* station's direction over the whole
# segment of length d12 (az2 and dp2 are accepted for interface parity with
# arcstep but intentionally unused).
function tanstep(az1, dp1, az2, dp2, d12)
  θ1 = 90.0u"°" - dp1
  sinθ1, cosθ1 = sincos(θ1)
  sinφ1, cosφ1 = sincos(az1)
  d12 * sinθ1 * sinφ1, d12 * sinθ1 * cosφ1, d12 * cosθ1
end
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | code | 6555 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
MiningTable
A table from the mining industry (e.g. survey, collar, interval).
"""
abstract type MiningTable end
"""
required(table)
Return the required columns of mining `table`.
"""
function required end
"""
selection(table)
Return the subtable of mining `table` with required columns.
"""
selection(t::MiningTable) = t.table |> Select(required(t))
# -----------------
# TABLES INTERFACE
# -----------------

# Every MiningTable is itself a Tables.jl table: all access is delegated
# to `selection(t)`, so only the required columns (plus extras, for
# Interval) are exposed to consumers.
Tables.istable(::Type{<:MiningTable}) = true
Tables.rowaccess(::Type{<:MiningTable}) = true
Tables.columnaccess(::Type{<:MiningTable}) = true
Tables.rows(t::MiningTable) = Tables.rows(selection(t))
Tables.columns(t::MiningTable) = Tables.columns(selection(t))
Tables.columnnames(t::MiningTable) = Tables.columnnames(selection(t))
# -----------
# IO METHODS
# -----------

# Display the selected subtable rather than the raw wrapped table.
Base.show(io::IO, mime::MIME"text/plain", t::MiningTable) = _show(io, mime, t)
Base.show(io::IO, mime::MIME"text/html", t::MiningTable) = _show(io, mime, t)

_show(io, mime, t) = show(io, mime, selection(t))
# ----------------
# IMPLEMENTATIONS
# ----------------
"""
Collar(table; [holeid], [x], [y], [z])
The collar `table` stores the `x`, `y`, `z` coordinates
(usually in meters) of the head of the drill holes with
specified `holeid`.
Common column names are searched in the `table` when keyword
arguments are ommitted.
## Examples
```julia
Collar(table, holeid="BHID", x="EASTING", y="NORTHING")
Collar(table, x="XCOLLAR", y="YCOLLAR", z="ZCOLLAR")
```
See also [`Survey`](@ref), [`Interval`](@ref).
"""
struct Collar{𝒯} <: MiningTable
table::𝒯
holeid::Symbol
x::Symbol
y::Symbol
z::Symbol
function Collar{𝒯}(table, holeid, x, y, z) where {𝒯}
assertspec(table, [holeid, x, y, z])
assertreal(table, [x, y, z])
new(table, holeid, x, y, z)
end
end
Collar(table; holeid=defaultid(table), x=defaultx(table), y=defaulty(table), z=defaultz(table)) =
Collar{typeof(table)}(table, Symbol(holeid), Symbol(x), Symbol(y), Symbol(z))
required(table::Collar) = (table.holeid, table.x, table.y, table.z)
"""
Survey(table; [holeid], [at], [azm], [dip])
The survey `table` stores the `azm` and `dip` angles
(usually in degrees) `at` each depth (usually in meters)
along drill holes with specified `holeid`.
Common column names are searched in the `table` when keyword
arguments are ommitted.
## Examples
```julia
Survey(table, holeid="BHID", at="DEPTH")
Survey(table, azm="AZIMUTH")
```
See also [`Collar`](@ref), [`Interval`](@ref).
"""
struct Survey{𝒯} <: MiningTable
table::𝒯
holeid::Symbol
at::Symbol
azm::Symbol
dip::Symbol
function Survey{𝒯}(table, holeid, at, azm, dip) where {𝒯}
assertspec(table, [holeid, at, azm, dip])
assertreal(table, [at, azm, dip])
new(table, holeid, at, azm, dip)
end
end
Survey(table; holeid=defaultid(table), at=defaultat(table), azm=defaultazm(table), dip=defaultdip(table)) =
Survey{typeof(table)}(table, Symbol(holeid), Symbol(at), Symbol(azm), Symbol(dip))
required(table::Survey) = (table.holeid, table.at, table.azm, table.dip)
"""
Interval(table; [holeid], [from], [to])
The interval `table` stores the interval `from` a given
depth `to` another greater depth (usually in meters), along
drill holes with specified `holeid`. Besides the intervals,
the `table` stores measurements of variables such as grades,
mineralization domains, geological interpretations, etc.
Common column names are searched in the `table` when keyword
arguments are ommitted.
## Examples
```julia
Interval(table, holeid="BHID", from="START", to="FINISH")
Interval(table, from="BEGIN", to="END")
```
See also [`Collar`](@ref), [`Survey`](@ref).
"""
struct Interval{𝒯} <: MiningTable
table::𝒯
holeid::Symbol
from::Symbol
to::Symbol
function Interval{𝒯}(table, holeid, from, to) where {𝒯}
assertspec(table, [holeid, from, to])
assertreal(table, [from, to])
new(table, holeid, from, to)
end
end
Interval(table; holeid=defaultid(table), from=defaultfrom(table), to=defaultto(table)) =
Interval{typeof(table)}(table, Symbol(holeid), Symbol(from), Symbol(to))
required(table::Interval) = (table.holeid, table.from, table.to)
function selection(t::Interval)
all = Tables.columnnames(t.table)
req = collect(required(t))
not = setdiff(all, req)
t.table |> Select([req; not])
end
# ---------
# DEFAULTS
# ---------
# helper function to find default column names
# from a list of candidate names
#
# Searches the table columns for the first matching candidate (including
# capitalized and upper-case variants); throws an informative
# ArgumentError mentioning `kwarg` when no candidate is present.
function default(table, names, kwarg)
  cols = Tables.columns(table)
  available = Tables.columnnames(cols)
  augmented = augment(names)
  # first candidate present in the table wins
  pos = findfirst(in(available), augmented)
  isnothing(pos) || return augmented[pos]
  ag = join(augmented, ", ", " and ")
  av = join(available, ", ", " and ")
  throw(ArgumentError("""\n
  None of the column names $ag was found in table.
  Please specify $kwarg=... explicitly.
  Available names: $av.
  """))
end
# candidate column names (case variants are added by `augment`) tried for
# each keyword of Collar, Survey and Interval
defaultid(table) = default(table, [:holeid, :bhid], :holeid)
defaultx(table) = default(table, [:x, :xcollar, :easting], :x)
defaulty(table) = default(table, [:y, :ycollar, :northing], :y)
defaultz(table) = default(table, [:z, :zcollar, :elevation], :z)
defaultazm(table) = default(table, [:azimuth, :azm, :brg], :azm)
defaultdip(table) = default(table, [:dip], :dip)
defaultat(table) = default(table, [:at, :depth], :at)
defaultfrom(table) = default(table, [:from], :from)
defaultto(table) = default(table, [:to], :to)
# expand candidate names with Capitalized and UPPERCASE variants,
# preserving first-seen order and dropping duplicates
function augment(names)
  base = string.(names)
  variants = vcat(base, uppercasefirst.(base), uppercase.(base))
  Symbol.(unique(variants))
end
# -----------
# ASSERTIONS
# -----------
# throw an ArgumentError when any of the required `names` is absent
# from the table's columns
function assertspec(table, names)
  cols = Tables.columns(table)
  present = Tables.columnnames(cols)
  absent = setdiff(names, present)
  if !isempty(absent)
    wrong = join(absent, ", ", " and ")
    throw(ArgumentError("None of the names $wrong was found in the table."))
  end
end
# throw an ArgumentError when any of the columns `names` has an element
# type other than Real (missing values are allowed)
function assertreal(table, names)
  columns = Tables.columns(table)
  for name in names
    x = Tables.getcolumn(columns, name)
    T = eltype(x)
    T <: Union{Real,Missing} && continue
    throw(ArgumentError("""\n
    Column $name should contain real values,
    but it currently has values of type $T.
    Please fix the type before trying to load
    the data into the mining table.
    """))
  end
end
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | code | 366 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
# type aliases for Unitful quantities with length dimension and degree units
const Len{T} = Quantity{T,u"𝐋"}
const Deg{T} = Quantity{T,NoDims,typeof(u"°")}

# attach the unit `u` to a plain number; pass length quantities through
aslen(x, u) = x * u
aslen(x::Len, _) = x

# attach degrees to a plain number; pass degree quantities through
asdeg(x) = x * u"°"
asdeg(x::Deg) = x
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | code | 7485 | using DrillHoles
using DataFrames
using Unitful
using Meshes
using Test
@testset "DrillHoles.jl" begin
collar = Collar(DataFrame(HOLEID=1:2, X=1:2, Y=1:2, Z=1:2))
survey = Survey(DataFrame(HOLEID=[1, 1, 2, 2], AT=[0, 5, 0, 5], AZM=[0, 1, 20, 21], DIP=[89, 88, 77, 76]))
assays = Interval(DataFrame(HOLEID=[1, 1, 2], FROM=[1, 3.5, 0], TO=[3.5, 8, 7], A=[1, 2, 3]))
lithos = Interval(DataFrame(HOLEID=[1, 2, 2], FROM=[0, 0, 4.4], TO=[8, 4.4, 8], L=["A", "B", "C"]))
dh = desurvey(collar, survey, [assays, lithos], geom=:none)
@test dh.HOLEID == [1, 1, 1, 2, 2, 2]
@test dh.FROM ≈ [0.0, 1.0, 3.5, 0.0, 4.4, 7.0] * u"m"
@test dh.TO ≈ [1.0, 3.5, 8.0, 4.4, 7.0, 8.0] * u"m"
@test dh.AT ≈ [0.5, 2.25, 5.75, 2.2, 5.7, 7.5] * u"m"
@test dh.AZM ≈ [0.1, 0.45, 1.15, 20.44, 21.14, 21.5] * u"°"
@test dh.DIP ≈ [88.9, 88.55, 87.85, 76.56, 75.86, 75.5] * u"°"
@test isapprox(
dh.X,
[
1.000152273918042,
1.0006852326311886,
1.001751150057482,
2.180003148116409,
2.466371792847059,
2.6136470958513938
] * u"m",
atol=1e-5u"m"
)
@test isapprox(
dh.Y,
[
1.0130869793585429,
1.058891407113443,
1.1505002626232426,
2.4809751054874134,
3.2461627733082983,
3.6396878596161817
] * u"m",
atol=1e-5u"m"
)
@test isapprox(
dh.Z,
[
0.5001776737812189,
-1.2492004679845148,
-4.747956751515982,
-0.1391896286156471,
-3.5424458559587215,
-5.292691915735159
] * u"m",
atol=1e-5u"m"
)
@test isequal(dh.A, [missing, 1, 2, 3, 3, missing])
@test isequal(dh.L, ["A", "A", "A", "B", "C", "C"])
# changing step method only changes coordinates X, Y, Z
dh2 = desurvey(collar, survey, [assays, lithos], step=:tan, geom=:none)
@test isequal(dh[!, Not([:X, :Y, :Z])], dh2[!, Not([:X, :Y, :Z])])
@test isapprox(
dh2.X,
[1.0, 1.0, 1.0, 2.169263142065488, 2.4385454135333093, 2.5770334388596177] * u"m",
atol=1e-5u"m"
)
@test isapprox(
dh2.Y,
[
1.0087262032186417,
1.039267914483888,
1.10035133701438,
2.4650466607708674,
3.204893621088157,
3.5853863435370488
] * u"m",
atol=1e-5u"m"
)
@test isapprox(
dh2.Z,
[
0.5000761524218044,
-1.2496573141018803,
-4.74912424714925,
-0.1436141425275177,
-3.553909369275841,
-5.307775485889264
] * u"m",
atol=1e-5u"m"
)
# input dip option
dh = desurvey(collar, survey, [assays, lithos], indip=:down, geom=:none)
@test dh.DIP ≈ [88.9, 88.55, 87.85, 76.56, 75.86, 75.5] * u"°"
dh = desurvey(collar, survey, [assays, lithos], indip=:up, geom=:none)
@test dh.DIP ≈ [-88.9, -88.55, -87.85, -76.56, -75.86, -75.5] * u"°"
# output dip option
dh = desurvey(collar, survey, [assays, lithos], outdip=:down, geom=:none)
@test dh.DIP ≈ [88.9, 88.55, 87.85, 76.56, 75.86, 75.5] * u"°"
dh = desurvey(collar, survey, [assays, lithos], outdip=:up, geom=:none)
@test dh.DIP ≈ [-88.9, -88.55, -87.85, -76.56, -75.86, -75.5] * u"°"
# input unit option
dh = desurvey(collar, survey, [assays, lithos], inunit=u"ft", geom=:none)
@test unit(eltype(dh.FROM)) == u"ft"
@test unit(eltype(dh.TO)) == u"ft"
@test unit(eltype(dh.AT)) == u"ft"
@test unit(eltype(dh.X)) == u"ft"
@test unit(eltype(dh.Y)) == u"ft"
@test unit(eltype(dh.Z)) == u"ft"
# output unit option
dh = desurvey(collar, survey, [assays, lithos], inunit=u"ft", outunit=u"m", geom=:none)
@test unit(eltype(dh.FROM)) == u"m"
@test unit(eltype(dh.TO)) == u"m"
@test unit(eltype(dh.AT)) == u"m"
@test unit(eltype(dh.X)) == u"m"
@test unit(eltype(dh.Y)) == u"m"
@test unit(eltype(dh.Z)) == u"m"
# len option
dh1 = desurvey(collar, survey, [assays, lithos], len=1.0, geom=:none)
dh2 = desurvey(collar, survey, [assays, lithos], len=1.0u"m", geom=:none)
@test isequal(dh1, dh2)
# radius option
dh1 = desurvey(collar, survey, [assays, lithos], radius=1.0, geom=:none)
dh2 = desurvey(collar, survey, [assays, lithos], radius=1.0u"m", geom=:none)
@test isequal(dh1, dh2)
# geom option
dh = desurvey(collar, survey, [assays, lithos], geom=:cylinder)
@test eltype(dh.geometry) <: Cylinder
# point geometries by default
dh = desurvey(collar, survey, [assays, lithos])
@test eltype(dh.geometry) <: Point
# guess column names
collar = Collar(DataFrame(holeid=1:2, XCOLLAR=1:2, Y=1:2, z=1:2))
@test collar.holeid == :holeid
@test collar.x == :XCOLLAR
@test collar.y == :Y
@test collar.z == :z
survey = Survey(DataFrame(HOLEID=[1, 1, 2, 2], at=[0, 5, 0, 5], BRG=[0, 1, 20, 21], DIP=[89, 88, 77, 76]))
@test survey.holeid == :HOLEID
@test survey.at == :at
@test survey.azm == :BRG
@test survey.dip == :DIP
assays = Interval(DataFrame(holeid=[1, 1, 2], FROM=[1, 3.5, 0], to=[3.5, 8, 7], A=[1, 2, 3]))
@test assays.holeid == :holeid
@test assays.from == :FROM
@test assays.to == :to
# result has standard column names
dh = desurvey(collar, survey, [assays], geom=:none)
@test propertynames(dh) == [:HOLEID, :FROM, :TO, :AT, :AZM, :DIP, :X, :Y, :Z, :A]
# custom column names
cdf = DataFrame(HoleId=1:2, XCollar=1:2, YCollar=1:2, ZCollar=1:2)
collar = Collar(cdf, holeid=:HoleId, x="XCollar", y=:YCollar, z="ZCollar")
@test collar.holeid == :HoleId
@test collar.x == :XCollar
@test collar.y == :YCollar
@test collar.z == :ZCollar
sdf = DataFrame(HoleId=[1, 1, 2, 2], At=[0, 5, 0, 5], Azimuth=[0, 1, 20, 21], Dip=[89, 88, 77, 76])
survey = Survey(sdf, holeid=:HoleId, at="At", azm=:Azimuth, dip="Dip")
@test survey.holeid == :HoleId
@test survey.at == :At
@test survey.azm == :Azimuth
@test survey.dip == :Dip
idf = DataFrame(HoleId=[1, 1, 2], From=[1, 3.5, 0], To=[3.5, 8, 7])
assays = Interval(idf, holeid=:HoleId, from="From", to=:To)
@test assays.holeid == :HoleId
@test assays.from == :From
@test assays.to == :To
# Tables.jl interface
collar = Collar(DataFrame(holeid=1:2, XCOLLAR=1:2, Y=1:2, z=1:2, w=1:2))
@test Tables.istable(collar)
@test Tables.rowaccess(collar) == true
@test Tables.columnaccess(collar) == true
@test Tables.columnnames(collar) == [:holeid, :XCOLLAR, :Y, :z]
result = DataFrame(holeid=1:2, XCOLLAR=1:2, Y=1:2, z=1:2)
@test DataFrame(Tables.rows(collar)) == result
@test DataFrame(Tables.columns(collar)) == result
@test DataFrame(collar) == result
survey =
Survey(DataFrame(HOLEID=[1, 1, 2, 2], at=[0, 5, 0, 5], BRG=[0, 1, 20, 21], DIP=[89, 88, 77, 76], BAR=[1, 2, 3, 4]))
@test Tables.istable(survey)
@test Tables.rowaccess(survey) == true
@test Tables.columnaccess(survey) == true
@test Tables.columnnames(survey) == [:HOLEID, :at, :BRG, :DIP]
result = DataFrame(HOLEID=[1, 1, 2, 2], at=[0, 5, 0, 5], BRG=[0, 1, 20, 21], DIP=[89, 88, 77, 76])
@test DataFrame(Tables.rows(survey)) == result
@test DataFrame(Tables.columns(survey)) == result
@test DataFrame(survey) == result
assays = Interval(DataFrame(foo=[1, 2, 3], holeid=[1, 1, 2], FROM=[1, 3.5, 0], to=[3.5, 8, 7], A=[1, 2, 3]))
@test Tables.istable(assays)
@test Tables.rowaccess(assays) == true
@test Tables.columnaccess(assays) == true
@test Tables.columnnames(assays) == [:holeid, :FROM, :to, :foo, :A]
result = DataFrame(holeid=[1, 1, 2], FROM=[1, 3.5, 0], to=[3.5, 8, 7], foo=[1, 2, 3], A=[1, 2, 3])
@test DataFrame(Tables.rows(assays)) == result
@test DataFrame(Tables.columns(assays)) == result
@test DataFrame(assays) == result
end
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 1.4.9 | acbeb83e1710ce103d505fae4431e970a2825791 | docs | 2599 | # DrillHoles.jl
[![Build Status][build-img]][build-url] [![Coverage][codecov-img]][codecov-url]
Desurvey and composite drill hole tables from the mining industry.
## Installation
Get the latest stable release with Julia's package manager:
```
] add DrillHoles
```
## Usage
Given a *collar table*, a *survey table* and at least one *interval
table* (such as assay and lithology), the function `desurvey` can
be used for desurveying and compositing. Examples of these tables
are shown bellow:

- *Collar table*: stores the coordinates (X, Y, Z) of each drill
hole with given ID (HOLEID).
- *Survey table*: stores the arc length (AT) and azimuth (AZM) and
dip (DIP) angles along the drill hole trajectory. Together with the
collar table it fully specifies the trajectory.
- *Interval table*: stores the actual measurements taken on cylinders
of rock defined by an interval of arc lenghts (FROM and TO). Usually,
there are multiple interval tables with different types of measurements.
Assuming that each of these tables was loaded into a
[Tables.jl](https://github.com/JuliaData/Tables.jl) table
(e.g. CSV.File, DataFrame), we can use the following constructors
to automatically detect the columns:
```julia
using DrillHoles
using CSV
collar = Collar(CSV.File("collar.csv"))
survey = Survey(CSV.File("survey.csv"))
assay = Interval(CSV.File("assay.csv"))
litho = Interval(CSV.File("litho.csv"))
```
If the columns of the tables follow an exotic naming convention,
users can manually specify the names with keyword arguments:
```julia
# manually specify column with hole ID
Collar(CSV.File("collar.csv"), holeid = "MyHoleID")
```
Please check the documentation of `Collar`, `Survey` and `Interval`
for more details.
By default, the `desurvey` function returns a `GeoTable` compatible with
the [GeoStats.jl](https://github.com/JuliaEarth/GeoStats.jl) framework.
It supports different dip angle conventions from open pit and underground
mining as well as different stepping methods:
```julia
samples = desurvey(collar, survey, [assay, litho])
```
The option `geom` can be used to control the output format, and the option
`len` can be used for compositing. Please check the documentation for more
details.
[build-img]: https://img.shields.io/github/actions/workflow/status/JuliaEarth/DrillHoles.jl/CI.yml?branch=master&style=flat-square
[build-url]: https://github.com/JuliaEarth/DrillHoles.jl/actions
[codecov-img]: https://codecov.io/gh/JuliaEarth/DrillHoles.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/JuliaEarth/DrillHoles.jl
| DrillHoles | https://github.com/JuliaEarth/DrillHoles.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | code | 998 | using GrayCoding
using Documenter
DocMeta.setdocmeta!(GrayCoding, :DocTestSetup, :(using GrayCoding); recursive=true)
push!(LOAD_PATH,"../src/")
makedocs(;
modules=[GrayCoding],
authors="Nivedita Rethnakar et al.",
repo="https://github.com/nivupai/GrayCoding.jl/blob/{commit}{path}#{line}",
sitename="GrayCoding.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://nivupai.github.io/GrayCoding.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Algebra of Gray Codes" => "algebra.md",
"Applications" => [
"List of Applications" => "applications.md",
"Quantum Algorithms and Circuits" => "quantum.md",
"QAM modulation in WiFi communications" => "wireless.md",
"DNA Coding" => "dna.md"
],
"Tutorials" => "tutorials.md",
],
)
deploydocs(;
repo="github.com/nivupai/GrayCoding.jl",
devbranch="main",
)
| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | code | 19344 | module GrayCoding
using LinearAlgebra
using Gadfly
# Write your package code here.
"""
Generate Encoding and Decoding matrices for Gray Codes of alphabet.
```julia-repl
julia> G,B,g,b=GrayMatrix(4, 2);
julia> G
4×4 Matrix{Int64}:
1 0 0 0
1 1 0 0
1 1 1 0
1 1 1 1
julia> B
4×4 Matrix{Int64}:
1 0 0 0
1 1 0 0
0 1 1 0
0 0 1 1
julia> g
4×16 Matrix{Int64}:
0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
0 0 0 0 1 1 1 1 0 0 0 0 1 1 1 1
0 0 1 1 0 0 1 1 0 0 1 1 0 0 1 1
0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1
julia> b
4×16 Matrix{Int64}:
0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0
0 0 1 1 1 1 0 0 0 0 1 1 1 1 0 0
0 1 1 0 0 1 1 0 0 1 1 0 0 1 1 0
```
"""
function GrayMatrix(n::Int64=3, q::Int64=2)
M = q^n
G = Array(Bidiagonal(ones(1, n)[:], (q - 1) * ones(1, n - 1)[:], :L))
B = mod.(inv(G), q)
G = convert.(Int64, G)
B = convert.(Int64, B)
x = 0:M-1
a = digits.(x, base = q, pad = n) # left-lsb
# a=digits.(x,base=2,pad=num_bits)|> reverse # left-msb
b = hcat(a...)
b = reverse(b, dims = 1)
g = Int.(mod.(G * b, q))
return B, G, b, g
end
"""
Plots a matrix into a 2D with labels. Optional arguments including colors
```julia-repl
julia> using Random;
julia> A= rand(0:9,10,10);
julia> matrixplot(A)
```
"""
function matrixplot(A;kwargs...)
a,b=size(A)
X = transpose(repeat(1:b, 1, a))[:]
Y = repeat(a:-1:1, b)[:]
Gadfly.plot(x = X, y = Y, Geom.rectbin(), color = A,alpha=[0.5], Coord.cartesian(fixed = true), Theme(bar_spacing = 0.1cm), Geom.label(position = :centered), label = string.(A)[:], Theme(key_position = :none, grid_line_width = 0pt, panel_stroke = nothing), Guide.xticks(ticks = nothing, label = false), Guide.yticks(ticks = nothing, label = false), Guide.xlabel(nothing), Guide.ylabel(nothing);kwargs...)
end
"""
Plot the DNA codon matrix
```julia-repl
julia> dnamatrix()
```
"""
function dnamatrix()
U = [(x,y) for x ∈ 0:7, y ∈ 0:7]
B,G,b,g=GrayMatrix(3,2)
V = [string(g[:,y+1]...)string(g[:,x+1]...) for x ∈ 0:7, y ∈ 0:7]
revV = [string(g[:,y+1]...)string(g[:,x+1]...) for x ∈ 0:7, y ∈ 0:7]
Vx = [string(g[:,y+1]...) for x ∈ 0:7, y ∈ 0:7]
Vy = [string(g[:,x+1]...) for x ∈ 0:7, y ∈ 0:7]
VM=[(Vx[i,j][1]Vy[i,j][1]Vx[i,j][2]Vy[i,j][2]Vx[i,j][3]Vy[i,j][3]) for i ∈ 1:8, j ∈ 1:8]
DNA=parse.(Int,VM,base=2)
# DM0=[replace(Vx[i,j][1]Vy[i,j][1],"00"=>"C","01"=>"A","10"=>"U","11"=>"G")replace(Vx[i,j][2]Vy[i,j][2],"00"=>"C","01"=>"A","10"=>"U","11"=>"G")replace(Vx[i,j][3]Vy[i,j][3],"00"=>"C","01"=>"A","10"=>"U","11"=>"G") for i ∈ 1:8, j ∈ 1:8]
DM=[replace(Vx[i,j][1]Vy[i,j][1],"00"=>"C","01"=>"A","10"=>"U","11"=>"G")replace(Vx[i,j][2]Vy[i,j][2],"00"=>"C","01"=>"A","10"=>"U","11"=>"G")replace(Vx[i,j][3]Vy[i,j][3],"00"=>"C","01"=>"A","10"=>"U","11"=>"G") for j ∈ 1:8, i ∈ 1:8]
AA=copy(DM) # Amino Acid
replace!(AA,"CUU"=>"Leucine","CUC"=>"Leucine","CUA"=>"Leucine","CUG"=>"Leucine","UUA"=>"Leucine","UUG"=>"Leucine","UUU"=>"Phenylalanine","UUC"=>"Phenylalanine","AUC"=>"Isoleucine","AUA"=>"Isoleucine","AUU"=>"Isoleucine","AUG"=>"Methionine","GUA"=>"Valine","GUC"=>"Valine","GUU"=>"Valine","GUG"=>"START","UCA"=>"Serine","UCC"=>"Serine","UCU"=>"Serine","UCG"=>"Serine","CCC"=>"Proline","CCA"=>"Proline","CCU"=>"Proline","CCG"=>"Proline","ACU"=>"Threonine","ACA"=>"Threonine","ACC"=>"Threonine","ACG"=>"Threonine","GCC"=>"Alanine","GCU"=>"Alanine","GCA"=>"Alanine","GCG"=>"Alanine","GGU"=>"Glycine","GGA"=>"Glycine","GGC"=>"Glycine","GGG"=>"Glycine","CGA"=>"Arginine","CGC"=>"Arginine","CGU"=>"Arginine","CGG"=>"Arginine","GAU"=>"Aspartic acid","GAC"=>"Aspartic acid","GAA"=>"Glutamic acid","GAG"=>"Glutamic acid","AAU"=>"Asparagine","AAC"=>"Asparagine","UGU"=>"Cysteine","UGC"=>"Cysteine","UGG"=>"Tryptophan","CAA"=>"Glutamine","CAG"=>"Glutamine","UAA"=>"STOP","UAG"=>"STOP","UAU"=>"Tyrosine","UAC"=>"Tyrosine","AAA"=>"Lysine","AAG"=>"Lysine","CAC"=>"Histidine","CAU"=>"Histidine","AGG"=>"Arginine","AGA"=>"Arginine","AGU"=>"Serine","AGC"=>"Serine","UGA"=>"STOP" )
return DM,VM,AA,Vx,Vy,DNA
end
"""
Decimal to binary conversion
```julia-repl
julia> dec2bin(10,7)
```
"""
function dec2bin(x,n)
a=digits.(x,base=2,pad=n) # left-lsb
# a=digits.(x,base=2,pad=num_bits)|> reverse # left-msb
b=hcat(a...);
return b
end
"""
Binary to decimal number conversion. Input can be
- binary strings,
- binary digits or
- a vector of binary string or digits
```julia-repl
julia> bin2dec([011,"0111",0111])
```
"""
bin2dec(u) =[parse(Int,string(u[:,ii]...),base=2) for ii in 1:size(u,2)]
bin2dec(x::AbstractVector) = parse(Int,string(x...),base=2)
bin2dec(x::Int64) = parse(Int,string(x),base=2)
bin2dec(x::AbstractString) = parse(Int,x,base=2)
"""
Pulse amplitude modulation (PAM) mapping. This is a type of digital modulation mapping used in Communication systems.
"""
function pam_encode(x,M)
# M --> M-QAM
n=Int(ceil(log2(M)/2))
B,G,b,g=find_gray_matrix(n)
u=digits(x,base=2,pad=n) |> reverse
v=Int.(mod.(G*u,2))
w=bin2dec(v)
y=-sqrt(M)+1+2*w
return y
end
"""
Generate Gray vectors
"""
function gen_gray(m)
x=[0 1]
for i in 2:m
rev_x=reverse(x)
x=[x rev_x.+2^(i-1)]
end
return x[:]
end
"""
Decimal to binary conversion
"""
function dec2bin(x,n)
a=digits.(x,base=2,pad=n) # left-lsb
# a=digits.(x,base=2,pad=num_bits)|> reverse # left-msb
b=hcat(a...);
return b
end
"""
Recursive function to illustrate the reflection+shift property of Gray mapping.
### Arguents
* n - The iteration number `n ≥ 0`
* C - The decimal sequence of the gray mapped bits
* R - The reflected sequence (without shifting)
```julia-repl
julia> C,R = gray_recursion(4)
```
"""
function gray_recursion(n::Int)
C= n < 1 ? [0] : vcat(gray_recursion(n - 1)[1:end], gray_recursion(n - 1)[end:-1:1] .+ Int(exp2(n-1)) )
return C
end
"""
Reflected code.
```julia-repl
julia>reflect_code(3)
[0,1,3,2,2,3,1,0]
```
"""
function reflect_code(n)
n<1 ? [0] : vcat(gray_recursion(n-1),reverse(gray_recursion(n-1)))
end
"""
Recursive construction of binary Gray code digits.
Gray code ``g[n]`` can be recursively constructed as follows.
Start with `` g[1] = (0,1) = (g_{1},g_{2})``
```math
g[n+1] = 0g_{1},...,0g_{N−1},0g_{N},1g_{N},1g_{N−1},...,1g_{1}.
```
### Examples
```julia-repl
julia> gray(3)
3×8 Matrix{Int64}:
0 0 0 0 1 1 1 1
0 0 1 1 0 1 1 0
0 1 1 0 1 1 0 0
```
"""
function gray(n)
n < 2 ? [0 1] : hcat(vcat(0,gray(n-1)),vcat(1,reverse(gray(n-1))))
end
"""
Find the Givens embedding ``{}^{i}G_{j,k}(A)``
"""
function G(A,i,j,k)
n = size(A,2)
G = Matrix(1.0I(n).+0im)
α = A[k,i]
β = A[j,i]
Γ0 = [ α' β'
-β α ]
N = norm(Γ0[1,:],2);
Γ = Γ0./N
# Embed the Givens matrix Γ in G
G[k,k] = Γ[1,1]
G[k,j] = Γ[1,2]
G[j,k] = Γ[2,1]
G[j,j] = Γ[2,2]
return G,G*A
end
"""
Find the matrix which has a Givens matrix embedding ``{}^{i}G_{j,k}(A)``.
For a given matrix ``A``, a generic rotation matrix ``{}^{i}G_{j,k}(A)`` is generated. The matrix ``{}^{i}G_{j,k}(A)`` is such that it helps to selectively nullifies an element of matrix ``V={}^{i}G_{j,k}(A) A``. That is, ``V_{j,i}=0`` where ``V={}^{i}G_{j,k}(A) A``. The fllowing Givens rotation matrix ``{}^{i}\\Gamma_{j,k}``
```math
\\begin{aligned}
{}^{i}\\Gamma_{j,k} &= \\begin{pmatrix} {}^{i}g_{k,k} & {}^{i}g_{k,j} \\\\ {}^{i}g_{j,k} & {}^{i}g_{j,j}\\end{pmatrix} \\\\
&=\\frac{1}{\\sqrt{\\lvert a_{j,i} \\rvert^{2}+ \\lvert a_{k,i} \\rvert^{2}}}\\begin{pmatrix} a_{k,i}^{*} & a_{j,i}^{*} \\\\ -a_{j,i} & {}^{i}a_{k,i}\\end{pmatrix}.
\\end{aligned}
```
is embedded in an identity matrix ``I(n)`` to produce,
```math
{}^{i}G_{j,k}(A) = \\begin{pmatrix}
1 & 0 & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & 0 \\\\
0 & 1 & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\vdots \\\\
\\vdots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\vdots \\\\
0 & 0 & \\ddots & {\\color{red} {}^{i}g_{k,k}} & \\ddots & {\\color{red} {}^{i}g_{k,j}} & \\ddots & \\ddots & \\vdots \\\\
\\vdots & \\ddots & \\ddots & \\ddots & \\ddots \\ddots & & \\ddots & \\ddots & \\vdots \\\\
0 & 0 & \\ddots & {\\color{red} {}^{i}g_{j,k}} & \\ddots & {\\color{red}
{}^{i}g_{j,j}} & \\ddots & \\ddots & \\vdots \\\\
\\vdots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\vdots \\\\
0 & 0 & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & 1 & 0 \\\\
0 & 0 & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & 1
\\end{pmatrix}.
```
Essentially, ``{}^{i}G_{j,k}(A)`` is a modified identity matrix such that four non trivial elements are taken from the givens rotation matrix ``{}^{i}\\Gamma_{j,k}``.
"""
function GivensG(A,i,j,k)
n = size(A,2)
G = Matrix(1.0I(n).+0im)
α = A[k,i]
β = A[j,i]
Γ0 = [ α' β'
-β α ]
N = norm(Γ0[1,:],2);
Γ = Γ0./N
# Embed the Givens matrix Γ in G
G[k,k] = Γ[1,1]
G[k,j] = Γ[1,2]
G[j,k] = Γ[2,1]
G[j,j] = Γ[2,2]
return G,G*A
end
"""
Given unitary matrux ``U(n) \\in S(2^n)``, it uses a a repeated Givens rotations to get a to level matrix as follows.
```math
\\prod_{j=1}^{n-2}{ {}^{1}G_{n-j,n-j-1} U(n)} = \\begin{pmatrix} 1 & 0 \\\\ 0 & U(n-1)\\end{pmatrix}
```
### Parameters
* U -- Input. Unitary matrix of size ``2^n``
* V -- Output unitary matrix in two level form `[1 0;0 U']` form where `U'` is a unitary matrix of size ``2^{n-1}``.
* GG -- The ``n-2`` sequence of Given matrices (in augmented form) ``[{}^{1}G_{n}\\lvert{}^{1}G_{n-1}\\lvert\\ldots\\lvert{}^{1}G_{2}]``
### Examples
```julia-repl
julia> level2unitary(U)
```
"""
function level2unitary(U)
n=size(U,2)
V=I(n)*U;
GG=Matrix(I(n))
for i=1:1
for j=0:n-1-i
G0,V=GivensG(V,i,n-j,n-j-1)
GG=[GG;G0]
end
end
return GG[n+1:end,:],V
end
"""
Decomposition of aribtrary unitary matrix ``U(n) \\in S(2^n)``, as a cascade of two level Givens matrices.
Using the following property,
```math
\\prod_{j=0}^{n-1}{ {}^{1}G_{n-j,n-j-1} U(n)} = \\begin{pmatrix} 1 & 0 \\\\ 0 & U(n-1)\\end{pmatrix}
```
```math
\\prod_{i=1}^{n-1} \\prod_{j=0}^{n-1-i}{ {}^{i}G_{n-j,n-j-1} U(n)} = \\begin{pmatrix} 1 & \\ldots & 0 \\\\ \\vdots & \\ddots & \\vdots \\\\ 0 & \\ldots & \\det(U)\\end{pmatrix}.
```
There are ``2^{n-1}(2^{n}-1)`` number of unitary two-level matrices (each of them formed by embedding a Givens rotaton matrix into indentity matrix). Note that ``\\sum_{i=1}^{2^{n}}{N-i}=2^{n-1}(2^{n}-1)``.
### Parameters
* U -- Input. Unitary matrix of size ``2^n``
* V -- Output unitary matrix in two level form `[1 0;0 U']` form where `U'` is a unitary matrix of size ``2^{n-1}``.
* Gm -- The ``(n-2)(n-1)`` sequence of Given matrices (in augmented form) ``[{}^{1}G_{n}\\lvert{}^{1}G_{n-1}\\lvert\\ldots\\lvert{}^{1}G_{2}\\lvert {}^{2}G_{n}\\lvert{}^{2}G_{n-1}\\lvert\\ldots\\lvert{}^{2}G_{3}\\lvert\\ldots\\lvert{}^{n-2}G_{n}\\lvert{}^{n-2}G_{n-1}\\lvert{}^{n-1}G_{n}]``
* Gs -- The ``(n-2)(n-1)`` sequence of Given matrices (in augmented form) left to right ``[ {}^{n-1}G_{n} \\lvert {}^{n-2}G_{n-1} \\lvert {}^{n-2}G_{n} \\lvert \\ldots \\lvert {}^{2}G_{3} \\lvert \\ldots \\lvert {}^{2}G_{n-1} \\lvert {}^{2}G_{n} \\lvert{}^{1}G_{2} \\lvert \\ldots \\lvert {}^{1}G_{n-1} \\lvert {}^{1}G_{n} ]``
### Examples
```julia-repl
julia> using LinearAlgebra
julia> N=4;A=rand(N,N)+im*rand(N,N);S=svd(A);U=S.U
julia> Gn(U)
```
"""
function Gn(A)
n=size(A,2)
V=I(n)*A;
Gm=Matrix(I(n))
Gs=Matrix(I(n))
for i=1:n-1
for j = 0:n-1-i
G1,V = GivensG(V,i,n-j,n-j-1)
Gm = [Gm ; G1]
Gs = [G1 ; Gs]
end
end
return V,Gm[n+1:end,:],Gs[1:end-n,:]
end
"""
For a given unitary matrix ``U``, it finds the cascaded rotation matrix ``C`` such that ``C\\times U =I``, except for the diagonal element of the ``n``th element which is ``\\det(U)``. The matrix ``C`` is obtained by the cascade of several (i.e., ``2^{n-1}(2^{n}-1)``) two level Givens rotation matrices, namely,
```math
C = \\prod_{i=1}^{n-1} \\prod_{j=0}^{n-1-i}{ {}^{i}G_{n-j,n-j-1}}
```
```math
\\prod_{i=1}^{n-1} \\prod_{j=0}^{n-1-i}{ {}^{i}G_{n-j,n-j-1} U(n)} = \\begin{pmatrix} 1 & \\ldots & 0 \\\\ \\vdots & \\ddots & \\vdots \\\\ 0 & \\ldots & \\det(U)\\end{pmatrix}.
```
### Parameters
* U -- Input. Unitary matrix of size ``n``
* Ic -- Identity matix with the exception that the last diagonal entry (``n``th diagonal element) which is ``\\det(U)``.
* C -- The cascaded rotation matrix ``{}^{n-1}G_{n} \\times {}^{n-2}G_{n-1} \\times {}^{n-2}G_{n} \\times \\ldots \\times {}^{2}G_{3} \\times \\ldots \\times {}^{2}G_{n-1} \\times {}^{2}G_{n} \\times {}^{1}G_{2} \\times \\ldots \\times {}^{1}G_{n-1} \\times {}^{1}G_{n} ``
### Examples
```julia-repl
julia> using LinearAlgebra
julia> N=4;A=rand(N,N)+im*rand(N,N);S=svd(A);U=S.U
4×4 Matrix{ComplexF64}:
-0.4-0.06im 0.23-0.73im -0.03-0.14im 0.39-0.27im
-0.61-0.32im 0.07+0.06im 0.32+0.02im -0.64-0.08im
-0.38-0.33im -0.48+0.38im -0.07-0.4im 0.46+0.07im
-0.2-0.26im -0.09-0.14im -0.7+0.47im -0.09+0.38im
julia> Gc,Ic=Gcascaded(U)
4×4 Matrix{ComplexF64}:
1.0+0.0im 0.0+0.0im -0.0+0.0im 0.0-0.0im
0.0+0.0im 1.0-0.0im 0.0+0.0im -0.0-0.0im
0.0-0.0im -0.0+0.0im 1.0+0.0im 0.0+0.0im
0.0-0.0im -0.0+0.0im 0.0-0.0im 0.4-0.9im
julia> det(A)
0.4166084175706718 - 0.9090860390575042im
```
"""
function Gcascaded(U)
    # Compute the cascaded rotation matrix C = ∏ ⁱG_{j,k} such that
    # C*U is the identity except for the last diagonal entry det(U).
    # Generalized: the original hard-coded the size 4 (`I(4)`, `/4`, `q*4`);
    # using n = size(U, 2) supports any n×n unitary and is identical for n == 4.
    n = size(U, 2)
    Gv = Gn(U)[3]                 # stacked Givens matrices, n rows per block
    Gc = Matrix(I(n))
    m = Int(size(Gv, 1) / n)      # number of elementary rotation blocks
    for q in 0:m-1
        Gc = Gc * Gv[1+q*n:n*(q+1), :]
    end
    Ic = Gc * U                   # ≈ diag(1, …, 1, det(U))
    return Gc, Ic
end
"""
For ``n`` bits, with the corresponding decimal sequence ``x=0,1,2,\\ldots,2^{n-1}``, find the gray ordering sequence using the bitwise `XOR` logic. Namely.
``gπ= x ⊻ \\lfloor x/2 \\rfloor = x \\oplus \\lfloor x/2 \\rfloor`` where ``⊻`` is the bitwise `XOR` operation.
```julia-repl
julia> gray_ordering(3)
[0,1,3,2,6,7,5,4]
```
"""
function gray_ordering(n)
    # Gray sequence for n bits: g(x) = x XOR floor(x/2); for non-negative
    # integers floor(x/2) is exactly the right shift by one bit.
    N = 2^n
    return [x ⊻ (x >> 1) for x in 0:(N - 1)]
end
""" Converts a gray mapped decimal number to simple binary mapped decimal number. This routine follows the inverse mapping of the function `gray_ordering()`
### Parameters
* ``n`` The number of digits in the equivalent binary mapping
* ``g`` The gray mapped decimal number
* ``d`` The simple binary mapped decimal number
```julia-repl
julia> G2D.(0:7,3)
[0,1,3,2,7,6,4,5]
```
"""
function G2D(g::Int,n)
    # Invert the gray_ordering map: find where the gray-coded decimal `g`
    # sits in the plain binary sequence.
    # (Removed unused local `a = collect(1:2^n)` from the original.)
    xx = gray_ordering(n) .+ 1   # 1-based positions of each gray value
    p = invperm(xx)
    d = p[g+1]
    return d - 1                 # back to 0-based decimal
end
"""
Quantum circuit decomposition for an arbitrary unitary matrix on ``n`` qubits.
Arbitrary quantum circuit abstracted by unitary matrix ``U`` decomposed by ``2^{n-1}2^{n}``
unitary two-level matrices, each of which corresponds ``{}^{i}\\Gamma_{j,k}``.
The program produce the ``\\Gamma`` matrix and the coefficients ``i,j,k``. The quantum
circuit of this decomposition can be visualized as a cascade (from left to right) of this matrix.
"""
function sequenceΓ(k::Int64)
    # Enumerate the (i, j, k) index triples of the two-level Givens matrices
    # that decompose an arbitrary n-qubit unitary (n = 2^k states), in
    # gray-code order.
    n = Int(2^k)
    # Build the (level, row) index sequences directly with push!.
    # The original used a -100 sentinel row-matrix that was hcat-grown and
    # stripped afterwards, plus unused locals `N` and `m`.
    dI = Int[]
    dJ = Int[]
    for i = 1:n-1
        for j = n:-1:(i+1)
            push!(dI, n - i)
            push!(dJ, j)
        end
    end
    gV = gray_ordering(k)
    # Translate plain indices into gray-ordered, 1-based indices.
    gJ2 = gV[dJ .- 1] .+ 1
    gI = gV[n .- dI] .+ 1
    gJ = gV[dJ] .+ 1
    ii = reverse(gI)
    jj = reverse(gJ)
    kk = reverse(gJ2)
    Γ = hcat(ii, jj, kk)'   # 3×(n(n-1)/2) matrix of index triples
    return ii, jj, kk, Γ
end
"""
The unitary matrix ``U`` corresponding to the 3-qubit Toffoli quantum gate
```math
U=\\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\\\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\\\
0 & 0 & 0 & 0 & 0 & 0 & {\\color{red}0} & {\\color{red}1} \\\\
0 & 0 & 0 & 0 & 0 & 0 & {\\color{red}1} & {\\color{red}0} \\\\
\\end{pmatrix}
```
"""
function tiffoli_matrix()
    # The Toffoli gate permutes the last two computational basis states
    # |110⟩ ↔ |111⟩; build it by swapping columns 7 and 8 of the identity.
    swap_last_two = [1, 2, 3, 4, 5, 6, 8, 7]
    return Matrix(I(8))[:, swap_last_two]
end
"""
Given a matrix ``A`` (usually a unitary matrix) and indices ``(i,j,k)``, find the ``\\Gamma`` Givens rotation matrix and the Givens matrix embedding ``{}^{i}G_{j,k}(A)`` matrix.
For a given matrix ``A``, a generic rotation matrix ``{}^{i}G_{j,k}(A)`` is generated. The matrix ``{}^{i}G_{j,k}(A)`` is such that it helps to selectively nullifies an element of matrix ``V={}^{i}G_{j,k}(A) A``. That is, ``V_{j,i}=0`` where ``V={}^{i}G_{j,k}(A) A``. The fllowing Givens rotation matrix ``{}^{i}\\Gamma_{j,k}``
```math
\\begin{aligned}
{}^{i}\\Gamma_{j,k} &= \\begin{pmatrix} {}^{i}g_{k,k} & {}^{i}g_{k,j} \\\\ {}^{i}g_{j,k} & {}^{i}g_{j,j}\\end{pmatrix} \\\\
&=\\frac{1}{\\sqrt{\\lvert a_{j,i} \\rvert^{2}+ \\lvert a_{k,i} \\rvert^{2}}}\\begin{pmatrix} a_{k,i}^{*} & a_{j,i}^{*} \\\\ -a_{j,i} & {}^{i}a_{k,i}\\end{pmatrix}.
\\end{aligned}
```
is embedded in an identity matrix ``I(n)`` to produce,
```math
{}^{i}G_{j,k}(A) = \\begin{pmatrix}
1 & 0 & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & 0 \\\\
0 & 1 & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\vdots \\\\
\\vdots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\vdots \\\\
0 & 0 & \\ddots & {\\color{red} {}^{i}g_{k,k}} & \\ddots & {\\color{red} {}^{i}g_{k,j}} & \\ddots & \\ddots & \\vdots \\\\
\\vdots & \\ddots & \\ddots & \\ddots & \\ddots \\ddots & & \\ddots & \\ddots & \\vdots \\\\
0 & 0 & \\ddots & {\\color{red} {}^{i}g_{j,k}} & \\ddots & {\\color{red}
{}^{i}g_{j,j}} & \\ddots & \\ddots & \\vdots \\\\
\\vdots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\ddots & \\vdots \\\\
0 & 0 & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & 1 & 0 \\\\
0 & 0 & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & \\ldots & 1
\\end{pmatrix}.
```
Essentially, ``{}^{i}G_{j,k}(A)`` is a modified identity matrix such that four non trivial elements are taken from the givens rotation matrix ``{}^{i}\\Gamma_{j,k}``.
```julia-repl
julia> using LinearAlgebra
julia> using GrayCoding
julia>n=2;N=2^n;A=rand(N,N)+im*rand(N,N);S=svd(A);U=S.U
4×4 Matrix{ComplexF64}:
-0.365903-0.405021im 0.442293-0.0769938im … 0.115307-0.288609im
-0.285173-0.35669im -0.671764+0.0698449im -0.384583+0.295428im
-0.196831-0.611652im -0.154487+0.0160399im 0.379159-0.121825im
-0.177839-0.221435im 0.536228-0.175044im -0.338822+0.62835im
julia> i,j,k=1,2,4
julia> Γ,G,GA=quantumΓ(U,i,j,k);
julia> round.(quantumΓ(S.U,1,2,4)[1],digits=1)
2×2 Matrix{ComplexF64}:
-0.3+0.4im -0.5+0.7im
0.5+0.7im -0.3-0.4im
julia> round.(quantumΓ(S.U,1,2,4)[2],digits=1)
4×4 Matrix{ComplexF64}:
1.0+0.0im 0.0+0.0im 0.0+0.0im 0.0+0.0im
0.0+0.0im -0.3-0.4im 0.0+0.0im 0.5+0.7im
0.0+0.0im 0.0+0.0im 1.0+0.0im 0.0+0.0im
0.0+0.0im -0.5+0.7im 0.0+0.0im -0.3+0.4im
julia> round.(quantumΓ(S.U,1,2,4)[3],digits=1)
4×4 Matrix{ComplexF64}:
-0.4-0.4im 0.4-0.1im 0.6+0.1im 0.1-0.3im
0000000 0.7+0.5im -0.2-0.4im -0.3+0.2im
-0.2-0.6im -0.2+0.0im -0.4-0.5im 0.4-0.1im
0.5+0.0im 0.2-0.2im -0.2-0.1im -0.1-0.8im
```
"""
function quantumΓ(A,i,j,k)
    # Build the 2×2 Givens rotation Γ that nullifies V[j,i] of V = G*A,
    # and its embedding G into the n×n identity (rows/cols j and k).
    n = size(A, 2)
    G = Matrix(1.0I(n) .+ 0im)
    a = A[k, i]
    b = A[j, i]
    nrm = sqrt(abs2(a) + abs2(b))
    Γ = [conj(a) conj(b); -b a] ./ nrm
    # Splice the four non-trivial entries into the identity.
    G[k, k] = Γ[1, 1]
    G[k, j] = Γ[1, 2]
    G[j, k] = Γ[2, 1]
    G[j, j] = Γ[2, 2]
    return Γ, G, G * A
end
end
| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | code | 93 | using GrayCoding
using Test
@testset "GrayCoding.jl" begin
# Write your tests here.
end
| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 1107 | # GrayCoding
# Introduction
Welcome to the documentation for GrayCoding!
## What is GrayCoding?
[GrayCoding](https://github.com/nivupai/GrayCoding.jl) is a formal Linear Algebraic framework for ``q``-ry Gray Code.
Encoding and decoding of Gray codes can be treated as a special case of algebraic block coding.
* Encoding: ``g=G*b ``
* Decoding: ``b=B*g ``

!!! tip
    This is still under active development.
## Resources for getting started
There are few ways to get started with GrayCoding:
* Read TBD.
[](https://nivupai.github.io/GrayCoding.jl/stable)
[](https://nivupai.github.io/GrayCoding.jl/dev)
[](https://github.com/nivupai/GrayCoding.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/nivupai/GrayCoding.jl)
| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 3442 | # Algebraic framework of Gray Codes
The classical algorithmic procedure of encoding and decoding are as follows:
## Encoding
q-ry digits ``d`` to gray digits ``g`` conversion.
``
g_{i} = \begin{cases}
d_{i} , & \text{if} \mod\left(\displaystyle{\sum_{j=1}^{i-1}{g_{j}}},2\right)=0 \\
q-1-d_{i} , & \text{if} \mod\left(\displaystyle \sum_{j=1}^{i-1}{g_{j}},2\right)=1
\end{cases}
``
and ``g_{1} = d_{1}``.
## Decoding
``
d_{i} = \begin{cases}
g_{i} , & \text{if} \mod\left(\displaystyle{\sum_{j=1}^{i-1}{g_{j}}},2\right)=0 \\
q-1-g_{i} , & \text{if} \mod\left(\displaystyle \sum_{j=1}^{i-1}{g_{j}},2\right)=1
\end{cases}
``
and ``d_{1} = g_{1}``.
# Linear Algebraic Formulation (N.Rethnakar 2020)
Example of generator matrix ``G`` for binary to gray mapping is given by,
```math
G=\begin{pmatrix} 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & 1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & 1 & 1 & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & {\color{gray}0} & 1 & 1 & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & 1 & 1 \end{pmatrix}
```
The decoding matrix ``B=G^{-1}`` is given by,
```math
B=\begin{pmatrix} 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
1 & 1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
1 & 1 & 1 & 1 & {\color{gray}0} & {\color{gray}0} \\
1 & 1 & 1 & 1 & 1 & {\color{gray}0} \\
1 & 1 & 1 & 1 & 1 & 1 \end{pmatrix}
```
## Illustration of Binary Code

### Generalized ``q``-ry Gray Code
```math
G=\begin{pmatrix} 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
q-1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & q-1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & q-1 & 1 & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & {\color{gray}0} & q-1 & 1 & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & q-1 & 1 \end{pmatrix} \equiv \begin{pmatrix} 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
-1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & -1 & 1 & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & -1 & 1 & {\color{gray}0} & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & {\color{gray}0} & -1 & 1 & {\color{gray}0} \\
{\color{gray}0} & {\color{gray}0} & {\color{gray}0} & {\color{gray}0} & -1 & 1 \end{pmatrix}_{\mathbb{F}_{q}}
```
## Gray Encoding as Differentiation
Encoding matrix operation act as a forward discrete differentiation operation in ``\mathbb{F}_{q}``.

## Gray Decoding as Integrator
 | GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 144 | # Applications
* Digital Modulation Schemes:
* DNA Codon mapping
* Quantum Circuits and Gates
* Digital Electronics/Counters
* Music/Puzzles
| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 165 | # DNA Codon mapping
## 3ry Gray Codes
## Binary equivalance

## Codong matrix mapping

| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 1443 | ```@meta
CurrentModule = GrayCoding
```
# GrayCoding
Documentation for [GrayCoding](https://github.com/nivupai/GrayCoding.jl).
# Introduction
Welcome to the documentation for GrayCoding!
## What is GrayCoding.jl?
[GrayCoding](https://github.com/nivupai/GrayCoding.jl) is a formal Linear Algebraic framework for ``q``-ry Gray Code.
Encoding and decoding of Gray codes can be treated as a special case of algebraic block coding.
* Encoding: ``\textbf{g}=G \textbf{b} ``
* Decoding: ``\textbf{b}=B \textbf{g} ``

!!! tip
    This is still under active development.
## Resources for getting started
There are few ways to get started with GrayCoding:
## Installation
Open a Julia session and enter
```julia
using Pkg; Pkg.add("GrayCoding")
```
this will download the package and all the necessary dependencies for you. Next you can import the package with
```julia
using GrayCoding
```
and you are ready to go.
## Quickstart
```julia
using GrayCoding
```
## Citation
If you use this package in your work, please cite it as
```
@software{nrethnakar2022GrayAlgebra,
author = {
Nivedita Rethnakar
},
title = {GrayCoding.jl: Algebra of Gray Coding and Applications},
month = {1},
year = {2022},
doi = {10.5281/zenodo.5989996},
url = {https://github.com/nivupai/GrayCoding.jl}
}
```
* Read TBD.
```@index
```
```@autodocs
Modules = [GrayCoding]
```
| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 7249 | # Quantum Computing
Broadly stated, the term quantum computation comprieses of the following three elements:
* A register or a set of registers,
* A unitary matrix ``U``, as an abstract representation of the quantum algorithm,
* Measurements to extract the information of interest.
As a mathematial abstraction, a quantum computation is the set ``\{\mathcal{H},U,\{Mm\}\}``, where ``H = \mathbb{C}^{2^{n}}`` is the Hilbert space of an ``n-``qubit register, ``U \in U\left(2^{n}\right)`` represents the quantum algorithm and ``\{M_{m}\}`` is the set of measurement operators. The hardware circuitry along with equipment to control and manipulate the qubits is called a quantum computer.
!!! info "More on Quantum Computation Information"
    Quantum computing is a fascinating subject. Here we cover only the basics needed to connect the Gray-code framework developed above to quantum circuits. The classic book on this subject, “Quantum Computation and Quantum Information” by Michael A. Nielsen and Isaac L. Chuang (Cambridge University Press, 2000), is the go-to reference for more.
# Single qubit gates
These are the simplest set of gates which take one qubit as an input, act upon (change the state) and produce a qubit at the output.
A generic 1-qubit quantum gate corresponds to a ``2 \times 2`` unitary matrix, which has the following form:
```math
U = e^{\imath \theta} \begin{pmatrix} a & -b^{*} \\\ b & a^{*} \end{pmatrix}
```
where ``a,b \in \mathbb{C}`` such that ``\lvert a \rvert^{2}+ \lvert b \rvert^{2} = 1``, and ``\alpha \in \mathbb{R}`` results in arbitrary rotation. This matrix is essentially a `Givens` rotation matrix [Wikipedia](https://en.wikipedia.org/wiki/Givens_rotation).
## Examples
### ``X`` gate.
* This gate *flips* the state of the qubit. In other words, it changes the state of the qubit from `` a\vert 0 \rangle + b\vert 1 \rangle`` to `` a\vert 1 \rangle+ b\vert 0 \rangle ``.
* The matrix representation for``X`` gate is, `` \sigma_{x}=\begin{pmatrix} 0 & 1 \\\ 1 & 0 \end{pmatrix} ``. Multiplying the vector representing the qubit to the matrix is equivalent to the gate operation. i.e., `` \begin{pmatrix} b \\\ a \end{pmatrix} =\begin{pmatrix} 0 & 1 \\\ 1 & 0 \end{pmatrix} \begin{pmatrix} a \\\ b \end{pmatrix} ``
### ``Y`` gate.
* This gate perform two *flips* (a *bit flip* and a *phase flip*) on the state of the qubit. In other words, it changes the state of the qubit from `` a\vert 0 \rangle + b\vert 1 \rangle``, to `` b\vert 0 \rangle - a\vert 1 \rangle``.
* The matrix representation for``Y`` gate is, `` \sigma_{y}=\begin{pmatrix} 0 & -1 \\\ 1 & 0 \end{pmatrix} ``. Multiplying the vector representing the qubit to the matrix is equivalent to the gate operation.
### ``Z`` gate.
* This gate performs a *sign flip* on the state of the qubit. In other words, it changes the state of the qubit from `` a\vert 0 \rangle + b\vert 1 \rangle``, to `` a\vert 0 \rangle - \vert 1 \rangle``.
* The matrix representation for ``X`` gate is, `` \sigma_{z}=\begin{pmatrix} 1 & 0 \\\ 0 & -1 \end{pmatrix} ``. Multiplying the vector representing the qubit to the matrix is equivalent to the gate operation.
The matrix representations `` \sigma_{x}, \;\sigma_y\;and\; \sigma_z `` are known as Pauli's matrices.
### Hadamard gate: ``H`` gate
* This gate works as follows: ``\vert 0 \rangle`` changes to `` \frac{1}{\sqrt2}(\vert 0 \rangle +\; \vert 1 \rangle) ``, and the ``\vert 1 \rangle`` changes to `` \frac{1}{\sqrt2}(\vert 0 \rangle - \vert 1 \rangle)``.
* For an example, `` a\vert 0 \rangle + b\vert 1 \rangle, `` changes to, `` \frac{a}{\sqrt2}(\vert 0 \rangle + \vert 1 \rangle) + \frac{b}{\sqrt2}(\vert 0 \rangle - \vert 1 \rangle)``.
* It can be simplified to give, ``\frac{a+b}{\sqrt2}\vert 0 \rangle + \frac{a-b}{\sqrt2} \vert 1 \rangle``.
Mathematically, the following transformation captures the essence of ``H`` gate.
```math
\begin{pmatrix} \frac{a+b}{\sqrt2} \\\\ \frac{a-b}{\sqrt2} \end{pmatrix} = \frac{1}{\sqrt2} \begin{pmatrix} 1 & 1 \\\ 1 & -1 \end{pmatrix} \begin{pmatrix} a \\\ b \end{pmatrix}
```
# Decomposition of Quantum Gates
## Two level decomposition
A 2-level unitary corresponds to a unitary operation that acts non-trivially on only 2 of the states. Any controlled 1-qubit gate can be abstracted into a 2-level unitary matrix, e.g. for a single-qubit gate ``U = \begin{pmatrix} a & b \\\\ c & d \end{pmatrix}``.
## Universal Decomposition
Any arbitrary unitary gate acting on n-qubit can be implemented as a cascade of single-qubit and controlled-NOT (CNOT) gates.
```math
\textbf{U}|\psi \rangle = \begin{pmatrix} u_{11} & u_{12} & \ldots & u_{1,N} \\ u_{11} & u_{12} & \ldots & u_{1,N} \end{pmatrix}
```
## CNOT gates
CNOT stands for Controlled-NOT, which is one of the key quantum logic gate. It is a two qubit gate. The gate flips the second qubit (called the target qubit) when the first gate (the control gate) is ``\lvert 1 \rangle``, while the target qubit remain unchanged when the control gate is in state ``\lvert 0 \rangle``.
```math
\begin{aligned}
U_{\text{CNOT}} &= \lvert 0 \rangle \otimes \langle 0 \rvert \otimes \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} + \lvert 1 \rangle \otimes \langle 1 \rvert \otimes \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}\\
&= \lvert 0 \rangle \langle 0 \rvert \otimes \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} + \lvert 1 \rangle \langle 1 \rvert \otimes \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}\\
&= \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0\end{pmatrix}
\end{aligned}
```
In terms of Gray matrix, we can also simply express this as,
```math
\begin{aligned}
\begin{pmatrix} \lvert \acute{\psi}_{1} \rangle \\ \lvert \acute{\psi}_{2} \rangle \end{pmatrix} &= G_{2} \begin{pmatrix} \lvert \psi_{1} \rangle \\ \lvert \psi_{2} \rangle \end{pmatrix} \\
&= \begin{pmatrix} 1 & 0 \\ 1 & 1 \end{pmatrix} \begin{pmatrix} \lvert \psi_{1} \rangle \\ \lvert \psi_{2} \rangle \end{pmatrix}
\end{aligned}
```
---
The simplest CNOT gate is the single qubit controlled CNOT discussed above, which can be explicitly denoted as ``C^{1}\text{NOT}``. Generalization of this to multi quibit controlled CNOT, denoted by ``C^{n-1}\text{NOT}``.
---
In quantum circuit design, applying a rotation for which the binary representations of i − 1 and j − 1 differ in a single bit can be accomplished by a single fully-controlled one-qubit rotation (a particular Givens rotation) and hence costs a small number of gates. All other rotations require a permutation of data before the rotation is applied and thus should be avoided.
### Generic U decomposion
#### Examples: 3-qubit generic quantum gate
```julia-repl
julia> A1,A2,A3,A4=sequenceΓ(3);
julia> A4
3×28 adjoint(::Matrix{Int64}) with eltype Int64:
6 8 8 7 7 7 3 3 3 3 4 4 4 4 4 2 2 2 2 2 2 1 1 1 1 1 1 1
5 6 5 8 6 5 7 8 6 5 3 7 8 6 5 4 3 7 8 6 5 2 4 3 7 8 6 5
6 8 6 7 8 6 3 7 8 6 4 3 7 8 6 2 4 3 7 8 6 1 2 4 3 7 8 6
```

| GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 3200 | # Examples
## Recursive construction

### Recursive procedure

Reflect ``C[n-1]``, shift by ``q^{n-1}`` and augment (TBD).
For ``n \in \mathbb{N}``, positive integer and ``N = 2^{n}``. A Gray code ``G_{n}`` is an tuple ``G_{n} = (X_{1},X_{2},...,X_{N})`` which satisfies the following properties:
* ``X_{1}, X_{2}, ... , X_{N}`` are binary sequences (of length ``n``) corresponding to the binary representation of the numbers ``0, 1, \ldots , N − 1``, arranged in a specific order,
* For any `` 0 \le j \le N-1``, adjacent pairs ``X_{j},X_{j+1}`` differ in only one position (i.e., only one of the ``n`` bits would differ),
* The start and end sequences (i.e., sequences ``X_{1}`` and ``X_{N}`` differ in just one position.
Gray code ``G_{n}`` can be recursively constructed as follows.
Start with ``G_{1} = (0,1)`` and for ``N=2^n, n \ge 1``,
Let ``G_{n} = \left(X_{1},\ldots,X_{N−1},X_{N}\right)``,
```math
G_{n+1} = \left(0X_{1},\ldots,0X_{N−1},0X_{N},1X_{N},1X_{N−1},...,1X_{1}\right).
```
## Illustration
```julia-repl
using Plots
function plotmatrix(A;kwargs...)
a,b=size(A)
X = transpose(repeat(1:b, 1, a))[:]
Y = repeat(a:-1:1, b)[:]
scatter(X,Y, marker_z = A[:], marker=:rect,markersize = 4, color = :viridis,aspectratio=1,ylims=[0,size(G,1)+1],alpha=1,label=:none,colorkey=:none,axis=:none;kwargs...)
julia> plotmatrix(gray(6));
julia> plotmatrix(G,size=(800,400),color=:summer)
julia> plotmatrix(G,size=(800,200),color=:summer,markersize=7,xlims=[1,size(G,2)+0],ylims=[1/2,size(G,1)-0])
end
```
#### Binary Gray Code ``n=4``

#### Binary Gray Code ``n=5``

#### Binary Gray Code ``n=6``

## Linear Algebraic method
TBD
``g=Gb`` and ``b=Bg``, where ``G`` is a Jordan matrix, which is
```julia-repl
julia> n,q=4,2
julia> GrayCoding.GrayMatrix(n,q)
4×4 Matrix{Int64}:
1 0 0 0
1 1 0 0
1 1 1 0
1 1 1 1
4×4 Matrix{Int64}:
1 0 0 0
1 1 0 0
0 1 1 0
0 0 1 1
4×16 Matrix{Int64}:
0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
0 0 0 0 1 1 1 1 0 0 0 0 1 1 1 1
0 0 1 1 0 0 1 1 0 0 1 1 0 0 1 1
0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1
4×16 Matrix{Int64}:
0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0
0 0 1 1 1 1 0 0 0 0 1 1 1 1 0 0
0 1 1 0 0 1 1 0 0 1 1 0 0 1 1 0
julia> G,B,g,b=GrayCoding.GrayMatrix(10,5);
julia> G
10×10 Matrix{Int64}:
1 0 0 0 0 0 0 0 0 0
1 1 0 0 0 0 0 0 0 0
1 1 1 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0
1 1 1 1 1 0 0 0 0 0
1 1 1 1 1 1 0 0 0 0
1 1 1 1 1 1 1 0 0 0
1 1 1 1 1 1 1 1 0 0
1 1 1 1 1 1 1 1 1 0
1 1 1 1 1 1 1 1 1 1
julia>B
10×10 Matrix{Int64}:
1 0 0 0 0 0 0 0 0 0
4 1 0 0 0 0 0 0 0 0
0 4 1 0 0 0 0 0 0 0
0 0 4 1 0 0 0 0 0 0
0 0 0 4 1 0 0 0 0 0
0 0 0 0 4 1 0 0 0 0
0 0 0 0 0 4 1 0 0 0
0 0 0 0 0 0 4 1 0 0
0 0 0 0 0 0 0 4 1 0
0 0 0 0 0 0 0 0 4 1
``` | GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.5 | 5eab5deb315ab62dd45d01e19f79a2b5d98b8839 | docs | 124 | # Digital Modulation scheme in Wireless Communication
# QAM modulation in WiFi systems
 | GrayCoding | https://github.com/nivupai/GrayCoding.jl.git |
|
[
"MIT"
] | 0.1.0 | 395e378bf16dca15f10aa4875e745363f77960ec | code | 102 | # This file is a part of BitArraynge.jl, licensed under the MIT License (MIT).
println("building...")
| BitArraynge | https://github.com/AstroFloof/BitArraynge.jl.git |
|
[
"MIT"
] | 0.1.0 | 395e378bf16dca15f10aa4875e745363f77960ec | code | 1283 | # This file is a part of BitArraynge.jl, licensed under the MIT License (MIT).
baremodule BitArraynge

using Base
using BitOperations

#= Credit to Gandalf#4004 in the Humans of Julia discord
I might use this at some point =#
# _cvt_int(x) = (bv = BitVector((false,)) ; bv.chunks[1] = UInt64(x) ; bv.len = 64 ; bv)

# Bit positions of integer type `I`, most-significant bit first.
_bitindices(I) = range(bsizeof(I)-1, step=(-1), stop=0)

export bitsof
# Return the bits of `n` as a BitVector, most-significant bit first.
# The two-argument form takes a precomputed `StepRange` of bit positions.
@inline bitsof(n::I, b_inds::StepRange) where {I <: Integer} = bget.(n, b_inds)
@inline bitsof(n::I) where {I <: Integer} = bget.(n, _bitindices(I))

export appendbitsof!
# Append the bits of `n` (or of every element of `N`) to BitVector `V`.
appendbitsof!(V::BitVector, n::I) where I <: Integer = append!(V, bitsof(n, _bitindices(I)))
appendbitsof!(V::BitVector, n::I, bitindices::StepRange) where I <: Integer = append!(V, bitsof(n, bitindices))
function appendbitsof!(V::BitVector, N::Vector{I}) where I <: Integer
    # Hoist the bit-index range: it depends only on the element type.
    bitindices = _bitindices(I)
    for n in N
        appendbitsof!(V, n, bitindices)
    end
end

export bmat_from_ints
# Matrix whose columns are the bit patterns of the integers in `V`.
function bmat_from_ints(V::Vector{I}) where {I <: Integer}
    hcat((V .|> bitsof)...)
end

export bvec_from_ints
# One long BitVector: the bits of each integer in `V`, concatenated.
function bvec_from_ints(V::Vector{I}) where {I <: Integer}
    # Bug fix: the original referenced undefined lowercase `v` here.
    vcat((V .|> bitsof)...)
end

export bvec_from_int
# Single-integer convenience wrapper.
# (The original broadcast over a scalar, `bitsof.(n)`, which is equivalent
# to a plain call; the dot was spurious.)
@inline bvec_from_int(n::I) where {I <: Integer} = bitsof(n)

end # module
| BitArraynge | https://github.com/AstroFloof/BitArraynge.jl.git |
|
[
"MIT"
] | 0.1.0 | 395e378bf16dca15f10aa4875e745363f77960ec | code | 92 | # This file is a part of BitArraynge.jl, licensed under the MIT License (MIT).
@assert true
| BitArraynge | https://github.com/AstroFloof/BitArraynge.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 14160 | module DistributedJLFluxML
using Distributed
using Flux.Optimise: AbstractOptimiser
using Flux
# using Zygote: @nograd # has been useful in the past, keeping it here incase.
"""
    cond_put!(chan, dat)

Put `dat` onto `chan` unless `chan === nothing`, in which case do nothing.
Used so status reporting can be disabled by passing `status_chan=nothing`.
Returns the result of `put!`, or `nothing` when reporting is disabled.
"""
function cond_put!(chan, dat)
    # Idiom fix: compare against `nothing` with `===` (identity), not `!=`,
    # which relies on `==` methods and is the canonical Julia anti-pattern.
    if chan === nothing
        return nothing
    end
    return put!(chan, dat)
end
"""
    makeStatDict(status_name; kwargs...)

Assemble a status-report Dict carrying the event name, the reporting
worker's id and hostname, a wall-clock timestamp, and any extra keyword
entries (which may override the defaults).
"""
function makeStatDict(status_name::String; kwargs...)
    report = Dict{Symbol,Any}(:statusName => status_name,
                              :myid => myid(),
                              :hostname => gethostname(),
                              :timestamp => time())
    for (key, val) in kwargs
        report[key] = val
    end
    return report
end
include("OptOutAllReduce.jl")
"""
Leaving this hear for future work. Looks like theres a problem with Distributed & Threads that
might be breaking this.
See https://github.com/JuliaLang/julia/issues/32677 and
https://github.com/JuliaLang/julia/pull/33555
When Julia 1.8 is realeased, readdress this
"""
function build_data_loader_from_RemoteChannel(rChan)
    # Wrap a Distributed.RemoteChannel in a local Channel so downstream code
    # can consume data uniformly. NOTE(review): flagged experimental by the
    # author (see docstring above); errors other than "channel closed" are
    # silently dropped because the `rethrow()` below is commented out.
    chan = Channel() do ch
        while true
            try
                res = take!(rChan)
                put!(ch, res)
            catch err
                # A local InvalidStateException(:closed) means the source
                # channel was closed cleanly; stop forwarding.
                if isa(err, InvalidStateException) & (err.state == :closed)
                    break
                end
                # The same closed-channel condition arrives wrapped in a
                # RemoteException when the channel lives on another worker.
                if isa(err, RemoteException) &
                    isa(err.captured.ex, InvalidStateException) &
                    (err.captured.ex.state == :closed)
                    break
                end
                #rethrow()
            end
        end
    end
end
"""
std_grad_calc(xy, loss_f, model; device)
Standard combination on loss_f, model, input and output to be used in
```
l = Flux.gradient(θ) do
...
end
```
# Arguments
- `xy::Tuple`: A tuple with the first item as the model input and second item the expected model output
- `loss_f::Function`: evauates `loss_f(y̅, y)`
- `model::Chain`: The model to be trained
# Returns
- `Float`: the value of the loss calculated by loss_f
"""
function std_grad_calc(xy, loss_f, model; device=gpu)
    x, y = xy
    # Normalisation factor: total element count of the input batch.
    # (Idiom: `prod(size(x))` replaces the original `reduce(*, size(x))`.)
    loss_fac_cpu = Float32(prod(size(x)))
    # Move the factor to `device` so the division stays on-device.
    loss_fac_gpu = loss_fac_cpu |> device
    loss_f(model(x), y, agg=sum)/loss_fac_gpu
end
"""
do_train_on_remote()
Not documented. Called though @swpanat in `train!()`. No one whould be calling this directly.
"""
function do_train_on_remote(loss_f, model, data, opt; status_chan=nothing,
                            saved_model_dir=nothing,
                            master=myid(),
                            cb=()->nothing,
                            device=gpu,
                            size_data_load_buff=2,
                            save_on_step_cb=st -> true,
                            grad_calc=std_grad_calc)
    # Per-worker training loop: consume batches from `data` until a literal
    # :End symbol arrives, allReduce-average gradients across workers each
    # step, update the (worker-local) model, and on the `master` worker
    # optionally checkpoint parameters to `saved_model_dir`.
    if (master == myid()) & (saved_model_dir != nothing)
        # Checkpoint stream; (step, params) tuples are serialized below.
        global save_model_f = open(joinpath(saved_model_dir, "savedModelParam.jlb"), "w")
    end
    θ = Flux.params(model)
    # CPU-side Float32 scratch buffers, one per parameter array, reused for
    # every allReduce round to avoid reallocating.
    gr_share = [zeros(Float32, size(l)) for l in θ]
    acum_gr_share = copy(gr_share)
    # TODO: In future versions of Julia, we may not need to handle
    # remote channels ourselves 5/13/22:saxton
    # https://github.com/JuliaLang/julia/pull/33555
    # https://github.com/JuliaLang/julia/pull/41966
    #if isa(data, RemoteChannel)
    #    global data_dl = build_data_loader_from_RemoteChannel(data)
    #else
    #    global data_dl = data
    #end
    # See docstring in build_data_loader_from_RemoteChannel
    # Prefetch pipeline: a spawned task pulls batches from `data` and stages
    # device-transfer generators in a bounded local channel.
    data_dl = Channel(size_data_load_buff, spawn=true) do ch
        while true
            t_dat = take!(data)
            if typeof(t_dat) == Symbol
                if t_dat == :End
                    # Forward the sentinel so the training loop also stops.
                    put!(ch, t_dat)
                    break
                end
            end
            # Lazy generator: items move to `device` when consumed.
            gpu_data = (_d |> device for _d in t_dat)
            put!(ch, gpu_data)
        end
    end
    loss_rep = Float32(0.0)
    step = 0
    while true
        xy = take!(data_dl)
        if typeof(xy) == Symbol
            if xy == :End
                cond_put!(status_chan, makeStatDict("do_train_on_remote.finished";
                                                    :step=>step))
                break
            end
        end
        step += 1
        cond_put!(status_chan, makeStatDict("do_train_on_remote.step";
                                            :step=>step))
        #loss_fac_cpu = Float32(230400)
        gr = Flux.gradient(θ) do
            #l = loss(xy...)
            #l = loss_f(model(x), y; agg=sum)/loss_fac_gpu
            #l = loss(x, y)/loss_fac_gpu
            l = grad_calc(xy, loss_f, model; device=device)
            # Capture a CPU Float32 copy of the loss for status reporting.
            loss_rep = l |> f32
            return l
        end
        cond_put!(status_chan, makeStatDict("do_train_on_remote.step.grad";
                                            :step=>step,
                                            :loss=>loss_rep,
                                            :ImNew=>3))
        # Average each gradient across workers: copy to the CPU scratch
        # buffer, allReduce(+), then divide by the participant count.
        for (sh, acumm_sh, p) in zip(gr_share, acum_gr_share, gr.params)
            if gr.grads[p] != nothing
                copyto!(sh, gr.grads[p])
                _ret_sh = OptOutAllReduce.allReduce(+, sh)
                copyto!(gr.grads[p], _ret_sh[1]/_ret_sh[2])
            end
        end
        cond_put!(status_chan, makeStatDict("do_train_on_remote.step.shared";
                                            :step=>step))
        Flux.Optimise.update!(opt, θ, gr)
        cb()
        if (master == myid()) & (saved_model_dir != nothing) & save_on_step_cb(step)
            serialize(save_model_f, (step, θ))
            flush(save_model_f)
            cond_put!(status_chan, makeStatDict("do_train_on_remote.step.saved_model";
                                                :step=>step))
        end
    end
    cond_put!(status_chan, makeStatDict("do_train_on_remote.finished.wait";
                                        :step=>step))
    # Drain: keep issuing :Skip allReduce rounds until no other worker is
    # still reducing, so stragglers don't deadlock waiting on this worker.
    _ret_sh = OptOutAllReduce.allReduce(+, :Skip)
    while true
        if _ret_sh[2] ==0
            break
        end
        _ret_sh = OptOutAllReduce.allReduce(+, :Skip)
    end
    cond_put!(status_chan, makeStatDict("do_train_on_remote.finished.done";
                                        :step=>step))
    # Return a CPU copy of the trained parameters.
    return [p |> cpu for p in θ]
end
unit_test_example_path = joinpath(splitpath(pathof(DistributedJLFluxML))[1:end-2]...,"test")
"""
train!(loss, model, data, opt, workers;
cb, save_on_step_cb, status_chan, save_model_dir, device,
no_block, grad_calc=std_grad_calc)
Uses a `loss` function and training `data` to improve the `model` parameters according to a particular optimisation rule `opt`. Runs the training loop in parellel on `workers`, agrigates the gradients through an **allReduce**, then updates the model parameters.
# Example
See test dir $(unit_test_example_path), in particular $(joinpath(unit_test_example_path, "trainModel.jl")), for more details.
Partition and load data on workers. The following example loads an already partitioned version of the iris dataset
```
batch_size=8
epochs = 50
deser_fut = [@spawnat w global rawData = deserialize(f)
for (w, f) in zip(workersToHostData, shard_file_list)]
for fut in deser_fut
wait(fut)
end
epoch_length_worker = @fetchfrom p[1] nrow(rawData)
@everywhere p labels = ["Iris-versicolor", "Iris-virginica", "Iris-setosa"]
@everywhere p x_array =
Array(rawData[:,
[:sepal_l, :sepal_w,
:petal_l, :petal_w
]])
@everywhere p y_array =
Flux.onehotbatch(rawData[:,"class"],
labels)
@everywhere p dataChan = Channel(1) do ch
n_chunk = ceil(Int,size(x_array)[1]/\$batch_size)
x_dat = Flux.chunk(transpose(x_array), n_chunk)
y_dat = Flux.chunk(y_array, n_chunk)
for epoch in 1:\$epochs
for d in zip(x_dat, y_dat)
put!(ch, d)
end
end
end
@everywhere p datRemChan = RemoteChannel(() -> dataChan, myid())
datRemChansDict = Dict(k => @fetchfrom w datRemChan for (k, w) in zip(workersToRunTrain,workersToHostData))
```
Once the data is set up for your needs, then you need to define the model, loss, optimizer and pass it to `train!`
```
loss_f = Flux.Losses.logitcrossentropy
opt = Flux.Optimise.ADAM(0.001)
model = Chain(Dense(4,8),Dense(8,16), Dense(16,3))
DistributedJLFluxML.train!(loss_f, model, datRemChansDict, opt, p)
```
# Example
Argument `grad_calc` is meant for novel nomalization schemes. For example, if your `DataChannel` returns a 3 touple, say `(x, s, y)`, a desired grad calc coule be
```
function node_norm_grad_calc(xsy, loss_f, model; device=gpu)
x,s,y = xsy
loss_f(model(x), y, agg=sum)/s
end
```
When `train!` returns, it will have updated the parameters of `model`.
# Arguments
- `loss::Function`: evauates `loss(y, y̅)`
- `model::Chain`: The model to be trained
- `data::Dict{Int,RemoteChannel}`: The dict key is the worker id that the remote channel will be sent to
- `opt::AbstractOptimizer`: The optimized used during training
- `workers::AbstractArray`: List of workers ids to perform training on.
- `cb::Function`: a callback that is called after optimize.update! on each training worker
- `save_on_step_cb::Function`: The training step is passed to this cb on each training iteration. If the cb returns true, a copy of the model will be saved to `saved_model_dir`
- `saved_model_dir::String`: path to directory where saved model will be placed
- `device::Function`: the device a model will be copied to on remote worker. usually `gpu` or `cpu`
- `grad_calc::Function`: a function that will be called in Flux.gradient to combine a single data sample, the model, and the loss function. See `std_grad_calc` as an example.
# Returns
- `nothing`
# Throws
- `nothing`
"""
function train!(_loss_f, _model::Chain, _data,
                _opt::AbstractOptimiser, trainWorkers;
                cb=()->nothing,
                save_on_step_cb=st -> true,
                status_chan=nothing,
                saved_model_dir=nothing,
                device=gpu,
                grad_calc=std_grad_calc)
    # Set up the ring channels used by the workers' gradient all-reduce.
    OptOutAllReduce.init(trainWorkers)
    # Stage model, loss and optimizer as globals on every training worker.
    model_fut = [@spawnat w global model = _model |> device for w in trainWorkers]
    loss_f_fut = [@spawnat w global loss_f = _loss_f for w in trainWorkers]
    opt_fut = [@spawnat w global opt = _opt for w in trainWorkers]
    wait.([model_fut..., loss_f_fut..., opt_fut...])
    # NOTE(review): `cb` is accepted but currently never forwarded to the
    # workers — wire it through do_train_on_remote when that API supports it.
    train_fut = []
    for w in trainWorkers
        # BUGFIX: forward the caller-supplied `save_on_step_cb` instead of a
        # hard-coded `st -> true`, which silently ignored the keyword argument.
        fut = @spawnat w do_train_on_remote(loss_f, model, _data[w], opt; status_chan=status_chan,
                                            saved_model_dir=saved_model_dir,
                                            master=trainWorkers[1],
                                            device=device,
                                            save_on_step_cb=save_on_step_cb,
                                            grad_calc=grad_calc)
        push!(train_fut, fut)
    end
    wait.(train_fut)
    # Copy the trained parameters from the first worker back into `_model`,
    # so the caller's model reflects the result of training.
    θ = Flux.params(_model)
    θ_rem = fetch(train_fut[1])
    for (p1, p2) in zip(θ, θ_rem)
        copy!(p1, p2)
    end
    return train_fut
end
"""
    do_eval_on_remote(model, data_dl; status_chan=nothing, get_step=nothing, device=gpu)

Worker-side evaluation loop: repeatedly take batches from the channel
`data_dl`, run them through `model` on `device`, and collect the CPU-side
results until the sentinel symbol `:End` is taken from the channel.

# Arguments
- `model`: called as `model(x)` on each batch.
- `data_dl`: channel yielding batches, terminated by `:End`.
- `status_chan`: optional channel for progress/status messages.
- `get_step`: accepted but currently unused by this function.
- `device`: moves a batch onto the evaluation device (e.g. `gpu`).

# Returns
- `Vector` of per-batch model outputs (moved to the CPU), in consumption order.
"""
function do_eval_on_remote(model, data_dl;
                           status_chan=nothing, get_step=nothing,
                           device=gpu)
    cond_put!(status_chan, makeStatDict("do_eval_on_remote.start";
                                        :message=>"Starting",
                                        ))
    y=[]
    while true
        x = take!(data_dl)
        if typeof(x) == Symbol
            if x == :End
                break
            end
        end
        # NOTE(review): a Symbol other than :End falls through to `model(x)`
        # below — presumably the producers only ever send batches or :End;
        # confirm against the channel set up in eval_model / the test scripts.
        x_device = x |> device
        y_cpu = model(x_device) |> cpu
        push!(y,y_cpu)
    end
    return y
end
"""
    eval_model(saved_model_dir::String, model, _data, workers;
               status_chan=nothing, get_step=nothing,
               device=gpu)

Load model parameters from `joinpath(saved_model_dir, "savedModelParam.jlb")`
into `model` — scanning the serialized `(step, params)` records until `step ==
get_step` or end of file (the last record read wins) — then evaluate the model
on `_data` across `workers` via `eval_model(model, _data, workers; ...)`.

Not tested yet. Still need to build model saving in `train!`.
"""
function eval_model(saved_model_dir::String, model, _data, workers;
                    status_chan=nothing, get_step=nothing,
                    device=gpu)
    local step = nothing
    local _θ = nothing
    # BUGFIX: the file was previously opened twice (leaking the first handle)
    # and the second `open` referenced a misspelled variable (`save_mdel_f`),
    # which raised `UndefVarError` at runtime.  Also use locals instead of
    # `global`s so repeated calls do not pollute module state.
    open(joinpath(saved_model_dir, "savedModelParam.jlb"), "r") do f
        while true
            try
                (step, _θ) = deserialize(f)
            catch e
                isa(e, EOFError) && break
                rethrow()  # don't silently swallow unrelated errors
            end
            step == get_step && break
        end
    end
    _θ === nothing &&
        error("no saved model parameters found in $(saved_model_dir)")
    # Copy the loaded parameters into the caller's model.
    θ = Flux.params(model)
    for (ld, ls) in zip(θ, _θ)
        copy!(ld, ls)
    end
    # BUGFIX: the evaluation method does not accept a `get_step` keyword;
    # forwarding it raised a MethodError.
    res = eval_model(model, _data, workers; status_chan=status_chan, device=device)
    return res
end
"""
    eval_model(model, data, evalWorkers;
               status_chan=nothing, get_step=nothing,
               device=gpu)
This function evaluates the model on a set of data partitioned across many workers. `eval_model` will deploy `model` and the appropriate `RemoteChannel` from `data` to `evalWorkers`. There, it will call `model(x)` on the data iterated by `data[myid()]`. Finally, the results will be fetched and aggregated into a single array.
# Arguments
- `model::Chain`: The model that will be evaluated
- `data::Dict{Int,RemoteChannel}`: The dict key is the worker id that the remote channel will be sent to
- `evalWorkers::AbstractArray`: List of workers ids to perform evaluation on.
- `status_chan::RemoteChannel`: status messages and data will be placed on this channel to monitor progress
- `device::Function`: the device a model will be copied to on remote worker. usually `gpu` or `cpu`
# Returns
- `Array`: concatenated array of results from each of the workers
# Throws
- `nothing`
"""
function eval_model(_model, _data, evalWorkers; status_chan=nothing, device=gpu)
    # Stage the model (moved to `device`) as a global on every eval worker.
    model_fut = [@spawnat w global model = _model |> device for w in evalWorkers];
    wait.(model_fut)
    eval_fut = []
    for w in evalWorkers
        # Each worker drains its own data channel until it yields :End.
        fut = @spawnat w DistributedJLFluxML.do_eval_on_remote(model, _data[w]; status_chan=status_chan)
        push!(eval_fut, fut)
    end
    wait.(eval_fut)
    # Concatenate the per-worker lists of batch results into one array.
    res = vcat(fetch.(eval_fut)...)
    return res
end
end # module
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 4439 | module OptOutAllReduce
using Distributed
"""
    init(_allReduceWorkers; ReduceType=Any, ChanDepth=2)
`init()` sets up `RemoteChannel()`'s and related indices for `allReduce()` to use on subsequent calls. See `allReduce` help for more details.
# Arguments
- `_allReduceWorkers::Array{Int}`: Array of worker ids that will perform an all-reduce among themselves.
- `ReduceType`: element type of the ring channels (default `Any`).
- `ChanDepth`: buffer depth of each ring channel (default 2).
# Returns
- Nothing
# Throws
- Nothing
"""
function init(_allReduceWorkers; ReduceType=Any, ChanDepth=2)
    # Already initialized on any participating worker?  Then this is a no-op.
    if any([@fetchfrom w isdefined(OptOutAllReduce, :finished_init)
            for w in _allReduceWorkers])
        return
    end
    # One channel per worker, hosted on that worker.
    totAllReduceChan = Dict(w_i => RemoteChannel(()->Channel{ReduceType}(ChanDepth), w_i)
                            for w_i in _allReduceWorkers)
    n_w = length(_allReduceWorkers)
    w_l = sort(_allReduceWorkers)
    allReduce_chan_fut = []
    for m_id in _allReduceWorkers
        # Ring topology: each worker's right neighbour is the next id in
        # sorted order, wrapping around at the end.
        w_i_i_map = Dict(w_i => i for (i,w_i) in zip(0:(n_w-1), w_l))
        i_w_i_map = Dict(i => w_i for (i,w_i) in zip(0:(n_w-1), w_l))
        _right_id = i_w_i_map[(w_i_i_map[ m_id] + 1) % n_w]
        fut = @spawnat m_id global right_id = _right_id
        push!(allReduce_chan_fut, fut)
        # Each worker keeps handles to its own channel and its neighbour's.
        fut = @spawnat m_id global allReduceChan = Dict(m_id => totAllReduceChan[m_id],
                                                        _right_id => totAllReduceChan[_right_id])
        push!(allReduce_chan_fut, fut)
        fut = @spawnat m_id global allReduceWorkers = _allReduceWorkers
        push!(allReduce_chan_fut, fut)
        # `finished_init` doubles as the "already initialized" marker above.
        fut = @spawnat m_id global finished_init = true
        push!(allReduce_chan_fut, fut)
    end
    for fut in allReduce_chan_fut
        wait(fut)
    end
end
"""
    allReduce(func, dat)
`allReduce()` performs the All Reduce collective, with an "Opt Out" option, across the group of workers passed to init(). If a worker calls `allReduce()` passing the symbol `:Skip` to `dat`, that worker will perform a "No Op" on the reduce. Lastly, the returned value is a 2 item tuple whose first item is the value of the reduce and whose second item is the number of items that were reduced.
# Examples
```
using Distributed
p = addprocs(3)
@everywhere using DistributedJLFluxML
DistributedJLFluxML.OptOutAllReduce.init(p)
mock_vals = [1,2,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
            for (v,w) in zip(mock_vals,p)]
[fetch(fut) for fut in allR_fut] # [(6,3), (6,3), (6,3)]
mock_vals = [1,:Skip,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
            for (v,w) in zip(mock_vals,p)]
[fetch(fut) for fut in allR_fut] # [(4,2), (4,2), (4,2)]
```
# Arguments
- `func(x,y)::Function`: Aggregation function used in the reduce. `func()` must be associative to produce meaningful results.
- `dat::Any`: Singleton data point to aggregate, or the `Symbol` `:Skip` to opt out of the reduce.
# Returns
- `allReduce(...)::Tuple{Any, Int64}`
# Throws
- `ERROR`: If `init()` was not run or did not finish correctly, throws an error message telling you so
"""
function allReduce(func, dat)
    # Guard: init() must have populated the ring globals on this worker.
    if !@isdefined finished_init
        throw("""allReduce is not being called on a worker which """*
              """init() was run. """*
              """See allReduce() docString for help. myid() $(myid()) """)
    end
    if !finished_init
        throw("init() did not run successfully for $(myid())")
    end
    # Phase 1: count the participants (workers passing :Skip contribute 0).
    countD = dat == :Skip ? 0 : 1
    pDat = countD
    n_rWorkers = length(allReduceWorkers)
    for _i in 1:(n_rWorkers-1)
        put!(allReduceChan[myid()], pDat)
        tDat = take!(allReduceChan[right_id])
        pDat = tDat + countD
    end
    # One extra hop so every worker ends up holding the global count.
    put!(allReduceChan[myid()], pDat)
    numReduceSteps = take!(allReduceChan[right_id])
    if numReduceSteps == 0
        # Everybody opted out: nothing to reduce.
        return (nothing, 0)
    end
    if dat == :Skip
        # Phase 2 (opt-out path): forward values around the ring unchanged,
        # ending up with the fully reduced value like everyone else.
        tDat = nothing
        for _i in 1:(n_rWorkers)
            tDat = take!(allReduceChan[right_id])
            put!(allReduceChan[myid()], tDat)
        end
        return (tDat, numReduceSteps)
    else
        # Phase 2 (participant path): for numReduceSteps-1 hops fold the
        # incoming partial with this worker's own `dat`, ...
        pDat = dat
        tDat = nothing
        for _i in 1:(numReduceSteps-1)
            put!(allReduceChan[myid()], pDat)
            tDat = take!(allReduceChan[right_id])
            pDat = func(tDat,dat)
        end
        # ... then circulate the finished value for the remaining hops so all
        # workers (including opted-out ones) receive it.
        for _i in 1:(n_rWorkers-numReduceSteps+1)
            put!(allReduceChan[myid()], pDat)
            tDat = take!(allReduceChan[right_id])
            pDat = tDat
        end
        return (pDat, numReduceSteps)
    end
end
end
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 2664 | @testset "Opt Out All Reduce" begin
DistributedJLFluxML.OptOutAllReduce.init(p)
@test all([@fetchfrom w DistributedJLFluxML.OptOutAllReduce.finished_init for w in p])
right_ids = Set([@fetchfrom w DistributedJLFluxML.OptOutAllReduce.right_id for w in p])
@test right_ids == Set(p)
test_vals = [1,2,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
for (v,w) in zip(test_vals,p)]
if !all([isready(fut) for fut in allR_fut])
sleep(2)
end
t = @test all([isready(fut) for fut in allR_fut])
if isa(t, Test.Pass)
@test [fetch(fut) for fut in allR_fut] == [(6,3), (6,3), (6,3)]
end
test_vals = [1,:Skip,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
for (v,w) in zip(test_vals,p)]
if !all([isready(fut) for fut in allR_fut])
sleep(2)
end
t = @test all([isready(fut) for fut in allR_fut])
if isa(t, Test.Pass)
@test [fetch(fut) for fut in allR_fut] == [(4,2), (4,2), (4,2)]
end
test_vals = [1,:Skip,:Skip]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
for (v,w) in zip(test_vals,p)]
if !all([isready(fut) for fut in allR_fut])
sleep(2)
end
t = @test all([isready(fut) for fut in allR_fut])
if isa(t, Test.Pass)
@test [fetch(fut) for fut in allR_fut] == [(1,1), (1,1), (1,1)]
end
test_vals = [:Skip, :Skip,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
for (v,w) in zip(test_vals,p)]
if !all([isready(fut) for fut in allR_fut])
sleep(2)
end
t = @test all([isready(fut) for fut in allR_fut])
if isa(t, Test.Pass)
@test [fetch(fut) for fut in allR_fut] == [(3,1), (3,1), (3,1)]
end
test_vals = [:Skip,2,:Skip]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
for (v,w) in zip(test_vals,p)]
if !all([isready(fut) for fut in allR_fut])
sleep(2)
end
t = @test all([isready(fut) for fut in allR_fut])
if isa(t, Test.Pass)
@test [fetch(fut) for fut in allR_fut] == [(2,1), (2,1), (2,1)]
end
test_vals = [:Skip,:Skip,:Skip]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
for (v,w) in zip(test_vals,p)]
if !all([isready(fut) for fut in allR_fut])
sleep(2)
end
t = @test all([isready(fut) for fut in allR_fut])
if isa(t, Test.Pass)
@test [fetch(fut) for fut in allR_fut] == [(nothing,0),(nothing,0),(nothing,0)]
end
end
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 1988 | @testset "evaluate model" begin
batch_size=8
@everywhere p evalDataChan = Channel(1) do ch
n_chunk = ceil(Int,size(x_array)[1]/$batch_size)
x_dat = Flux.chunk(transpose(x_array), n_chunk)
for d in x_dat
put!(ch, d)
end
put!(ch, :End)
end
@everywhere p evalDatRemChan = RemoteChannel(() -> evalDataChan, myid())
test_path = joinpath(splitpath(pathof(DistributedJLFluxML))[1:end-2]...,
"test")
trainWorkers_shift = circshift(p, 1)
# ^^^ shift workers to reuse workers as ^^^
# ^^^ remote data hosts ^^^
datRemChansDict = Dict(k => @fetchfrom w evalDatRemChan
for (k,w) in zip(p, trainWorkers_shift))
model = @fetchfrom p[1] DistributedJLFluxML.model
global res = DistributedJLFluxML.eval_model(model, datRemChansDict, p; status_chan)
y = vcat([@fetchfrom w Flux.chunk(y_array, ceil(Int,size(y_array)[2]/batch_size)) for w in trainWorkers_shift]...)
loss_f = Flux.Losses.logitcrossentropy
evalLosses = [loss_f(r,_y) for (r, _y) in zip(res, y)]
n_steps_in_batch = length(evalLosses)
_dtor_status_array = [s for s in status_array
if (s[:statusName] == "do_train_on_remote.step.grad")]
last_step = maximum([s[:step] for s in _dtor_status_array])
trainLosses = [s[:loss] for s in _dtor_status_array
if (s[:step] > (last_step - n_steps_in_batch))]
@test mean(evalLosses) < mean(trainLosses) + sqrt(r2(ols))*3 # test that eval_model used the model that we passed
max_block = maximum([length(i) for i in y])
shuf_mask = [length(i) == max_block for i in y]
shuffle_idx = collect(1:(length(y[shuf_mask])))
shuffle!(shuffle_idx)
shufEvalLosses = [loss_f(r,_y) for (r, _y) in zip(res[shuf_mask], y[shuf_mask][shuffle_idx])]
@test mean(shufEvalLosses) > mean(evalLosses) #test that eval_model consitantly agrigates w.r.t. it's input
end
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 2457 | using Test
using Distributed
using ClusterManagers
using Pkg
using Flux
using Statistics
using Random
using GLM
using DataFrames
using CSV
if Base.current_project() != nothing
proj_path = joinpath(["/",
splitpath(Base.current_project())[1:end-1]...])
p = addprocs(SlurmManager(3),
time="00:30:00",
exeflags="--project=$(proj_path)", ntasks_per_node=1)
else
p = addprocs(SlurmManager(3),
time="00:30:00",
ntasks_per_node=1)
end
ap_dir = joinpath(splitpath(Base.active_project())[1:end-1])
if "tmp" == splitpath(ap_dir)[2]
hostNames = [@fetchfrom w gethostname() for w in p]
ap_dir_list = [joinpath(ap_dir, d) for d in readdir(ap_dir)]
for (w, hn) in zip(p, hostNames)
@fetchfrom w begin
open(`mkdir $(ap_dir)`) do f
read(f, String)
end
end
end
for hn in hostNames
for fpn in ap_dir_list
open(`scp $(fpn) $(hn):$(fpn)`) do f
read(f, String)
end
end
end
end
@everywhere begin
using Pkg
Pkg.activate($(ap_dir))
# Pkg.instantiate()
end
@everywhere begin
using GLM
using DistributedJLFluxML
using Flux
using DataFrames
using CSV
end
status_chan = RemoteChannel(()->Channel{Any}(10000), myid())
status_array = []
stat_tsk = @async begin
while isopen(status_chan)
push!(status_array, take!(status_chan))
if length(status_array) > 10000
popfirst!(status_array)
end
end
end
## Load Mock data
mockData_path = joinpath(
splitpath(pathof(DistributedJLFluxML))[1:end-2]...,
"mockData", "iris.data"
)
headers = [:sepal_l, :sepal_w, :petal_l, :petal_w, :class]
__totData = CSV.read(mockData_path, DataFrame, header=headers)
_totData = [__totData[1:50, :],
__totData[51:100, :],
__totData[101:end, :]
]
totData = Dict(i=>v for (i,v) in zip(p, _totData))
@everywhere p rawData = $(totData)[myid()]
epoch_length_worker = @fetchfrom p[1] nrow(rawData)
@everywhere p labels = ["Iris-versicolor", "Iris-virginica", "Iris-setosa"]
@everywhere p x_array =
Array{Float32}(rawData[:,
[:sepal_l, :sepal_w,
:petal_l, :petal_w
]])
@everywhere p y_array =
Flux.onehotbatch(rawData[:,:class],
labels)
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 587 | using DataFrames
using CSV
using Random
using Serialization
# Split the iris mock data into three serialized DataFrame shards, one per
# test worker.
# BUGFIX: the package module is `DistributedJLFluxML`; the stale name
# `DistributedFluxML` raised `UndefVarError` when locating the mock data.
mockData_path = joinpath(splitpath(pathof(DistributedJLFluxML))[1:end-2]..., "mockData")
data_fn = joinpath(mockData_path, "iris.data") #bezdekIris.data
col_names = ["sepal_l", "sepal_w", "petal_l", "petal_w", "class"]
df_tot = CSV.read(data_fn, DataFrame; header=col_names)
data_shard_fns = [joinpath(mockData_path, "iris_df_$(i).jlb") for i in 1:3]
# Shuffle row indices so each shard gets a mix of all three classes.
sh_idx = collect(1:nrow(df_tot))
shuffle!(sh_idx)
for (fn, idx) in zip(data_shard_fns, [sh_idx[1:50], sh_idx[51:100], sh_idx[101:end]])
    serialize(fn, df_tot[idx, :])
end
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 145 | using DistributedJLFluxML
# Test entry point.  The included scripts share the workers `p` and globals
# created in globalTestInit.jl, so inclusion order matters (train before eval).
include("globalTestInit.jl")
include("OptOutAllReduce.jl")
include("trainModel.jl")
include("evalModel.jl")
# Tear down the cluster workers started in globalTestInit.jl.
rmprocs(p)
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | code | 2164 | @testset "train model" begin
batch_size=8
epochs = 50
@everywhere p dataChan = Channel(1) do ch
n_chunk = ceil(Int,size(x_array)[1]/$batch_size)
x_dat = Flux.chunk(transpose(x_array), n_chunk)
y_dat = Flux.chunk(y_array, n_chunk)
for epoch in 1:$epochs
for d in zip(x_dat, y_dat)
put!(ch, d)
end
end
put!(ch, :End)
end
@everywhere p datRemChan = RemoteChannel(() -> dataChan, myid())
trainWorkers_shift = circshift(p, 1)
# ^^^ shift workers to reuse workers as ^^^
# ^^^ remote data hosts ^^^
datRemChansDict = Dict(k => @fetchfrom w datRemChan for (k,w) in zip(p, trainWorkers_shift))
loss_f = Flux.Losses.logitcrossentropy
opt = Flux.Optimise.Adam(0.001)
model = Chain(Dense(4,8),Dense(8,16), Dense(16,3))
empty!(status_array)
DistributedJLFluxML.train!(loss_f, model, datRemChansDict, opt, p; status_chan)
finished_workers = Set([s[:myid] for s in status_array if s[:statusName] == "do_train_on_remote.finished"])
test_finshed_res = @test finished_workers == Set(p)
if isa(test_finshed_res, Test.Pass)
remote_params = [@fetchfrom w Flux.params(DistributedJLFluxML.model) for w in p]
θ = Flux.params(model)
@test all(θ .≈ remote_params[1])
@test all(θ .≈ remote_params[2])
@test all(θ .≈ remote_params[3])
end
global log_loss_dict = Dict(w => [(s[:step], log(s[:loss]))
for s in status_array
if s[:statusName] ==
"do_train_on_remote.step.grad" && s[:myid] == w]
for w in p)
raw_data= vcat(values(log_loss_dict)...)
raw_data_trunk = [l for l in raw_data if l[1] > epoch_length_worker*1]
data = DataFrame(raw_data_trunk)
rename!(data, [:Step, :LLoss])
data[!,:LLoss] = convert.(Float64,data[!,:LLoss])
global ols = lm(@formula(LLoss ~ Step), data)
@test coef(ols)[2] < 1e-4 # tests if loss is decaying
@test ftest(ols.model).pval < 1e-20 # tests if loss is decaying
end
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.1.0 | e633f4f35195771abc47d2715e4173b19cfa0a6a | docs | 2817 | # jlDistributedFluxML
This package is to be used with FluxML to train, evaluate (inference), and analyze models on a distributed cluster. At the moment only the Slurm cluster manager has been tested.
## Getting started
Coming soon
### Training
These examples assume that you have already partitioned the data into multiple `DataFrame`s and serialized them using the `Serialization` package into `shard_file_list`
```
using Distributed
p = addprocs(3)
@everywhere using DistributedJLFluxML
batch_size=8
epochs = 50
deser_fut = [@spawnat w global rawData = deserialize(f)
for (w, f) in zip(p, shard_file_list)]
for fut in deser_fut
wait(fut)
end
@everywhere p labels = ["Iris-versicolor", "Iris-virginica", "Iris-setosa"]
@everywhere p x_array =
Array(rawData[:,
[:sepal_l, :sepal_w,
:petal_l, :petal_w
]])
@everywhere p y_array =
Flux.onehotbatch(rawData[:,"class"],
labels)
@everywhere p dataChan = Channel(1) do ch
n_chunk = ceil(Int,size(x_array)[1]/$batch_size)
x_dat = Flux.chunk(transpose(x_array), n_chunk)
y_dat = Flux.chunk(y_array, n_chunk)
for epoch in 1:$epochs
for d in zip(x_dat, y_dat)
put!(ch, d)
end
end
put!(ch, :End)
end
@everywhere p datRemChan = RemoteChannel(() -> dataChan, myid())
trainWorkers_shift = circshift(p, 1)
# ^^^ shift workers to reuse workers as ^^^
# ^^^ remote data hosts ^^^
datRemChansDict = Dict(k => @fetchfrom w datRemChan for (k,w) in zip(p, trainWorkers_shift))
loss_f = Flux.Losses.logitcrossentropy
opt = Flux.Optimise.ADAM(0.001)
model = Chain(Dense(4,8),Dense(8,16), Dense(16,3))
empty!(status_array)
DistributedJLFluxML.train!(loss_f, model, datRemChansDict, opt, p; status_chan)
```
### Custom Gradient Calc
Argument `grad_calc` in `train!` is meant for novel normalization schemes. For example, if your `DataChannel` returns a 3-tuple, say `(x, s, y)`, a desired grad calc could be
```
function node_norm_grad_calc(xsy, loss_f, model; device=gpu)
x,s,y = xsy
loss_f(model(x), y, agg=sum)/s
end
```
### Opt Out All Reduce
```
using Distributed
p = addprocs(3)
@everywhere using DistributedJLFluxML
DistributedJLFluxML.OptOutAllReduce.init(p)
mock_vals = [1,2,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
            for (v,w) in zip(mock_vals,p)]
[fetch(fut) for fut in allR_fut] # [(6,3), (6,3), (6,3)]
mock_vals = [1,:Skip,3]
allR_fut = [@spawnat w DistributedJLFluxML.OptOutAllReduce.allReduce(+, v)
            for (v,w) in zip(mock_vals,p)]
[fetch(fut) for fut in allR_fut] # [(4,2), (4,2), (4,2)]
| DistributedJLFluxML | https://github.com/asaxton/DistributedJLFluxML.jl.git |
|
[
"MIT"
] | 0.2.0 | 525d055f0c6b9476e6dcf032286383ea941a395c | code | 10350 | # * Bits
module Bits
export bit, bits, bitsize, low0, low1, mask, masked, scan0, scan1, tstbit, weight
using Base: BitInteger, BitIntegerType
# * constants
const Index = Int # do not change
const Word = UInt # default integer type
"""
`INF::Int` indicates the position of the bit at "infinity", for types
which can carry an arbitrary number of bits, like BigInt.
`INF` is also used to indicate an arbitrary large number of bits.
Currently, `Bits.INF == typemax(Int)`.
"""
const INF = typemax(Index)
"""
`NOTFOUND::Int` indicates that no position matches the request, similar
to `nothing` with `findnext`. Currently, `Bits.NOTFOUND == 0`.
"""
const NOTFOUND = 0
const BitFloats = Union{Float16,Float32,Float64}
const MPFR_EXP_BITSIZE = sizeof(Clong) * 8
# * bitsize
"""
bitsize(T::Type) -> Int
bitsize(::T) -> Int
Return the number of bits that can be held by type `T`.
Only the second method may be defined when the number of bits
is a dymanic value, like for `BitFloat`.
# Examples
```jldoctest
julia> bitsize(Int32) == 32 &&
bitsize(true) == 1 &&
bitsize(big(0)) == Bits.INF &&
bitsize(1.2) == 64
true
julia> x = big(1.2); bitsize(x) == 256 + sizeof(x.exp)*8 + 1
true
```
"""
# BigInt can grow without bound, so its bit size is "infinite".
bitsize(::Type{BigInt}) = INF
bitsize(::Type{Bool}) = 1
# Generic fallback: only isbits types have a fixed size; dispatch via Val.
bitsize(T::Type) = bitsize(Val(isbitstype(T)), T)
bitsize(isbits::Val{true}, T::Type) = sizeof(T) * 8
bitsize(isbits::Val{false}, T::Type) = throw(MethodError(bitsize, (T,)))
bitsize(x) = bitsize(typeof(x))
# BigFloat bit count is dynamic: 1 sign bit + exponent field + precision.
bitsize(x::BigFloat) = 1 + MPFR_EXP_BITSIZE + precision(x)
# Position of the highest actually-stored bit (finite even for BigInt).
lastactualpos(x) = bitsize(x)
lastactualpos(x::BigInt) = abs(x.size) * sizeof(Base.GMP.Limb) * 8
# View floats as same-size signed integers for bit manipulation; identity on integers.
asint(x::Integer) = x
asint(x::AbstractFloat) = reinterpret(Signed, x)
# * bit functions: weight, bit, tstbit, mask, low0, low1, scan0, scan1
# ** weight
"""
    weight(x::Real) -> Int

Hamming weight of `x` viewed as a binary vector, i.e. the number of `1` bits
in its representation.

Unlike `count_ones`, this is meaningful for negative `BigInt`s: a `BigInt` is
treated as an infinite twos-complement bit field, so `weight(big(-1))`
returns `Bits.INF` instead of erroring out.

# Examples
```jldoctest
julia> weight(123)
6

julia> count(bits(123))
6
```
"""
weight(n::Real) = count_ones(n)
function weight(n::BigInt)
    # A negative BigInt conceptually carries infinitely many leading 1 bits.
    n < 0 ? INF : count_ones(n)
end
# ** bit
"""
    bit(x::Integer, i::Integer) -> typeof(x)
    bit(x::AbstractFloat, i::Integer) -> Integer

Extract bit number `i` of `x` (1-indexed from the least significant bit),
returned with value `0` or `1`.

For integer `x` the result has the same type as `x`; for a bits-type float it
is a signed integer with the same [`bitsize`](@ref) as `x`.
See also [`tstbit`](@ref) for a `Bool`-valued variant.

# Examples
```jldoctest
julia> bit(0b101, 1)
0x01

julia> bit(0b101, 2)
0x00

julia> bit(-1.0, 64)
1
```
"""
function bit(x::Integer, i::Integer)
    # Logical right shift so sign bits don't smear, then keep the low bit.
    one(x) & (x >>> UInt(i - 1))
end
bit(x::AbstractFloat, i::Integer) = bit(asint(x), i)
bit(x::Union{BigInt,BigFloat}, i::Integer) = tstbit(x, i) ? one(BigInt) : zero(BigInt)
# ** tstbit
"""
    tstbit(x::Real, i::Integer) -> Bool
Similar to [`bit`](@ref) but returns the bit at position `i` as a `Bool`.
# Examples
```jldoctest
julia> tstbit(0b101, 3)
true
```
"""
tstbit(x, i::Integer) = bit(x, i) % Bool
# GMP's tstbit is 0-indexed, hence the i-1.
tstbit(x::BigInt, i::Integer) = Base.GMP.MPZ.tstbit(x, i-1)
# from Random module
using Base.GMP: Limb
const bits_in_Limb = bitsize(Limb)
const Limb_high_bit = one(Limb) << (bits_in_Limb-1)
# BigFloat bit layout (cf. bitsize(::BigFloat)): bits 1:prec come from the
# significand limbs, the next MPFR_EXP_BITSIZE bits from the exponent field,
# and the final (topmost) bit is the sign.
function tstbit(x::BigFloat, i::Integer)
    prec = precision(x)
    if i > prec
        i -= prec
        if i > MPFR_EXP_BITSIZE
            # Past the exponent field: only the sign bit remains.
            i == MPFR_EXP_BITSIZE + 1 ? (x.sign == -1) : false
        else
            tstbit(x.exp, i)
        end
    else
        # The significand occupies whole limbs; skip the unused low bits of
        # the lowest limb when prec is not a multiple of the limb size.
        nlimbs = (prec-1) ÷ bits_in_Limb + 1
        tstbit(x.d, i + nlimbs * bits_in_Limb - prec)
    end
end
# Bit `i` of the raw memory at `p`, read one `T`-sized word at a time.
tstbit(p::Ptr{T}, i::Integer) where {T} =
    tstbit(unsafe_load(p, 1 + (i-1) ÷ bitsize(T)),
           mod1(i, bitsize(T)))
# ** mask
"""
    mask(T::Type{<:Integer}:=UInt, i::Integer=bitsize(T)) -> T

Build an integer of type `T` with its `i` lowest bits set and every other bit
clear, i.e. the pattern `0b0...01...1` containing exactly `i` ones.
With no `i` given, every available bit is set.  The result is unspecified for
`i < 0`.  `T` defaults to `UInt`.

# Examples
```jldoctest
julia> mask(3)
0x0000000000000007

julia> mask(UInt8)
0xff

julia> bits(mask(Int32, 24))
<00000000 11111111 11111111 11111111>
```
"""
mask(::Type{T}, i::Integer) where {T} = (one(T) << i) - one(T)
# alternate implementation:
function mask_2(T::BitIntegerType, i::Integer)
    unused = bitsize(T) - i
    (mask(T) << unused) >>> unused
end
mask(i::Integer) = mask(Word, i)
mask(::Type{T}=Word) where {T} = ~zero(T)
# TODO: optimize
mask(::Type{BigInt}, i::Integer) = (one(BigInt) << i) - 1

"""
    mask(T::Type{<:Integer} := UInt, j::Integer, i::Integer) -> T

Build an integer of type `T` shaped `0b0...01...10...0`: the lowest `j` bits
clear, the next `i-j` bits set, and all higher bits clear.
The result is unspecified for `j < 0`; for `i < 0` it equals `~mask(T, j)`,
i.e. solid ones above exactly `j` zeros.

NOTE: unstable API, could be changed to mask(j, i-j) instead.

# Examples
```jldoctest
julia> bits(mask(UInt8, 2, 5))
<00011100>

julia> bits(mask(BigInt, 3, -1))
<...1 11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111000>
```
"""
mask(::Type{T}, j::Integer, i::Integer) where {T} = mask(T, i - j) << j
# alternate implementation
mask_2(::Type{T}, j::Integer, i::Integer) where {T} = ~mask(T, j) & mask(T, i)
mask(j::Integer, i::Integer) = mask(Word, j, i)
# ** masked
"""
    masked(x, [j::Integer], i::Integer) -> typeof(x)

Keep only the bits of `x` selected by `mask(typeof(x), [j], i)`, i.e. compute
`x & mask(x, [j], i)`.  For floats the mask is applied to the underlying bit
pattern and the result reinterpreted back to the float type.

# Examples
```jldoctest
julia> masked(0b11110011, 1, 5) === 0b00010010
true

julia> x = rand(); masked(-x, 0, 63) === x
true
```
"""
masked(v, i::Integer) = v & mask(typeof(v), i)
masked(v, j::Integer, i::Integer) = v & mask(typeof(v), j, i)
masked(v::AbstractFloat, i::Integer) =
    reinterpret(typeof(v), masked(asint(v), i))
masked(v::AbstractFloat, j::Integer, i::Integer) =
    reinterpret(typeof(v), masked(asint(v), j, i))
# ** low0 & low1, scan0 & scan1
"""
    low0(x, n::Integer=1)
    low1(x, n::Integer=1)

Return the position of the `n`-th `0` bit (`low0`) or `n`-th `1` bit (`low1`)
of `x`, counting from the least significant bit.

# Examples
```jldoctest
julia> low0(0b10101, 2)
4

julia> low1(0b10101, 4) == Bits.NOTFOUND
true
```
"""
low0, low1

low0(x) = scan0(x)
low1(x) = scan1(x)
# The n-th 0 of x is the n-th 1 of its complement (twos-complement view).
low0(x, n::Integer) = low1(~asint(x), n)
function low1(x, n::Integer)
    # Hop from one set bit to the next with scan1 until n of them have been
    # visited; scan1's NOTFOUND (0) is passed straight through to the caller.
    pos = 0
    remaining = n
    while remaining > 0
        pos = scan1(x, pos + 1)
        pos === 0 && break
        remaining -= 1
    end
    return pos
end
"""
    scan0(x, n::Integer=1)
    scan1(x, n::Integer=1)
Return the position of the first `0` (for `scan0`) or `1` (for `scan1`) after or including `n` in `x`.
# Examples
```jldoctest
julia> scan0(0b10101, 1)
2
julia> scan1(0b10101, 6) == Bits.NOTFOUND
true
```
"""
scan0, scan1
# A 0 in x is a 1 in ~x (via the twos-complement integer view).
scan0(x, i::Integer=1) = scan1(~asint(x), i)
function scan1(x, i::Integer=1)
    i < 1 && return NOTFOUND
    # Shift the candidates down so the search starts at bit 1, then count
    # trailing zeros to locate the first set bit.
    y = asint(x) >>> (i % UInt - 1)
    iszero(y) ? NOTFOUND : i + trailing_zeros(y)
end
# low1 (above) relies on NOTFOUND being 0 to terminate its scan loop.
@assert NOTFOUND === 0
# unfortunately, in Base.GMP.MPZ the wrapper converts to Int and fails for big(-1) or big(0)
# GMP's scan functions are 0-indexed: subtract 1 going in, add 1 coming out.
scan0(x::BigInt, i::Integer=1) = 1 + ccall((:__gmpz_scan0, :libgmp), Culong, (Ref{BigInt}, Culong), x, i % Culong - 1) % Int
scan1(x::BigInt, i::Integer=1) = 1 + ccall((:__gmpz_scan1, :libgmp), Culong, (Ref{BigInt}, Culong), x, i % Culong - 1) % Int
# * bits & BitVector1
"""
    bits(x::Real)
Create an immutable view on the bits of `x` as a vector of `Bool`, similar to a `BitVector`.
If `x` is a `BigInt`, the vector has length [`Bits.INF`](@ref).
Currently, no bounds check is performed when indexing into the vector.
# Examples
```jldoctest
julia> v = bits(Int16(2^8+2^4+2+1))
<00000001 00010011>
julia> permutedims([v[i] for i in 8:-1:1])
1×8 Array{Bool,2}:
 false false false true false false true true
julia> bits(true)
<1>
julia> bits(big(2)^63)
<...0 10000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000>
julia> bits(Float32(-7))
<1|10000001|1100000 00000000 00000000>
julia> ans[1:23] # creates a vector of bits with a specific length
<1100000 00000000 00000000>
```
"""
bits(x::Real) = BitVector1(x)
# ** BitVector1
# similar to a BitVector, but with only 1 word to store bits (instead of 1 array thereof)
abstract type AbstractBitVector1 <: AbstractVector{Bool} end
# Full-width view: length is the bitsize of the wrapped value.
struct BitVector1{T<:Real} <: AbstractBitVector1
    x::T
end
# Truncated view exposing only the lowest `len` bits (produced by slicing).
struct BitVector1Mask{T<:Real} <: AbstractBitVector1
    x::T
    len::Int
end
Base.size(v::BitVector1) = (bitsize(v.x),)
Base.size(v::BitVector1Mask) = (v.len,)
Base.getindex(v::AbstractBitVector1, i::Integer) = tstbit(v.x, i)
# Fancy indexing: pack the selected bits, in order, into a fresh mask view.
function Base.getindex(v::AbstractBitVector1, a::AbstractVector{<:Integer})
    xx, _ = foldl(a, init=(zero(asint(v.x)), 0)) do xs, i
        x, s = xs
        # Accumulate bit `i` of the source at output position `s`.
        (x | bit(v.x, i) << s, s+1)
    end
    BitVector1Mask(xx, length(a))
end
# Contiguous ranges need only a single mask-and-shift.
function Base.getindex(v::AbstractBitVector1, a::AbstractUnitRange{<:Integer})
    j, i = extrema(a)
    x = masked(asint(v.x), j-1, i) >> (j-1)
    BitVector1Mask(x, length(a))
end
# ** show
# Significand/exponent bit counts, used to place the '|' field separators
# when printing floats.
sig_exp_bits(x) = Base.Math.significand_bits(typeof(x)), Base.Math.exponent_bits(typeof(x))
sig_exp_bits(x::BigFloat) = precision(x), MPFR_EXP_BITSIZE
# Integers: group the printed bits in bytes.
showsep(io, x, i) = (i % 8 == 0) && print(io, ' ')
# Floats: additionally separate the sign/exponent/significand fields with '|'.
function showsep(io, x::AbstractFloat, i)
    sigbits, expbits = sig_exp_bits(x)
    if i == sigbits || i == sigbits + expbits
        print(io, '|')
    elseif i < sigbits && i % 8 == 0 || i > sigbits && (i-sigbits) % 8 == 0
        print(io, ' ')
    end
end
function Base.show(io::IO, v::AbstractBitVector1)
    if v.x isa BigInt && v isa BitVector1
        # A full BigInt view is conceptually infinite: show the repeating
        # sign bit once as "<...0 " or "<...1 ".
        print(io, "<...", v.x < 0 ? "1 " : "0 ")
    else
        print(io, "<")
    end
    # Only print the bits actually stored, most significant first.
    l = v isa BitVector1 ? lastactualpos(v.x) : v.len
    for i = l:-1:1
        i != l && showsep(io, v.x, i)
        show(io, v[i] % Int)
    end
    print(io, ">")
end
Base.show(io::IO, ::MIME"text/plain", v::AbstractBitVector1) = show(io, v)
end # module
| Bits | https://github.com/rfourquet/Bits.jl.git |
|
[
"MIT"
] | 0.2.0 | 525d055f0c6b9476e6dcf032286383ea941a395c | code | 6885 | using Bits, Test
using Bits: NOTFOUND
# `≜`: equality that also requires identical types (pins return types in tests).
x ≜ y = typeof(x) == typeof(y) && x == y
@testset "bitsize" begin
    for T in (Base.BitInteger_types..., Float16, Float32, Float64)
        @test bitsize(T) === sizeof(T) * 8
        @test bitsize(zero(T)) === bitsize(one(T)) === bitsize(T)
    end
    @test bitsize(BigInt) === Bits.INF
    @test bitsize(Bool) === 1
    @test bitsize(Float64) === 64
    @test bitsize(Float32) === 32
    @test bitsize(Float16) === 16
    # BigFloat has no type-level size; only instances do.
    @test_throws MethodError bitsize(BigFloat)
    # Instance size = 1 sign bit + exponent field + precision bits.
    @test bitsize(BigFloat(1, 256)) == 321
    @test bitsize(BigFloat(1, 100)) == 165
end
@testset "bit functions" begin
# Hamming weight (population count) across all integer types.
@testset "weight" begin
    for T = (Base.BitInteger_types..., BigInt)
        @test weight(T(123)) === 6
        # typemax/typemin are not defined for BigInt
        T == BigInt && continue
        @test weight(typemax(T)) === bitsize(T) - (T <: Signed)
        @test weight(typemin(T)) === Int(T <: Signed)
    end
    # negative BigInts conceptually carry infinitely many leading 1-bits
    @test weight(big(-1)) === weight(big(-999)) === Bits.INF
end

@testset "bit & tstbit" begin
    # `bit` returns a 0/1 value of the argument's type; `tstbit` returns a Bool.
    # `val` converts the expected integer into the form each function returns.
    val(::typeof(bit), x) = x
    val(::typeof(tstbit), x) = x % Bool
    for _bit = (bit, tstbit)
        for T = (Base.BitInteger_types..., BigInt)
            T0, T1 = T(0), T(1)
            @test _bit(T(0), rand(1:bitsize(T))) ≜ val(_bit, T0)
            @test _bit(T(1), 1) ≜ val(_bit, T1)
            @test _bit(T(1), 2) ≜ val(_bit, T0)
            @test _bit(T(5), 1) ≜ val(_bit, T1)
            @test _bit(T(5), 2) ≜ val(_bit, T0)
            @test _bit(T(5), 3) ≜ val(_bit, T1)
            @test _bit(T(5), 4) ≜ val(_bit, T0)
        end
        # floats are inspected through their IEEE-754 bit patterns
        # (bit 64 of a Float64 is the sign bit)
        @test _bit( 1.0, 64) ≜ val(_bit, 0)
        @test _bit(-1.0, 64) ≜ val(_bit, 1)
        @test _bit( Float32(1.0), 32) ≜ val(_bit, Int32(0))
        @test _bit(-Float32(1.0), 32) ≜ val(_bit, Int32(1))
        x = BigFloat(-1.0, 128)
        # NOTE(review): the expected set-bit positions presumably reflect the
        # internal MPFR layout of -1.0 at 128-bit precision -- confirm if changed
        for i=1:128+65
            @test _bit(x, i) == val(_bit, big(i ∈ [128, 129, 128+65]))
        end
    end
end

@testset "mask" begin
    for T = Base.BitInteger_types
        i = rand(0:min(999, bitsize(T)))
        # mask_2 is the reference (naive) implementation
        @test mask(T, i) ≜ Bits.mask_2(T, i)
        @test mask(T) ≜ -1 % T
    end
    i = rand(0:bitsize(Bits.Word))
    @test mask(Bits.Word, i) === mask(i)
    @test mask(Bits.Word) === mask()
    @test mask(0) == 0
    @test mask(1) == 1
    # assumes a 64-bit platform (Sys.WORD_SIZE == 64)
    @test mask(Sys.WORD_SIZE) === mask() === 0xffffffffffffffff
    @test mask(UInt64, 64) == mask(64)
    @test mask(UInt64, 63) == 0x7fffffffffffffff
    @test mask(BigInt, 0) ≜ big(0)
    @test mask(BigInt, 1) ≜ big(1)
    @test mask(BigInt, 1024) ≜ big(2)^1024 - 1
    # 2-arg mask
    for T = (Base.BitInteger_types..., BigInt)
        j, i = minmax(rand(0:min(999, bitsize(T)), 2)...)
        m = mask(T, j, i)
        @test m ≜ Bits.mask_2(T, j, i)
        if T === Bits.Word
            @test m === mask(j, i)
        end
        @test count_ones(m) == i-j
        @test m >>> j ≜ mask(T, i-j)
        # a negative upper bound selects everything above bit j
        @test mask(T, j, -1) ≜ ~mask(T, j)
    end
    @test mask(UInt64, 2, 4) === 0x000000000000000c
end

@testset "masked" begin
    for T = (Base.BitInteger_types..., BigInt)
        j, i = minmax(rand(0:min(999, bitsize(T)), 2)...)
        # masking a full mask must reproduce the mask itself
        @test masked(mask(T), j) ≜ mask(T, j)
        @test masked(mask(T), j, i) ≜ mask(T, j, i)
        @test masked(mask(T, i), i) ≜ mask(T, i)
        @test masked(mask(T, i), j, i) ≜ mask(T, j, i)
        T == BigInt && continue
        x = rand(T)
        @test masked(x, j) ≜ x & mask(T, j)
        @test masked(x, j, i) ≜ x & mask(T, j, i)
    end
    @test masked(0b11110011, 1, 5) ≜ 0b00010010
    # masking out sign and low mantissa bits of -1.0 yields +1.0
    @test masked(-1.0, 52, 63) === 1.0
end

@testset "low0, low1, scan0, scan1" begin
    for T = (Base.BitInteger_types..., BigInt)
        # test pattern: bits 2,4,5,7 set (0b01011010)
        x = T(0b01011010)
        @test low1(x, 0) == NOTFOUND
        @test low1(x) == 2
        @test low1(x, 1) == 2
        @test low1(x, 2) == 4
        @test low1(x, 3) == 5
        @test low1(x, 4) == 7
        for i = 5:min(128, bitsize(T))+1
            @test low1(x, i) == NOTFOUND
        end
        @test low0(x, 0) == NOTFOUND
        @test low0(x) == 1
        @test low0(x, 1) == 1
        @test low0(x, 2) == 3
        @test low0(x, 3) == 6
        @test low0(x, 4) == 8
        for i = 5:min(128, bitsize(T))
            # beyond bit 8 every bit is 0; finite types run out of bits
            @test low0(x, i) == (i+4 <= bitsize(T) ? i+4 : NOTFOUND)
        end
        @test scan1(x, 0) == NOTFOUND
        @test scan1(x, 1) == 2
        @test scan1(x) == 2
        @test scan1(x, 2) == 2
        @test scan1(x, 3) == 4
        @test scan1(x, 4) == 4
        @test scan1(x, 5) == 5
        @test scan1(x, 6) == 7
        @test scan1(x, 7) == 7
        for i = 8:min(128, bitsize(T))+1
            @test scan1(x, i) == NOTFOUND
        end
        @test scan0(x, 0) == NOTFOUND
        @test scan0(x, 1) == 1
        @test scan0(x) == 1
        @test scan0(x, 2) == 3
        @test scan0(x, 3) == 3
        @test scan0(x, 4) == 6
        @test scan0(x, 5) == 6
        @test scan0(x, 6) == 6
        @test scan0(x, 7) == 8
        @test scan0(x, 8) == 8
        for i = 9:min(128, bitsize(T))
            @test scan0(x, i) == i
        end
        # BigInt has unbounded width, so scanning past bitsize is meaningless
        T === BigInt && continue
        @test scan0(x, bitsize(T)+1) == NOTFOUND
    end
end
end
# `bits` returns a bit-vector view of a value's binary representation.
@testset "bits" begin
    for T in Base.BitInteger_types
        v = bits(one(T))
        @test length(v) == bitsize(T)
        @test v[1] === true
        @test count(v) == 1
        v = bits(zero(T))
        @test length(v) == bitsize(T)
        @test v[1] === false
        @test count(v) == 0
    end
    v = bits(7)
    @test v[1] === v[2] === v[3] === true
    @test count(v) == 3
    for T = (Int64, UInt64)
        # highest bit set (sign bit for Int64)
        v = bits(T(2)^63)
        @test v[64] === true
        @test count(v) == 1
    end
    # exponent/sign bits of the IEEE-754 encoding of -16.0
    @test bits(Float64(-16))[53:end] == [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
    @test count(bits(Float64(-16))) == 4
    @testset "array indexing" begin
        v = bits(1234)[1:8]
        @test v == [0, 1, 0, 0, 1, 0, 1, 1]
        @test v isa Bits.BitVector1Mask
        v = bits(1.2)[10:17]
        @test v == [1, 0, 0, 1, 1, 0, 0, 1]
        @test v isa Bits.BitVector1Mask{Int64}
        @test all(bits(123)[[1, 2, 4, 5, 6, 7]])
        @test count(bits(123)) == 6
        # test optimization for v[i:j]
        for T = (Base.BitInteger_types..., BigInt)
            i, j = minmax(rand(1:min(999, bitsize(T)), 2)...)
            v = bits(rand(T == BigInt ? (big(-999):big(999)) : T))
            # a unit-step range must agree with the generic stepped-range path
            @test v[i:j] == v[i:1:j]
        end
    end
end
| Bits | https://github.com/rfourquet/Bits.jl.git |
|
[
"MIT"
] | 0.2.0 | 525d055f0c6b9476e6dcf032286383ea941a395c | docs | 841 | # Bits
[](https://travis-ci.org/rfourquet/Bits.jl)
This package implements functions to play with bits, of integers, and of floats to a certain extent.
For example:
```julia
julia> bits(0b110101011)
<00000001 10101011>
julia> ans[1:4]
<1011>
```
Currently, the following functions are exported:
`bit`, `bits`, `bitsize`, `low0`, `low1`, `mask`, `masked`, `scan0`, `scan1`, `tstbit`, `weight`.
They have a docstring, but no HTML documentation is available yet.
In these functions, the right-most bit of a value has index `1`, but in some applications it's more natural for it to have index `0`.
So the functions will likely be also implemented with indexes starting at `0`, and both alternatives will be available.
It's possible that the default will be changed.
| Bits | https://github.com/rfourquet/Bits.jl.git |
|
[
"MIT"
] | 0.1.0 | a3bd92d3cd222cd5cf9aa602ffe842faa909773a | code | 2282 | module AsmMacro
export @asm
function gen_arg(args, arg::Expr)
if arg.head === :ref
string(arg.args[2], "(", gen_arg(args, arg.args[1]), ")")
elseif arg.head === :macrocall
string(".", string(arg.args[1])[2:end])
else
error("?!! $arg")
end
end
# Render a symbol operand: function arguments become positional inline-asm
# substitutions ("$0", "$1", ...); anything else is treated as a register
# name and prefixed with '%'.
function gen_arg(args, arg::Symbol)
    position = findfirst(isequal(arg), args)
    if position === nothing
        return string("%", arg)
    end
    return string("\$", position - 1)
end
# TODO add more of those
# Translate a Julia scalar type into the matching LLVM IR type name.
function typ_to_llvm(::Type{Float64})
    return "double"
end
function typ_to_llvm(::Type{Float32})
    return "float"
end
function typ_to_llvm(::Type{Int32})
    return "i32"
end
function typ_to_llvm(::Type{Int64})
    return "i64"
end
# Pointers are lowered to the platform's integer type.
function typ_to_llvm(::Type{Ptr{T}}) where {T}
    return typ_to_llvm(Int)
end
const DEBUG_ASM = Ref(false)
# Build a `Base.llvmcall` expression wrapping the statements `xs` as an LLVM
# inline-asm block. `args` are the `name::Type` parameter expressions of the
# enclosing function; each becomes a numbered asm operand ($0, $1, ...).
function gen_asm(args, xs)
    io = IOBuffer()
    argnames = Symbol[]
    typs = []
    # split `name::Type` annotations into names and (evaluated) types
    for a in args
        isa(a,Expr) && a.head === :(::) || error("invalid arg sig $a")
        # NOTE(review): eval resolves the type annotation at macro-expansion
        # time; assumes the annotation names a top-level type -- confirm
        typ = eval(a.args[2])
        push!(argnames,a.args[1])
        push!(typs,typ)
    end
    println(io, "call void asm \"")
    # emit one asm line per statement: calls become instructions,
    # macrocalls become local label definitions
    for ex in xs
        isa(ex, LineNumberNode) && continue
        isa(ex, Expr) && ex.head === :line && continue
        if isa(ex,Expr)
            if ex.head === :call
                op = string(ex.args[1])
                opargs = join(map(a -> gen_arg(argnames, a), ex.args[2:end]), ", ")
                println(io, op, " ", opargs)
            elseif ex.head === :macrocall
                println(io, ".", string(ex.args[1])[2:end], ":")
            else
                dump(ex)
                error("unknown expr $ex")
            end
        else
            error("??? $(typeof(ex))")
        end
    end
    llvmtypes = map(typ_to_llvm, typs)
    # bind each LLVM-typed argument to a numbered SSA value (%0, %1, ...)
    for i = 1:length(llvmtypes)
        llvmtypes[i] = string(llvmtypes[i], " %", i-1)
    end
    # every operand uses the generic "r" register constraint
    constr = map(_ -> "r", llvmtypes)
    println(io, "\",\"", join(constr, ","), "\"(", join(llvmtypes, ", "), ")")
    println(io, "ret void")
    asm = String(take!(io))
    DEBUG_ASM[] && println(asm)
    Expr(:call, GlobalRef(Base, :llvmcall), asm, Cvoid, Tuple{typs...}, args...)
end
"""
    @asm function f(x::T, ...) ... end

Rewrite the body of a long-form function definition into a single
`Base.llvmcall` inline-assembly block built by [`gen_asm`](@ref). The
function's signature is kept intact; each typed parameter becomes a numbered
asm operand.

Throws an `ArgumentError` when the macro is not applied to a
`function name(args...) ... end` definition.
"""
macro asm(f)
    # Validate the shape explicitly rather than with `@assert`, which may be
    # disabled at higher optimization levels and gives poor error messages.
    isa(f, Expr) && f.head === :function ||
        throw(ArgumentError("@asm expects a `function ... end` definition"))
    sig = f.args[1]
    isa(sig, Expr) && sig.head === :call ||
        throw(ArgumentError("@asm function must have a call signature, got $sig"))
    body = f.args[2]
    isa(body, Expr) && body.head === :block ||
        throw(ArgumentError("@asm function must have a block body"))
    # replace the entire body with the generated llvmcall expression
    body.args = Any[gen_asm(sig.args[2:end], body.args)]
    esc(f)
end
end # module
| AsmMacro | https://github.com/YingboMa/AsmMacro.jl.git |
|
[
"MIT"
] | 0.1.0 | a3bd92d3cd222cd5cf9aa602ffe842faa909773a | code | 1336 | using AsmMacro, Test
@testset "@asm" begin
    # SSE loop: accumulate x[1:2] into xmm1 `n` times, i.e. z .= x .* n
    @asm function add_loop_vec2(x::Ptr{Float64},n::Int,z::Ptr{Float64})
        movq(n, rcx)
        movapd(x[0], xmm0)
        xorpd(xmm1,xmm1)
        @loop
        addpd(xmm0,xmm1)
        dec(rcx)
        jnz(@loop)
        movapd(xmm1, z[0])
    end
    x = [1.0,2.0]
    n = 10
    z = similar(x)
    add_loop_vec2(pointer(x),n,pointer(z))
    @test z == x*n

    # 64-bit integer addition written to memory through z
    @asm function add(z::Ptr{Int64}, x::Int64, y::Int64)
        addq(x, y)
        movq(y, z[0])
    end
    z = Int64[100]
    add(pointer(z), Int64(1), Int64(2))
    @test z[1] === Int64(3)

    # 32-bit integer addition (distinct method via argument types)
    @asm function add(z::Ptr{Int32}, x::Int32, y::Int32)
        addl(x, y)
        movl(y, z[0])
    end
    z = Int32[100]
    add(pointer(z), Int32(1), Int32(2))
    @test z[1] === Int32(3)

    # scalar double addition via AVX
    @asm function add(z::Ptr{Float64}, x::Float64, y::Float64)
        vaddsd(xmm0, xmm1, xmm1)
        movq(xmm1, z[0])
    end
    z = Float64[100]
    add(pointer(z), Float64(1), Float64(2))
    @test z[1] === Float64(3.0)

    # scalar single-precision addition via AVX
    @asm function add(z::Ptr{Float32}, x::Float32, y::Float32)
        vaddss(xmm0, xmm1, xmm1)
        movq(xmm1, z[0])
    end
    z = Float32[100]
    add(pointer(z), Float32(1), Float32(2))
    @test z[1] === Float32(3.0)

    # a bare symbol is not a valid asm statement -> macro expansion must fail
    @test_throws Any (@eval @asm function add(x::Float64)
                      ___
                      end)
end
|
[
"MIT"
] | 0.1.0 | a3bd92d3cd222cd5cf9aa602ffe842faa909773a | docs | 1283 | # AsmMacro.jl
[](https://travis-ci.org/YingboMa/AsmMacro.jl)
[](https://codecov.io/gh/YingboMa/AsmMacro.jl)
`AsmMacro.jl` provides a relatively simple way to write assembly code in Julia.
## Examples
```julia
using AsmMacro
# z[1:4] <- x[1:4] + y[1:4]
@asm function add_avx256(x::Ptr{Float64},y::Ptr{Float64},z::Ptr{Float64})
vmovupd(x[0], ymm0)
vmovupd(y[0], ymm1)
vaddpd(ymm0, ymm1, ymm1)
vmovupd(ymm1, z[0])
end
x = [1.0,2.0,3.0,4.0]
y = [4.0,3.0,2.0,1.0]
z = similar(x)
add_avx256(pointer(x),pointer(y),pointer(z))
julia> z
4-element Array{Float64,1}:
5.0
5.0
5.0
5.0
```
```julia
using AsmMacro
# z[1:2] <- x[1:2]*n (with a loop)
@asm function add_loop_sse(x::Ptr{Float64},n::Int,z::Ptr{Float64})
movq(n, rcx)
movapd(x[0], xmm0)
xorpd(xmm1,xmm1)
@loop
addpd(xmm0,xmm1)
dec(rcx)
jnz(@loop)
movapd(xmm1, z[0])
end
x = [1.0,2.0]
n = 10
z = similar(x)
add_loop_sse(pointer(x),n,pointer(z))
julia> z
2-element Array{Float64,1}:
10.0
20.0
```
## Acknowledgement
This package is based on the original code by [Oscar Blumberg](https://github.com/carnaval).
| AsmMacro | https://github.com/YingboMa/AsmMacro.jl.git |
|
[
"MIT"
] | 0.4.2 | 4d90e518619265f7e97d16fdb6aa6af492d82285 | code | 3972 | module ScanDir
export scandir, scandirtree, DirEntry
import Base.Filesystem: uv_dirent_t
import Base: uv_error, _sizeof_uv_fs
# File-type tags for directory entries.
# NOTE(review): the enum ordering (Unknown=0 .. BlockDev=7) presumably mirrors
# libuv's UV_DIRENT_* constants, which `scandir` casts from directly -- confirm
# against uv.h before reordering.
module PosixFileTypes
@enum PosixFileType Unknown File Directory Link FIFO Socket CharDev BlockDev
end
using .PosixFileTypes: PosixFileType
"""
    DirEntry

A single directory entry as reported by the operating system:
`name` is the bare filename, `path` is the name joined with the scanned
directory, and `type` is the file type reported by the OS (no `stat` call).
"""
struct DirEntry
    name::String
    path::String
    type::PosixFileType
end
# REPL display, e.g. `<file "./foo.txt">`.
function Base.show(io::IO, ::MIME"text/plain", e::DirEntry)
    kind = lowercase(string(e.type))
    print(io, '<', kind, ' ', repr(e.path), '>')
end
_islink(e::DirEntry) = e.type == PosixFileTypes.Link
# Define isfile/isdir/... for DirEntry. The enumerate index i maps each
# predicate to PosixFileType(i) (File=1 .. BlockDev=7). For symlinked entries
# the predicate falls back to the Base method on the path, following the link.
for (i,s) in enumerate((:isfile, :isdir, :islink, :isfifo, :issocket, :ischardev, :isblockdev))
    @eval Base.Filesystem.$s(e::DirEntry) = e.type == PosixFileType($i) || _islink(e) && $s(e.path)
end
filename(e::DirEntry) = e.name
# Implementation copied from Base.readdir and modified to return DirEntry's
"""
    scandir(dir::AbstractString="."; sort=true)

Return a `Vector{DirEntry}` for the contents of `dir`, using the file type
reported by libuv's `uv_fs_scandir` (avoiding extra `stat` calls). Entries
are sorted by filename unless `sort=false`.
"""
function scandir(dir::AbstractString="."; sort=true)
    # Allocate space for uv_fs_t struct
    req = Libc.malloc(_sizeof_uv_fs)
    try
        # defined in sys.c, to call uv_fs_readdir, which sets errno on error.
        err = ccall(:uv_fs_scandir, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Cint, Ptr{Cvoid}),
                    C_NULL, req, dir, 0, C_NULL)
        err < 0 && uv_error("readdir($(repr(dir)))", err)

        # iterate the listing into entries
        entries = DirEntry[]
        ent = Ref{uv_dirent_t}()
        # uv_fs_scandir_next yields one uv_dirent_t (name + type) per call
        while Base.UV_EOF != ccall(:uv_fs_scandir_next, Cint, (Ptr{Cvoid}, Ptr{uv_dirent_t}), req, ent)
            ent_name = unsafe_string(ent[].name)
            ent_path = joinpath(dir, ent_name)
            push!(entries, DirEntry(ent_name, ent_path, PosixFileType(ent[].typ)))
        end

        # Clean up the request string
        # on newer julia versions this can be: Base.Filesystem.uv_fs_req_cleanup(req)
        ccall(:uv_fs_req_cleanup, Cvoid, (Ptr{Cvoid},), req)

        # sort entries unless opted out
        sort && sort!(entries; by=filename)

        return entries
    finally
        Libc.free(req)
    end
end
# Run `f()` and return its result. On a Base.IOError, delegate to `onerror`;
# if `onerror` itself throws, close `channel` with that exception so consumers
# observe the failure. Returns `nothing` after a handled error. Any non-IO
# exception is rethrown unchanged.
_channel_try_io(f, channel, onerror) = try
    f()
catch err
    isa(err, Base.IOError) || rethrow()
    try
        onerror(err)
    catch err2
        close(channel, err2)
    end
    return
end
# Implementation copied from Base.walkdir and modified to use scandir,
# to avoid unnecessary stat()ing
"""
    scandirtree(root="."; topdown=true, follow_symlinks=false, onerror=throw, prune=_->false)

Walk the directory tree rooted at `root`, yielding named tuples
`(root, dirs, files)` where `dirs` and `files` are `Vector{DirEntry}`.
Entries for which `prune` returns true are skipped entirely.
"""
function scandirtree(root="."; topdown=true, follow_symlinks=false, onerror=throw, prune=_->false)
    function _scandirtree(chnl, root)
        # a symlink counts as a file unless we are following symlinks
        isfilelike(e) = (!follow_symlinks && islink(e)) || !isdir(e)
        tryf(f, p) = _channel_try_io(()->f(p), chnl, onerror)
        content = tryf(scandir, root)
        # `nothing` signals a handled IO error; abandon this subtree
        content === nothing && return
        dirs = DirEntry[]
        files = DirEntry[]
        for entry in content
            prune(entry) && continue
            filelike = tryf(isfilelike, entry)
            filelike === nothing && return
            push!(filelike ? files : dirs, entry)
        end

        # pre-order: yield the current directory before its children
        if topdown
            push!(chnl, (; root, dirs, files))
        end
        for dir in dirs
            _scandirtree(chnl, joinpath(root, dir.name))
        end
        # post-order: yield after all children have been visited
        if !topdown
            push!(chnl, (; root, dirs, files))
        end
        nothing
    end
    TreeEntry = NamedTuple{(:root, :dirs, :files), Tuple{String, Vector{DirEntry}, Vector{DirEntry}}}
    return Channel{TreeEntry}(chnl -> _scandirtree(chnl, root))
end
# Same traversal as `scandirtree`, but yields plain `String` names for dirs
# and files, making it a drop-in, faster replacement for `Base.walkdir`.
function walkdir(root="."; topdown=true, follow_symlinks=false, onerror=throw, prune=_->false)
    scan_channel = scandirtree(root; topdown, follow_symlinks, onerror, prune)
    WalkdirEntry = NamedTuple{(:root, :dirs, :files), Tuple{String, Vector{String}, Vector{String}}}
    return Channel{WalkdirEntry}() do channel
        # forward each scandirtree item, stripping DirEntry down to names;
        # IO errors raised while reading the channel go through `onerror`
        _channel_try_io(channel, onerror) do
            for (root, dirs, files) in scan_channel
                push!(channel, (; root, dirs = [e.name for e in dirs], files = [e.name for e in files]))
            end
        end
    end
end
end # module
| ScanDir | https://github.com/yha/ScanDir.jl.git |
|
[
"MIT"
] | 0.4.2 | 4d90e518619265f7e97d16fdb6aa6af492d82285 | code | 5963 | using Test
using ScanDir
# Tests copied and modified from julia's test/file.jl
# Tests copied and modified from julia's test/file.jl. Builds this layout:
#   file1, file2, sub_dir1/{file1,file2,subsub_dir1,subsub_dir2[,link->sub_dir2]},
#   sub_dir2/file_dir2
# then walks it in every combination of topdown / follow_symlinks / prune.
dirwalk = mktempdir()
cd(dirwalk) do
    for i=1:2
        mkdir("sub_dir$i")
        open("file$i", "w") do f end

        mkdir(joinpath("sub_dir1", "subsub_dir$i"))
        touch(joinpath("sub_dir1", "file$i"))
    end
    touch(joinpath("sub_dir2", "file_dir2"))
    # symlink support requires Vista+ on Windows
    has_symlinks = !Sys.iswindows() || (Sys.windows_version() >= Sys.WINDOWS_VISTA_VER)
    follow_symlink_vec = has_symlinks ? [true, false] : [false]
    has_symlinks && symlink(abspath("sub_dir2"), joinpath("sub_dir1", "link"))
    for prune_subsub in [false, true]
        # when pruning, drop every entry whose name starts with "subsub"
        prune = prune_subsub ? e->startswith(e.name,"subsub") : _->false
        subsubs = prune_subsub ? [] : ["subsub_dir1", "subsub_dir2"]
        for follow_symlinks in follow_symlink_vec
            # pre-order traversal: root first, then children
            chnl = ScanDir.walkdir(".", follow_symlinks=follow_symlinks, prune=prune)
            root, dirs, files = take!(chnl)
            @test root == "."
            @test dirs == ["sub_dir1", "sub_dir2"]
            @test files == ["file1", "file2"]

            root, dirs, files = take!(chnl)
            @test root == joinpath(".", "sub_dir1")
            if has_symlinks
                # a followed link is a dir; an unfollowed one counts as a file
                if follow_symlinks
                    @test dirs == ["link"; subsubs]
                    @test files == ["file1", "file2"]
                else
                    @test dirs == subsubs
                    @test files == ["file1", "file2", "link"]
                end
            else
                @test dirs == subsubs
                @test files == ["file1", "file2"]
            end

            root, dirs, files = take!(chnl)
            if follow_symlinks
                @test root == joinpath(".", "sub_dir1", "link")
                @test dirs == []
                @test files == ["file_dir2"]
                root, dirs, files = take!(chnl)
            end
            if !prune_subsub
                for i=1:2
                    @test root == joinpath(".", "sub_dir1", "subsub_dir$i")
                    @test dirs == []
                    @test files == []
                    root, dirs, files = take!(chnl)
                end
            end

            @test root == joinpath(".", "sub_dir2")
            @test dirs == []
            @test files == ["file_dir2"]
            @test !isready(chnl)
        end

        for follow_symlinks in follow_symlink_vec
            # post-order traversal: deepest directories first, root last
            chnl = ScanDir.walkdir(".", follow_symlinks=follow_symlinks, topdown=false, prune=prune)
            root, dirs, files = take!(chnl)
            if follow_symlinks
                @test root == joinpath(".", "sub_dir1", "link")
                @test dirs == []
                @test files == ["file_dir2"]
                root, dirs, files = take!(chnl)
            end
            if !prune_subsub
                for i=1:2
                    @test root == joinpath(".", "sub_dir1", "subsub_dir$i")
                    @test dirs == []
                    @test files == []
                    root, dirs, files = take!(chnl)
                end
            end
            @test root == joinpath(".", "sub_dir1")
            if has_symlinks
                if follow_symlinks
                    @test dirs == ["link"; subsubs]
                    @test files == ["file1", "file2"]
                else
                    @test dirs == subsubs
                    @test files == ["file1", "file2", "link"]
                end
            else
                @test dirs == subsubs
                @test files == ["file1", "file2"]
            end

            root, dirs, files = take!(chnl)
            @test root == joinpath(".", "sub_dir2")
            @test dirs == []
            @test files == ["file_dir2"]

            root, dirs, files = take!(chnl)
            @test root == "."
            @test dirs == ["sub_dir1", "sub_dir2"]
            @test files == ["file1", "file2"]
            @test !isready(chnl)
        end
    end

    # These subtly depend on timing, so removed for now
    # TODO need better way to test onerror
    # #test of error handling
    # chnl_error = ScanDir.walkdir(".")
    # chnl_noerror = ScanDir.walkdir(".", onerror=x->x)

    # root, dirs, files = take!(chnl_error)
    # #@show root
    # @test root == "."
    # @test dirs == ["sub_dir1", "sub_dir2"]
    # @test files == ["file1", "file2"]

    # rm(joinpath("sub_dir1"), recursive=true)
    # @test_throws Base.IOError take!(chnl_error) # throws an error because sub_dir1 do not exist

    # root, dirs, files = take!(chnl_noerror)
    # @test root == "."
    # @test dirs == ["sub_dir1", "sub_dir2"]
    # @test files == ["file1", "file2"]

    # root, dirs, files = take!(chnl_noerror) # skips sub_dir1 as it no longer exist
    # @test root == joinpath(".", "sub_dir2")
    # @test dirs == []
    # @test files == ["file_dir2"]

    # Test that symlink loops don't cause errors
    if has_symlinks
        mkdir(joinpath(".", "sub_dir3"))
        foo = joinpath(".", "sub_dir3", "foo")

        # this symlink requires admin privileges on Windows:
        symlink_err = false
        try
            symlink("foo", foo)
        catch e
            symlink_err = true
            showerror(stderr, e); println(stderr)
            @warn "Could not create symlink. Symlink loop tests skipped."
        end

        if !symlink_err
            # following a self-referential link must raise an IOError (ELOOP)
            let files = ScanDir.walkdir(joinpath(".", "sub_dir3"); follow_symlinks=true)
                # wording of this error differs on julia 1.5, so just check for IOError
                #@test_throws Base._UVError("stat($(repr(foo)))", Base.UV_ELOOP) take!(files)
                @test_throws Base.IOError take!(files)
            end
            # without following, the loop is reported as an ordinary file
            root, dirs, files = take!(ScanDir.walkdir(joinpath(".", "sub_dir3"); follow_symlinks=false))
            @test root == joinpath(".", "sub_dir3")
            @test dirs == []
            @test files == ["foo"]
        end
    end
end
rm(dirwalk, recursive=true)
| ScanDir | https://github.com/yha/ScanDir.jl.git |
|
[
"MIT"
] | 0.4.2 | 4d90e518619265f7e97d16fdb6aa6af492d82285 | docs | 2762 | # ScanDir.jl
*Faster reading of directories*
This package provides two functions:
- `scandir`, which returns a vector of `DirEntry` objects, each specifying a filename and a type (file, directory, link etc.).
- `ScanDir.walkdir`, which is a faster version of `Base.walkdir`, using `scandir` to avoid unnecessary `stat` calls.
Julia's builtin `readdir` function returns filenames in a directory but discards the type information returned from the underlying `libuv` function call.
The `scandir` function exposes this information in the `DirEntry` struct.
The name `scandir` was chosen to parallel python's `os.scandir`, which offers similar functionality.
Benchmarks of `ScanDir.walkdir` on one Windows machine have shown a speedup factor of 4\~4.5 on a local drive, and 30\~35 (!) on a network-mapped drive, compared to `Base.walkdir`.
## Usage
`scandir(path::AbstractString=".")` returns a vector of `DirEntry`.
Each `DirEntry`'s filename is accessible via the `name` field.
Its type can be queried by the standard `Base` functions (`isfile`, `isdir`, `islink`, `isfifo`, `issocket`, `ischardev`, `isblockdev`). This will only call `stat` if necessary -- which happens if the entry is a symlink and the type of the target needs to be determined. In this case, the link is followed from the path supplied to `scandir`, so the result may depend on the working directory if that path is relative.
```julia
julia> dir = mktempdir();
julia> cd(dir)
julia> mkdir("subdir");
julia> touch("file");
julia> symlink("subdir", "link")
julia> entries = scandir()
3-element Array{ScanDir.DirEntry,1}:
ScanDir.DirEntry("file", ".\\file", 1)
ScanDir.DirEntry("link", ".\\link", 3)
ScanDir.DirEntry("subdir", ".\\subdir", 2)
julia> isdir.(entries) # triggers `stat` call for "link" only
3-element BitArray{1}:
0
1
1
```
`ScanDir.walkdir` is a faster implementation of `Base.walkdir` (https://docs.julialang.org/en/v1/base/file/#Base.Filesystem.walkdir), and has a compatible interface. Its interface differs from `Base.walkdir` in two ways:
- it returns named tuples (root=..., dirs=..., files=...)
- it supports a `prune` keyword argument to filter the returned contents.
```julia
julia> touch("subdir/file2");
julia> mkdir("subdir/skipme");
julia> touch("subdir/skipme/file3");
julia> collect(ScanDir.walkdir("."))
3-element Array{Any,1}:
(root = ".", dirs = ["subdir"], files = ["file", "link"])
(root = ".\\subdir", dirs = ["skipme"], files = ["file2"])
(root = ".\\subdir\\skipme", dirs = String[], files = ["file3"])
julia> collect(ScanDir.walkdir(".", prune = e->startswith(e.name, "skip")))
2-element Array{Any,1}:
(root = ".", dirs = ["subdir"], files = ["file", "link"])
(root = ".\\subdir", dirs = String[], files = ["file2"])
```
| ScanDir | https://github.com/yha/ScanDir.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 10994 | #TODO add precompile statements
"""
kmeans(data::PopData; k::Int64, iterations::Int64 = 100, matrixtype::Symbol = :pca)
Perform Kmeans clustering (using Kmeans++) on a `PopData` object. Returns a `KmeansResult`
object. Use the keyword argument `iterations` (default: 100) to set the maximum number of iterations allowed to
achieve convergence. Interally, kmeans clustering is performed on either the principal components of the scaled allele frequencies,
or just the scaled allele frequencies themselves. In both cases, `missing` values are replaced by the global mean allele frequency.
**Keyword Arguments**
- `k`: the number of desired clusters, given as an `Integer`
- `iterations::Int64`: the maximum number of iterations to attempt to reach convergence (default: `100`)
- `matrixtype`: type of input matrix to compute (default: `:pca`)
- `:pca`: matrix of Principal Components
- `:freq`: matrix of scaled allele frequencies
**Example**
```julia
julia> cats = @nancycats ;
julia> km = kmeans(cats, k = 2)
```
"""
function kmeans(data::PopData; k::Int64, iterations::Int64 = 100, matrixtype::Symbol = :pca)
mtx = matrixtype == :pca ?
pca(data, center = false, scale = true).proj |> permutedims : matrixtype == :freq ?
matrix(data, center = false, scale = true) :
throw(ArgumentError("matrixtype :$matrixtype invalid, choose between :pca or :freq"))
kmeans(mtx, k, maxiter = iterations)
end
"""
kmedoids(data::PopData; k::Int64, iterations::Int64 = 100, distance::PreMetric = euclidean, matrixtype::Symbol = :pca)
Perform Kmedoids clustering on a `PopData` object. Returns a `KmedoidsResult`
object. Use the keyword argument `iterations` (default: 100) to set the maximum number of iterations allowed to
achieve convergence. Interally, kmeans clustering is performed on either the principal components of the scaled allele frequencies,
or just the scaled allele frequencies themselves. In both cases, `missing` values are replaced by the global mean allele frequency.
**Keyword Arguments**
- `k`: the number of desired clusters, given as an `Integer`
- `iterations::Int64`: the maximum number of iterations to attempt to reach convergence (default: `100`)
- `distance`: type of distance matrix to calculate on `matrixtype` (default: `euclidean`)
- see [Distances.jl](https://github.com/JuliaStats/Distances.jl) for a list of options (e.g. sqeuclidean, etc.)
- `matrixtype`: type of input matrix to compute (default: `:pca`)
- `:pca`: matrix of Principal Components
- `:freq`: matrix of scaled allele frequencies
"""
function kmedoids(data::PopData; k::Int64, iterations::Int64 = 100, distance::PreMetric = euclidean, matrixtype::Symbol = :pca)
mtx = matrixtype == :pca ?
pairwise(distance, pca(data, center = false, scale = true).proj |> permutedims, dims = 2) : matrixtype == :freq ?
pairwise(distance, matrix(data, center = false, scale = true), dims = 1) :
throw(ArgumentError("matrixtype :$matrixtype invalid, choose between :pca or :freq"))
kmedoids(mtx, k, maxiter = iterations)
end
"""
hclust(data::PopData; linkage::Symbol = :single, branchorder::Symbol = :r, distance::PreMetric = euclidean, matrixtype::Symbol = :pca)
An expansion of `Clustering.hclust` (from Clustering.jl) to perform hierarchical clustering on a PopData object. This is a convenience method
which converts the `PopData` object to either an allele frequency or PCA matrix, converts that into a distance matrix, and performs hierarchical
clustering on that distance matrix. Returns an `Hclust` object, which contains many metrics but does not include cluster assignments. Use
`cutree(::PopData, ::Hclust; krange...)` to compute the sample assignments for a range of `k` clusters.
**Keyword Arguments**
- `linkage`: defines how the distances between the data points are aggregated into the distances between the clusters (default: `:single`)
- `:single`: use the minimum distance between any of the cluster members
- `:average`: use the mean distance between any of the cluster members
- `:complete`: use the maximum distance between any of the members
- `:ward`: the distance is the increase of the average squared distance of a point to its cluster centroid after merging the two clusters
- `:ward_presquared`: same as `:ward`, but assumes that the distances in the distance matrix are already squared.
- `branchorder`: algorithm to order leaves and branches (default: `:r`)
- `:r`: ordering based on the node heights and the original elements order (compatible with R's hclust)
- `:optimal`: branches are ordered to reduce the distance between neighboring leaves from separate branches using the "fast optimal leaf ordering" [algorithm](https://doi.org/10.1093/bioinformatics/17.suppl_1.S22)
- `distance`: type of distance matrix to calculate on `matrixtype` (default: `euclidean`)
- see [Distances.jl](https://github.com/JuliaStats/Distances.jl) for a list of options (e.g. sqeuclidean, etc.)
- `matrixtype`: type of input matrix (default: `:pca`)
- `:pca`: matrix of Principal Components
- `:freq`: matrix of allele frequencies
"""
function hclust(data::PopData; linkage::Symbol = :single, branchorder::Symbol = :r, distance::PreMetric = euclidean, matrixtype::Symbol = :pca)
mtx = matrixtype == :pca ?
pairwise(distance, pca(data, center = false, scale = true).proj |> permutedims, dims = 2) : matrixtype == :freq ?
pairwise(distance, matrix(data, center = false, scale = true), dims = 1) :
throw(ArgumentError("matrixtype :$matrixtype invalid, choose between :pca or :freq"))
hclust(mtx, linkage = linkage, branchorder = branchorder)
end
"""
cutree(::PopData, hcres::Hclust; krange::UnitRange{Int64}, height::Union{Int64, Nothing} = nothing)
cutree(::PopData, hcres::Hclust; krange::Vector{Int64}, height::Union{Int64, Nothing} = nothing)
An expansion to the `Clustering.cutree` method (from Clustering.jl) that performs cluster assignments over `krange`
on the `Hclust` output from `hclust()`. Returns a `DataFrame` of sample names and columns corresponding to assignments
per k in `krange`. The `PopData` object is used only for retrieving the sample names.
**Keyword Arguments**
- `krange`: the number of desired clusters, given as a vector (ex. `[2,4,5]`) or range (`2:5`)
- `h::Integer`: the height at which the tree is cut (optional)
"""
function cutree(data::PopData, hcres::Hclust; krange::Union{UnitRange{Int64},Vector{Int64}}, height::Union{Int64, Nothing} = nothing)
tmp = map(i -> cutree(hcres, k = i, h = height), krange)
out = DataFrame([tmp[i] for i in 1:length(krange)], Symbol.(krange))
insertcols!(out, 1, :name => unique(data.genodata.name))
return out
end
"""
fuzzycmeans(data::PopData; c::Int64, fuzziness::Int64 = 2, iterations::Int64 = 100, matrixtype::Symbol = :pca)
An expansion of `Clustering.fuzzy_cmeans` (from Clustering.jl) to perform Fuzzy C-means clustering on a PopData object. This is a convenience method
which converts the `PopData` object to either an allele frequency or PCA matrix, and performs Fuzzy C-means
clustering on the Euclidean distance matrix of that. Returns a `FuzzyCMeansResult` object, which contains the assignment weights in the
`.weights` field.
**Keyword Arguments**
- `c`: the number of desired clusters, given as an `Integer`
- `fuzziness::Integer`: clusters' fuzziness, must be >1 (default: `2`)
- a fuzziness of 2 is common for systems with unknown numbers of clusters
- `iterations::Int64`: the maximum number of iterations to attempt to reach convergence (default: `100`)
- `matrixtype`: type of input matrix to compute (default: `:pca`)
- `:pca`: matrix of Principal Components
- `:freq`: matrix of scaled allele frequencies
"""
function fuzzycmeans(data::PopData; c::Int64, fuzziness::Int64 = 2, iterations::Int64 = 100, matrixtype::Symbol = :pca)
mtx = matrixtype == :pca ?
pca(data, center = false, scale = true).proj |> permutedims : matrixtype == :freq ?
matrix(data, center = false, scale = true) :
throw(ArgumentError("matrixtype :$matrixtype invalid, choose between :pca or :freq"))
fuzzy_cmeans(mtx, c, fuzziness, maxiter = iterations)
end
"""
dbscan(::PopData; radius::Float64, minpoints::Int64 = 2, distance::PreMetric = euclidean, matrixtype::Symbol = :pca)
An expansion of `Clustering.dbscan` (from Clustering.jl) to perform Density-based Spatial Clustering of Applications with Noise (DBSCAN)
on a PopData object. This is a convenience method which converts the `PopData` object to either an allele frequency or PCA matrix, and performs
DBSCAN clustering on the distance matrix of that. Returns a `DbscanResult` object, which contains the assignments in the
`.assignments` field.
**Keyword Arguments**
- `radius::Float64`: the radius of a point neighborhood
- `minpoints::Int`: the minimum number of a core point neighbors (default: `2`)
- `distance`: type of distance matrix to calculate on `matrixtype` (default: `euclidean`)
- see [Distances.jl](https://github.com/JuliaStats/Distances.jl) for a list of options (e.g. sqeuclidean, etc.)
- `matrixtype`: type of input matrix (default: `:pca`)
- `:pca`: matrix of Principal Components
- `:freq`: matrix of allele frequencies
"""
function dbscan(data::PopData; radius::Float64, minpoints::Int64 = 2, distance::PreMetric = euclidean, matrixtype::Symbol = :pca)
mtx = matrixtype == :pca ?
pairwise(distance, pca(data, center = false, scale = true).proj |> permutedims, dims = 2) : matrixtype == :freq ?
pairwise(distance, matrix(data, center = false, scale = true), dims = 1) :
throw(ArgumentError("matrixtype :$matrixtype invalid, choose between :pca or :freq"))
dbscan(mtx, radius, minpoints)
end
"""
```julia
cluster(::PopData, method::Function ; kwargs)
```
A convenience wrapper to perform clustering on a `PopData` object determined by a designated `method` (see below). The
chosen method must also be supplied with the appropriate keyword arguments for that method. For more information on
a specific method, see its docstring with `?methodname`
**Clustering Methods**
- `kmeans`: K-means++ clustering
- kwargs: `k`, `iterations`, `matrixtype`
- `kmedoids`: K-medoids clustering
- kwargs: `k`, `iterations`, `distance`, `matrixtype`
- `hclust`: Hierarchical clustering
- kwargs: `linkage`, `branchorder`, `distance`, `matrixtype`
- `fuzzycmeans`: Fuzzy C-means lustering
- kwargs: `c`, `fuzziness`, `iterations`, `matrixtype`
- `dbscan`: Density-based Spatial Clustering of Applications with Noise (DBSCAN)
- kwargs: `radius`, `minpoints`, `distance`, `matrixtype`
"""
function cluster(data::PopData, method::Function ; kwargs...)
methodlist = [:kmeans, :kmedoids, :hclust, :fuzzycmeans, :dbscan]
Symbol(method) ∉ methodlist && throw(ArgumentError("$method (2nd positional argument) is not a valid method. See `?cluster` for more information."))
method(data; kwargs...)
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 10796 | """
missingdata(data::PopData; by::Union{String, Symbol} = "sample")
Get missing genotype information in a `PopData`. Specify a mode of operation
to return a DataFrame corresponding with that missing information.
#### Modes
- "sample" - returns a count and list of missing loci per individual (default)
- "population" - returns a count of missing genotypes per population
- "locus" - returns a count of missing genotypes per locus
- "locusxpopulation" - returns a count of missing genotypes per locus per population
### Example:
```
missingdata(@gulfsharks, by = "pop")
```
"""
function missingdata(data::PopData; by::Union{String, Symbol} = "sample")
if string(by) ∈ ["sample", "population", "locus", "locusxpopulation"]
_missingdata(data, Val(Symbol(by)))
else
throw(ArgumentError("Mode \"$by\" not recognized. Please specify one of: sample, population, locus, or full"))
end
end
# Per-individual missing-genotype counts (one row per sample).
function _missingdata(data::PopData, ::Val{:sample})
    DataFrames.combine(DataFrames.groupby(data.genodata, :name), :genotype => (i -> count(ismissing, i)) => :missing)
end

# Missing-genotype counts aggregated by population.
function _missingdata(data::PopData, ::Val{:population})
    DataFrames.combine(DataFrames.groupby(data.genodata, :population), :genotype => (i -> count(ismissing, i)) => :missing)
end

# Missing-genotype counts aggregated by locus.
function _missingdata(data::PopData, ::Val{:locus})
    DataFrames.combine(DataFrames.groupby(data.genodata, :locus), :genotype => (i -> count(ismissing, i)) => :missing)
end

# Missing-genotype counts per locus within each population.
function _missingdata(data::PopData, ::Val{:locusxpopulation})
    DataFrames.combine(DataFrames.groupby(data.genodata, [:locus, :population]), :genotype => (i -> count(ismissing, i)) => :missing)
end
# Fraction of positions at which `x` and `y` hold identical values, skipping
# positions where the comparison is `missing` (i.e. either side is missing).
# Returns NaN when every position is missing (0/0), matching prior behavior.
function _pwiseidenticalhelper(x::T,y::U)::Float64 where T<:AbstractArray where U<:AbstractArray
    matches = 0
    compared = 0
    # eachindex(x, y) also guards against mismatched lengths
    for i in eachindex(x, y)
        eq = x[i] == y[i]
        if eq !== missing
            matches += eq
            compared += 1
        end
    end
    return matches / compared
end
"""
pairwiseidentical(data::PopData)
Return a pairwise matrix of the percent of identical genotypes at each locus between all pairs of individuals.
### Example:
```
julia> cats = @nancycats ;
julia> pairwiseidentical(cats)
237×237 Named Matrix{Float64}
A ╲ B │ N215 N216 … N289 N290
──────┼──────────────────────────────────────────
N215 │ 1.0 0.5 … 0.142857 0.166667
N216 │ 0.5 1.0 0.142857 0.166667
N217 │ 0.25 0.125 0.125 0.142857
N218 │ 0.375 0.25 0.25 0.142857
N219 │ 0.375 0.375 0.25 0.142857
⋮ ⋮ ⋮ ⋱ ⋮ ⋮
N296 │ 0.5 0.333333 0.0 0.0
N297 │ 0.166667 0.166667 0.428571 0.285714
N281 │ 0.142857 0.142857 0.25 0.428571
N289 │ 0.142857 0.142857 1.0 0.142857
N290 │ 0.166667 0.166667 … 0.142857 1.0
```
"""
# Builds a symmetric, sample-named matrix of pairwise genotype identity.
# Only the strict upper triangle is computed; results are mirrored and the
# diagonal is set to 1.0 (a sample is always identical to itself).
function pairwiseidentical(data::PopData)
    locmtx = locimatrix(data)
    ids = samplenames(data)
    nsamples = length(ids)
    result = NamedArray(zeros(Float64, nsamples, nsamples))
    setnames!(result, String.(ids), 1)
    setnames!(result, String.(ids), 2)
    for row in 1:(nsamples - 1)
        genos_row = view(locmtx, row, :)
        for col in (row + 1):nsamples
            genos_col = view(locmtx, col, :)
            shared = _pwiseidenticalhelper(genos_row, genos_col)
            result[row, col] = shared
            result[col, row] = shared
        end
    end
    for idx in 1:nsamples
        result[idx, idx] = 1.0
    end
    return result
end
"""
pairwiseidentical(data::PopData, sample_names::Vector{String})
Return a pairwise matrix of the percent of identical genotypes at
each nonmissing locus between all pairs of provided `sample_names`.
### Example:
```
julia> cats = @nancycats ;
julia> interesting_cats = samplenames(cats)[1:5]
5-element Array{String,1}:
"N215"
"N216"
"N217"
"N218"
"N219"
julia> pairwiseidentical(cats, interesting_cats)
5×5 Named Matrix{Float64}
A ╲ B │ N217 N218 N219 N220 N221
──────┼─────────────────────────────────────────────────
N217 │ 1.0 0.0 0.111111 0.222222 0.111111
N218 │ 0.0 1.0 0.333333 0.111111 0.444444
N219 │ 0.111111 0.333333 1.0 0.111111 0.333333
N220 │ 0.222222 0.111111 0.111111 1.0 0.222222
N221 │ 0.111111 0.444444 0.333333 0.222222 1.0
```
"""
# Subset variant: pairwise genotype identity restricted to `sample_names`.
function pairwiseidentical(data::PopData, sample_names::Vector{T}) where T<:AbstractString
    all_samples = samplenames(data)
    # validate the request before doing any work
    missingsamples = setdiff(sample_names, all_samples)
    if !isempty(missingsamples)
        throw(ArgumentError("Samples not found in the PopData:\n  " * join(missingsamples, "\n  ")))
    end
    # row positions of the requested samples within the full loci matrix
    sampidx = indexin(sample_names, all_samples)
    locmtx = locimatrix(data)
    n = length(sampidx)
    result = NamedArray(zeros(Float64, n, n))
    # NamedArrays requires String names; convert only if T is not already String
    s_names = T == String ? sample_names : string.(sample_names)
    setnames!(result, s_names,1)
    setnames!(result, s_names,2)
    # upper triangle only; mirrored below since identity is symmetric
    @inbounds for i in 1:n-1
        @inbounds v1 = view(locmtx,sampidx[i],:)
        @inbounds for j in i+1:n
            @inbounds v2 = view(locmtx,sampidx[j],:)
            res = _pwiseidenticalhelper(v1, v2)
            @inbounds result[i,j] = res
            @inbounds result[j,i] = res
        end
    end
    # fill in diagonal
    for i in 1:n
        @inbounds result[i,i] = 1.0
    end
    return result
end
"""
genofreqtable(data::PopData; by::Union{Symbol,String} = "global")
Return a table of the observed `global` (default) or `population` genotype frequencies in a PopData object.
### Example:
```
julia> cats = @nancycats ;
julia> genofreqtable(cats)
341×4 DataFrame
Row │ locus genotype count frequency
│ String Tuple… Int64 Float64
─────┼───────────────────────────────────────
1 │ fca8 (135, 143) 16 0.0737327
2 │ fca8 (133, 135) 9 0.0414747
3 │ fca8 (135, 135) 23 0.105991
4 │ fca8 (137, 143) 8 0.0368664
⋮ │ ⋮ ⋮ ⋮ ⋮
338 │ fca37 (206, 220) 1 0.00421941
339 │ fca37 (208, 218) 1 0.00421941
340 │ fca37 (184, 184) 3 0.0126582
341 │ fca37 (208, 210) 3 0.0126582
333 rows omitted
julia> genofreqtable(cats, by = "population")
1094×5 DataFrame
Row │ locus population genotype count frequency
│ String String Tuple… Int64 Float64
──────┼──────────────────────────────────────────────────
1 │ fca8 1 (135, 143) 3 0.375
2 │ fca8 1 (133, 135) 2 0.25
3 │ fca8 1 (135, 135) 2 0.25
4 │ fca8 1 (137, 143) 1 0.125
⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮
1091 │ fca37 17 (208, 208) 10 0.769231
1092 │ fca37 17 (182, 182) 1 0.0769231
1093 │ fca37 17 (182, 208) 1 0.0769231
1094 │ fca37 17 (208, 220) 1 0.0769231
1086 rows omitted
```
"""
function genofreqtable(data::PopData; by::Union{Symbol,String} = "global")
    # normalize `by` so both Symbols and any-cased Strings are accepted
    strby = lowercase(string(by))
    if strby == "global"
        # count each (locus, genotype) pair, ignoring missing genotypes
        grp = groupby(dropmissing(data.genodata, :genotype), [:locus, :genotype])
        counts = DataFrames.combine(grp, nrow => :count)
        # convert counts to within-locus frequencies
        counts = DataFrames.combine(
            groupby(counts, :locus), :genotype, :count,
            :count => (x -> x / sum(x)) => :frequency
        )
    elseif strby == "population"
        # same as above but counts/frequencies are per locus per population
        grp = groupby(dropmissing(data.genodata, :genotype), [:locus, :population, :genotype])
        counts = DataFrames.combine(grp, nrow => :count)
        counts = DataFrames.combine(
            groupby(counts, [:locus, :population]), :genotype, :count,
            :count => (x -> x / sum(x)) => :frequency
        )
    else
        throw(ArgumentError("Please use by = \"global\" (default) or \"population\""))
    end
end
"""
allelefreqtable(data::PopData; by::Union{Symbol,String} = "global")
Return a table of the observed `global` (default) or `population` allele frequencies in a PopData object.
### Example:
```
julia> cats = @nancycats ;
julia> allelefreqtable(cats)
108×4 DataFrame
Row │ locus allele count frequency
│ String Int16? Int64 Float64
─────┼───────────────────────────────────
1 │ fca8 135 105 0.241935
2 │ fca8 143 44 0.101382
3 │ fca8 133 33 0.0760369
4 │ fca8 137 83 0.191244
⋮ │ ⋮ ⋮ ⋮ ⋮
105 │ fca37 226 2 0.00421941
106 │ fca37 216 7 0.0147679
107 │ fca37 224 2 0.00421941
108 │ fca37 204 6 0.0126582
100 rows omitted
julia> allelefreqtable(cats, by = "population")
839×5 DataFrame
Row │ locus population allele count frequency
│ String String Int16? Int64 Float64
─────┼──────────────────────────────────────────────
1 │ fca8 1 135 9 0.5625
2 │ fca8 1 143 4 0.25
3 │ fca8 1 133 2 0.125
4 │ fca8 1 137 1 0.0625
⋮ │ ⋮ ⋮ ⋮ ⋮ ⋮
836 │ fca37 16 210 5 0.208333
837 │ fca37 17 208 22 0.846154
838 │ fca37 17 182 3 0.115385
839 │ fca37 17 220 1 0.0384615
831 rows omitted
```
"""
function allelefreqtable(data::PopData; by::Union{Symbol,String} = "global")
    # normalize `by` so both Symbols and any-cased Strings are accepted
    strby = lowercase(string(by))
    if strby == "global"
        grp = groupby(dropmissing(data.genodata, :genotype), :locus)
        # explode genotypes into one row per allele, keeping the locus grouping
        _alleles = DataFrames.combine(grp, :genotype => alleles => :allele, ungroup = false)
        # :n is the total number of alleles observed at the locus
        counts = DataFrames.combine(
            _alleles,
            :allele,
            nrow => :n,
            ungroup = false
        )
        # tally each allele and divide by the per-locus total to get frequencies
        counts = DataFrames.combine(
            groupby(DataFrame(counts), [:locus, :allele]),
            nrow => :count,
            [:n, :allele] => ((n,al) -> length(al)/first(n)) => :frequency
        )
    elseif strby == "population"
        # same pipeline, but grouped by locus within each population
        grp = groupby(dropmissing(data.genodata, :genotype), [:locus, :population])
        _alleles = DataFrames.combine(grp, :genotype => alleles => :allele, ungroup = false)
        counts = DataFrames.combine(
            _alleles,
            :allele,
            nrow => :n,
            ungroup = false
        )
        counts = DataFrames.combine(
            groupby(DataFrame(counts), [:locus, :population, :allele]),
            nrow => :count,
            [:n, :allele] => ((n,al) -> length(al)/first(n)) => :frequency
        )
    else
        throw(ArgumentError("Please use by = \"global\" (default) or \"population\""))
    end
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 2611 | """
_chisqlocus(locus::T) where T <: GenoArray
Calculate the chi square statistic and p-value for a locus
Returns a tuple with chi-square statistic, degrees of freedom, and p-value
"""
function _chisqlocus(locus::T) where T <: GenoArray
    ## Get expected number of genotypes in a locus
    expected = genocount_expected(locus)
    ## Get observed number of genotypes in a locus
    observed = genocount_observed(locus)
    # Accumulate ∑ (O - E)² / E directly. The previous version aliased
    # `expected` (chisq_stat = expected) and overwrote its values while
    # iterating it — same result, but in-place mutation of a dict under
    # iteration is fragile; a plain accumulator is safer and clearer.
    # (assumes expected counts are > 0 — TODO confirm genocount_expected guarantees this)
    chisq_stat = 0.0
    for (genotype, e) in expected
        o = get(observed, genotype, 0.0)
        chisq_stat += (o - e)^2 / e
    end
    # degrees of freedom: #genotype classes - #alleles
    df = length(expected) - allelecount(locus)
    p_val = df > 0 ? 1.0 - Distributions.cdf(Distributions.Chisq(df), chisq_stat) : missing
    return (chisq_stat, df, p_val)
end
"""
hwetest(data::PopData; by::String = "locus", correction = "none")
Calculate chi-squared test of HWE for each locus and returns observed and
expected heterozygosity with chi-squared, degrees of freedom and p-values for
each locus. Use `by = "population"` to perform this separately for each population
(default: `by = "locus"`). Use `correction =` to specify a P-value
adjustment method for multiple testing.
#### example
`hwetest(@gulfsharks, correction = "bh")` \n
`hwetest(@gulfsharks, by = "population", correction = "bh")` \n
### `correction` methods (case insensitive)
- `"bonferroni"` : Bonferroni adjustment
- `"holm"` : Holm adjustment
- `"hochberg"` : Hochberg adjustment
- `"bh"` : Benjamini-Hochberg adjustment
- `"by"` : Benjamini-Yekutieli adjustment
- `"bl"` : Benjamini-Liu adjustment
- `"hommel"` : Hommel adjustment
- `"sidak"` : Šidák adjustment
- `"forwardstop"` or `"fs"` : Forward-Stop adjustment
- `"bc"` : Barber-Candès adjustment
"""
@inline function hwetest(data::PopData; by::String = "locus", correction::String = "none")
    if by == "locus"
        # chi-square HWE test per locus; _chisqlocus returns (chisq, df, P)
        out_table = DataFrames.combine(
            groupby(data.genodata, :locus),
            :genotype => _chisqlocus => :chisq
        )
        DataFrames.select!(out_table, :locus, :chisq => [:chisq, :df, :P])
    elseif by == "population"
        # same test, performed separately within each population
        tmp = DataFrames.combine(
            groupby(data.genodata, [:locus, :population]),
            :genotype => _chisqlocus => :chisq
        )
        out_table = select!(tmp, :locus, :population, :chisq => [:chisq, :df, :P])
    else
        # robustness fix: previously any unrecognized `by` silently fell
        # through to the per-population analysis; fail loudly instead
        throw(ArgumentError("Mode \"$by\" not recognized. Please specify one of: locus or population"))
    end
    if correction == "none"
        return out_table
    else
        # append an adjusted-P column named after the correction method
        column_name = Symbol("P_"*correction)
        transform!(out_table, :P => (i -> _p_adjust(i, correction)) => column_name)
    end
end
const hwe = hwetest
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 5841 | """
counthet(geno::T, allele::Int) where T<:GenoArray
counthet(geno::T, allele::AbstractVector{U}) where T<:GenoArray where U<:Integer
Given a `GenoArray`, count the number of times `allele` appears in the
heterozygous state.
"""
function counthet(geno::T, allele::U) where T<:GenoArray where U<:Integer
    # init = 0 makes the reduction well-defined when `geno` is empty or
    # all-missing (previously this threw on an empty reduction); 0 matches
    # what the vector method returns for that case
    mapreduce(i -> _ishet(i, allele), +, skipmissing(geno); init = 0)
end
function counthet(geno::T, allele::AbstractVector{U}) where T<:GenoArray where U<:Integer
    tmp = skipmissing(geno)
    isempty(tmp) && return fill(0, length(allele))
    # one heterozygote count per requested allele
    map(_allele -> mapreduce(i -> _ishet(i, _allele), +, tmp; init = 0), allele)
end
"""
    counthom(geno::T, allele::Int) where T<:GenoArray
    counthom(geno::T, allele::AbstractVector{U}) where T<:GenoArray where U<:Integer

Given a `GenoArray`, count the number of times `allele` appears in the
homozygous state.
"""
function counthom(geno::T, allele::U) where T<:GenoArray where U <: Integer
    # widened U<:Signed to U<:Integer for consistency with counthet;
    # init = 0 keeps the reduction well-defined for empty/all-missing input
    mapreduce(i -> _ishom(i, allele), +, skipmissing(geno); init = 0)
end
function counthom(geno::T, allele::AbstractVector{U}) where T<:GenoArray where U<:Integer
    tmp = skipmissing(geno)
    isempty(tmp) && return fill(0, length(allele))
    # one homozygote count per requested allele
    map(_allele -> mapreduce(i -> _ishom(i, _allele), +, tmp; init = 0), allele)
end
"""
_genediversitynei87(het_exp::Union{Missing,AbstractFloat}, het_obs::Union{Missing,AbstractFloat}, n::Union{Integer, Float64}, corr::Bool = true)
Calculate overall gene diversity with the adjustment/correction, use `corr = false` to ignore sample-size correction `* n/(n-1)`.
Hₜ = 1 −sum(pbar²ᵢ + Hₛ/(ñ * np) − Het_obs/(2ñ*np))
- _ñ_ is the number of genotypes for a locus for a population
- _np_ is the number of genotypes of a locus across all populations
- i.e. sum(_ñ_)
- _pbar²_ is the observed homozygosity of a locus for that population
- _Hₛ_ is the within population gene diversity given by:
- Hₛ = ñ/(ñ-1) * (1 - sum(pbar²ᵢ - Het_observed / 2ñ))
(Nei M. (1987) Molecular Evolutionary Genetics. Columbia University Press).
"""
# Core method: both heterozygosities present.
# NOTE(review): with corr = true and n == 1 the correction n/(n-1) is Inf —
# callers appear to supply n > 1; confirm upstream guards.
@inline function _genediversitynei87(het_exp::T, het_obs::T, n::Union{Integer,T}; corr::Bool = true) where T<: AbstractFloat
    corr_val = corr ? n/(n-1.0) : 1.0
    return @fastmath (het_exp - (het_obs/n/2.0)) * corr_val
end
# The three methods below propagate `missing` whenever either heterozygosity
# is missing, mirroring how `missing` propagates through arithmetic.
@inline function _genediversitynei87(het_exp::AbstractFloat, het_obs::Missing, n::Union{Integer,AbstractFloat}; corr::Bool = true)
    return missing
end
@inline function _genediversitynei87(het_exp::Missing, het_obs::AbstractFloat, n::Union{Integer,AbstractFloat}; corr::Bool = true)
    return missing
end
@inline function _genediversitynei87(het_exp::Missing, het_obs::Missing, n::Union{Integer,AbstractFloat}; corr::Bool = true)
    return missing
end
"""
_hetero_obs(data::T) where T <: GenoArray
Returns observed heterozygosity as a mean of the number of heterozygous genotypes, defined
as genotypes returning `true` for `_ishet()`. This is numerically feasible because
`true` values are mathematically represented as `1`, whereas `false` are represented
as `0`.
"""
# Observed heterozygosity: fraction of non-missing genotypes that are
# heterozygous (_ishet returns a Bool, so summing counts the hets).
@inline function _hetero_obs(data::T) where T <: GenoArray
    nonmiss = skipmissing(data)
    isempty(nonmiss) && return missing
    hets = 0
    total = 0
    for geno in nonmiss
        hets += _ishet(geno)
        total += 1
    end
    return hets / total
end
"""
_hetero_exp(allele_freqs::Vector{T}) where T <: GenoArray
Returns the expected heterozygosity of an array of genotypes,
calculated as 1 - sum of the squared allele frequencies.
"""
# Expected heterozygosity: 1 - Σ pᵢ² over the allele frequencies pᵢ.
@inline function _hetero_exp(data::T) where T <: GenoArray
    freqs = allelefreq(data)
    isempty(freqs) && return missing
    homozygosity = sum(abs2, values(freqs))
    return 1.0 - homozygosity
end
"""
heterozygosity(data::PopData; by::Union{Symbol,String} = "locus")
Calculate observed and expected heterozygosity in a `PopData` object. For loci,
heterozygosity is calculated in the Nei fashion, such that heterozygosity is
calculated as the average over heterozygosity per locus per population.
### Modes
- `"locus"` : heterozygosity per locus (default)
- `"sample"` : heterozygosity per individual/sample
- `"population"`: heterozygosity per population
## Example
heterozygosity(@nancycats, by = "population" )
"""
function heterozygosity(data::PopData; by::Union{String, Symbol} = "locus")
    # normalize `by` so Symbols and any-cased Strings are accepted,
    # then dispatch on a Val of the mode
    strby = lowercase(string(by))
    if strby ∈ ["locus", "sample", "population"]
        _heterozygosity(data, Val(Symbol(strby)))
    else
        # ArgumentError (rather than a bare `error`) for consistency with the
        # other mode-validating functions in this package
        throw(ArgumentError("please specify by = \"locus\", \"sample\", or \"population\""))
    end
end
# Per-locus heterozygosity, Nei-style: compute per locus per population,
# then average the population values within each locus.
function _heterozygosity(data::PopData, ::Val{:locus})
    tmp = DataFrames.combine(
        groupby(data.genodata, [:locus, :population]),
        :genotype => nonmissing => :n_tmp,
        :genotype => _hetero_obs => :het_pop_obs,
        :genotype => _hetero_exp => :het_pop_exp
    )
    # collapse populations: total n and mean obs/exp heterozygosity per locus
    DataFrames.combine(
        groupby(tmp, :locus),
        :n_tmp => sum => :n,
        :het_pop_obs => (h_o -> mean(skipmissing(h_o))) => :het_obs,
        :het_pop_exp => (h_e -> mean(skipmissing(h_e))) => :het_exp
    )
end
# Per-sample heterozygosity (expected het is undefined for a single
# individual, so only observed is reported).
function _heterozygosity(data::PopData, ::Val{:sample})
    DataFrames.combine(
        groupby(data.genodata, :name),
        :genotype => nonmissing => :n,
        :genotype => _hetero_obs => :het_obs
    )
end
# Per-population observed and expected heterozygosity.
function _heterozygosity(data::PopData, ::Val{:population})
    DataFrames.combine(
        groupby(data.genodata, :population),
        :genotype => nonmissing => :n,
        :genotype => _hetero_obs => :het_obs,
        :genotype => _hetero_exp => :het_exp
    )
end
#NOTE this is not intended to be performant. It's a convenience function.
"""
samplehet(data::PopData, individual::String)
Calculate the observed heterozygosity for an individual in a `PopData` object.
"""
@inline function samplehet(data::PopData, individual::String)
    # fail fast if the sample does not exist
    if individual ∉ samplenames(data)
        throw(ArgumentError("$individual not found in data"))
    end
    genos = data.genodata[data.genodata.name .== individual, :genotype]
    return _hetero_obs(genos)
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 1646 | """
pca(data::PopData; maxpc::Int = 0, method::Symbol = :svd, missings::String = "mean", pratio::Float64 = 0.99, center::Bool = false, scale::Bool = true)
Perform a Principal Component Analysis on a PopData object. Returns an indexible `MultivariateStats.PCA` object.
#### Arguments
- `data::PopData`: a `PopData` object
#### keyword arguments
- `method::Symbol`: The PCA method to use (default: `:svd`)
- `:cov`: based on covariance matrix decomposition
- `:svd`: based on Singular Value Decomposition of the input data
- `maxpc::Int`: The maximum number of principal components to retain (default: 0 = `(min(d, ncol-1))`)
- `missings::String`: How to treat missing genotypes in the allele frequency matrix (default: `mean`)
- `"mean"`: replace `missing` values with the mean frequency for that allele in that locus
- `"missing"`: keep `missing` values as they are
- `"zero"`: replace `missing` values with `0`
- `pratio::Float64`: The maxium ratio of variances preserved in the principal subspace (default = `0.99`)
- `center::Bool`: whether to center the allele frequency matrix (default: `false`)
- `scale::Bool`: whether to Z-score scale the allele frequency matrix (default: `true`)
"""
function pca(data::PopData; maxpc::Int = 0, method::Symbol = :svd, missings::String = "mean", pratio::Float64 = 0.99, center::Bool = false, scale::Bool = true)
    # NOTE(review): mean = 0 presumably signals to MultivariateStats.fit that
    # the matrix is already centered (done by `matrix` below when center = true),
    # while `nothing` lets fit compute and subtract the mean — confirm against
    # the MultivariateStats PCA docs
    meankw = center ? 0 : nothing
    mtx = matrix(data, scale = scale, center = center, missings = missings)
    # default (maxpc = 0): retain at most min(n_rows, n_cols - 1) components
    pckw = iszero(maxpc) ? min(size(mtx, 1), size(mtx,2) - 1) : maxpc
    fit(PCA, mtx; maxoutdim=pckw, mean = meankw, pratio = pratio, method = method)
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 3839 | """
# Population genetics analyses in Julia
Repository: https://www.github.com/biojulia/PopGen.jl/
Documentation: https://biojulia.net/PopGen.jl/
\nA few things things you can do to get started:
## Import Data
- `PopGen.read(filename, kwargs...)`
- `genepop(infile, kwargs...)` or similar file-specific importer
- use available `@gulfsharks` or `@nancycats` datasets
## Explore PopData
- `populations(PopData)` to view population information
- `loci(PopData)` to view locus names
- `samplenames(PopData)` to view sample names
- `missingdata(PopData, by = ...)` to view missing information
## Manipulate PopData
- `populations!(PopData, ...)` to rename populations
- `locations!(PopData, ...)` to add geographical coordinates
- `exclude!(PopData, kwargs...)` to selectively remove data
## Analyses
- `richness(PopData)` to calculate allelic richness
- `Kinship(PopData, method = ...)` to get pairwise Kinship of individuals
- `summary(PopData)` to calculate F-statistics, heterozygosity, etc.
- `hwetest(PopData)` to test for Hardy-Weinberg Equilibrium
- `pairwisefst(PopData)` to calculate FST between pairs of populations
- `cluster(PopData, kmeans, k = 3)` to perform Kmeans++ clustering
"""
module PopGen
#   o O       o O       o O       o O       o O
# o | | O   o | | O   o | | O   o | | O   o | | O
# | | | | O | | | | Dependencies| | | | O | | | | O
#  O | | o   O | | o   O | | o   O | | o   O | | o
#       O o       O o       O o       O o       O o
using Reexport
using PopGenCore
@reexport module PopGenCore
    export PopData, PopDataInfo, GenoArray, Genotype, SNP, Msat
    export genodata, metadata, info
    export isbiallelic, ishom, ishet
    export baypass, delimited, csv, genepop, vcf, bcf, plink, @nancycats, @gulfsharks
    export sampleinfo, sampleinfo!, locusinfo, locusinfo!, samplenames, loci
    export copy, size, sort, dropmonomorphic, dropmonomorphic!
    export dropmultiallelic, dropmultiallelic!
    export locationdata, locationdata!
    export locidataframe, locimatrix
    # bugfix: `genotypes` was exported twice on this line
    export genotypes
    export populations, populations!
    export exclude, remove, omit, exclude!, remove!, omit!, keep, keep!
    export filter, filter!
end
@reexport import PopGenCore: read, write
using Distributions, DataFrames, PooledArrays, NamedArrays
using Distances
using Random: shuffle
using OnlineStats
using Term.Progress
using MultipleTesting, StatsBase
import Clustering: kmeans, kmedoids, hclust, Hclust, cutree, fuzzy_cmeans, dbscan
import MultivariateStats: fit, PCA
import TSne: tsne
#   o O       o O       o O       o O       o O
# o | | O   o | | O   o | | O   o | | O   o | | O
# | | | | O | | | |Include Files| | | | O | | | | O
#  O | | o   O | | o   O | | o   O | | o   O | | o
#       O o       O o       O o       O o       O o
include("Utils.jl")
# heterozygosity functions
include("Heterozygosity.jl")
export heterozygosity, samplehet
# manipulation and exploration
include("DataExploration.jl")
export pairwiseidentical, missingdata, genofreqtable, allelefreqtable
# summary information
include("SummaryInfo.jl")
export alleleaverage, richness, summary, summarystats
#Analyses
include("HardyWeinberg.jl")
export hwetest, hwe
include("FStatistics/FstGlobal.jl")
include("FStatistics/FstByLocus.jl")
include("FStatistics/PairwiseFST.jl")
include("FStatistics/FstPermutations.jl")
export pairwisefst, Hudson, Nei, WeirCockerham
include("Kinship/KinshipPairwise.jl")
export kinship, kinshiptotable
include("Kinship/KinshipMoments.jl")
export QuellerGoodnight, Ritland, Lynch, LynchRitland, LynchLi, LiHorvitz, Moran, Blouin, Loiselle #, Wang
include("Kinship/KinshipPostHocs.jl")
export kinshipposthoc
include("Clustering.jl")
export cluster, kmeans, kmedoids, hclust, cutree, fuzzycmeans, dbscan
include("PCA.jl")
export pca
include("TSne.jl")
export tsne
end # module PopGen
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 928 | function _onehot_biallelic(data::PopData)
    # one-hot-ish (dosage) encoding requires strictly biallelic data
    data.metadata.biallelic == false && throw(ArgumentError("Data must be biallelic. If practical to do so, use dropmultiallelic() to remove non-biallelic loci"))
    gmtx = locimatrix(data)
    # per locus: encode genotypes as allele-2 dosage (0/1/2), missing as -1,
    # then hcat the per-locus columns into a samples × loci Int8 matrix
    mapreduce(hcat, eachcol(gmtx)) do locus
        alle = uniquebialleles(locus)
        d = Dict{Union{Missing, NTuple{2, eltype(alle)}}, Int8}(
            (alle[1], alle[1]) => Int8(0),
            (alle[1], alle[2]) => Int8(1),
            (alle[2], alle[2]) => Int8(2),
            missing => Int8(-1)
        )
        # NOTE(review): assumes heterozygotes are stored as the ordered tuple
        # (alle[1], alle[2]); a reversed tuple would raise a KeyError —
        # TODO confirm genotypes are normalized upstream
        Int8[d[i] for i in locus]
    end
end
# WIP: currently only builds the one-hot input matrix; the actual random
# forest analysis is not implemented yet.
function randomforest(data::PopData)
    # bugfix: was `throw(error(...))` — `error` already throws, so the outer
    # `throw` never executed; also fixed the "inputation" typo in the message
    missing ∈ data.genodata.genotype && error("Unfortunately, random forest analysis cannot work with missing data. You may try to filter out loci and/or samples with missing data or use a genotype imputation method (e.g. fastPhase, BEAGLE).")
    input = _onehot_biallelic(data)
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 5474 | """
alleleaverage(data::PopData; rounding::Bool = true)
Returns a NamedTuple of the average number of alleles ('mean') and
standard deviation (`stdev`) of a `PopData`. Use `rounding = false` to
not round results. Default (`true`) rounds to 4 digits.
"""
function alleleaverage(data::PopData; rounding::Bool = true)
    tmp = richness(data)
    # NOTE(review): despite the `stdev` field name (and the docstring's
    # "standard deviation"), StatsBase's `variation` is the coefficient of
    # variation (std / mean), not the raw standard deviation — confirm intent
    if rounding
        (mean = round(mean(tmp.richness), digits = 4), stdev = round(variation(tmp.richness), digits = 4))
    else
        (mean = mean(tmp.richness), stdev = variation(tmp.richness))
    end
end
"""
richness(data::PopData; by::String = "locus")
Calculates various allelic richness and returns a table of per-locus
allelic richness. Use `by = "population"` to calculate richness by
locus by population.
"""
# Allelic richness: number of unique alleles per locus (optionally per
# locus within each population).
function richness(data::PopData; by::String = "locus")
    groupcols = if by == "locus"
        :locus
    elseif by == "population"
        [:locus, :population]
    else
        throw(ArgumentError("Please use by = \"locus\" (default) or \"population\""))
    end
    grouped = groupby(data.genodata, groupcols)
    return DataFrames.combine(
        grouped,
        :genotype => (geno -> length(uniquealleles(geno))) => :richness
    )
end
"""
summary(data::PopData; by::String = "global")
Provides summary statistics for a `PopData` object. Use `by = "locus"` for
summary information by locus. Global values are given as unweighted means of
the per-locus parameters.
### Het_obs
observed heterozygosity given as:\n
1 - ∑ₖ ∑ᵢ Pₖᵢᵢ/np \n
where Pkii represents the proportion of homozygote `i` in sample `k` and `np`
is the number of samples in that population
### HT
overall gene diversity given as: \n
1 - ∑ᵢ(p̄ᵢ² + (HS / (ñ × np)) - Het_obs / (2 × ñ × np)) \n
where p̄ᵢ = ∑ₖpₖᵢ / np
### HS
within population gene diversity given as: \n
1 - ∑ᵢ(pᵢ² + HS / (ñ × np) - Het_obs / (2 × ñ × np)) \n
where ñ = np / ∑ₖ(1/nₖ) \n
where p̄ᵢ² = ∑ₖ(pᵢₖ² / np)
### DST
amount of gene diversity among samples given as: \n
HT - HS
### DST′
amount of gene diversity among samples adjusted for sample size given as: \n
(np / (np-1)) × Dst
### HT′
overall gene diversity adjusted for sample size given as: \n
HS + DST′
### FST
proportion of the total genetic variance in subpopulations relative to the total genetic variance given as: \n
DST / HT
### FST′
proportion of the total genetic variance in subpopulations relative to the total genetic variance, adjusted for heterozygosity given as: \n
DST′ / HT′
### FIS
proportion of the genetic variance in a locus relative to the genetic variance within subpopulations given as: \n
1 - (Het_obs / HS)
### DEST
population differentiation (Jost 2008) given as: \n
(np/(np-1)) × (Ht'-Hs) / (1-Hs)
"""
function Base.summary(data::PopData; by::String = "global")
    # observed/expected het per locus per pop
    het_df = DataFrames.combine(
        groupby(data.genodata, [:locus, :population]),
        :genotype => nonmissing => :n,
        :genotype => _hetero_obs => :het_obs,
        :genotype => _hetero_exp => :het_exp,
        :genotype => allelefreq => :alleles
    )
    # collapse down to retrieve averages and counts
    # :count = populations with data, :mn = ñ (harmonic-mean-style corrected n),
    # :HS = within-population gene diversity, :avg_freq = Σ p̄ᵢ²
    n_df = DataFrames.combine(
        groupby(het_df, :locus),
        :n => countnonzeros => :count,
        :n => (n -> countnonzeros(n) / reciprocalsum(n)) => :mn,
        [:het_obs, :het_exp, :n] => ((o,e,n) -> mean(skipmissing(_genediversitynei87.(e, o, countnonzeros(n) / reciprocalsum(n))))) => :HS,
        :het_obs => (o -> mean(skipmissing(o)))=> :Het_obs,
        :alleles => (alleles -> sum(values(avg_allelefreq(alleles, 2))))=> :avg_freq
    )
    # Nei (1987) decomposition: HT, DST, and sample-size-adjusted DST′/HT′
    Ht = 1.0 .- n_df.avg_freq .+ (n_df.HS ./ n_df.mn ./ n_df.count) - (n_df.Het_obs ./ 2.0 ./ n_df.mn ./ n_df.count)
    DST = Ht .- n_df.HS
    DST′ = n_df.count ./ (n_df.count .- 1) .* DST
    HT′ = n_df.HS .+ DST′
    if lowercase(by) == "locus"
        # per-locus F-statistics
        FIS = 1.0 .- (n_df.Het_obs ./ n_df.HS)
        FST = DST ./ Ht
        DEST = DST′ ./ (1.0 .- n_df.HS)
        FST′ = DST′ ./ HT′
        insertcols!(
            select!(
                n_df,
                :locus,
                :Het_obs => (i -> round.(i, digits = 4)) => :Het_obs,
                :HS => (i -> round.(i, digits = 4)) => :HS,
            ),
            :HT => round.(Ht, digits = 4),
            :DST => round.(DST, digits = 4),
            :HT′ => round.(HT′, digits = 4),
            :DST′ => round.(DST′, digits = 4),
            :FST => round.(FST, digits = 4),
            :FST′ => round.(FST′, digits = 4),
            :FIS => round.(FIS, digits = 4),
            :DEST => round.(DEST, digits = 4)
        )
    elseif lowercase(by) == "global"
        # global values: unweighted means of per-locus parameters
        # (Inf/NaN loci excluded via skipinfnan)
        Het_obs = mean(skipinfnan(n_df.Het_obs))
        HS = mean(skipinfnan(n_df.HS))
        Ht = mean(skipinfnan(Ht))
        DST = mean(skipinfnan(DST))
        DST′ = mean(skipinfnan(DST′))
        HT′ = mean(skipinfnan(HT′))
        DataFrame(
            :Het_obs => round.(Het_obs, digits = 4),
            :HS => round.(HS, digits = 4),
            :HT => round.(Ht , digits = 4),
            :DST => round.(DST, digits = 4),
            :HT′ => round.(HT′, digits = 4),
            :DST′ => round.(DST′, digits = 4),
            :FST => round(DST / Ht, digits = 4),
            :FST′ => round(DST′ / HT′, digits = 4),
            :FIS => round(1.0 - (Het_obs / HS), digits = 4),
            :DEST => round(DST′ / (1.0 - HS), digits = 4)
        )
    else
        throw(ArgumentError("Use by = \"global\" or \"locus\""))
    end
end
const summarystats = summary
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 2283 | """
tsne(data::PopData, args...; kwargs...)
Perform t-SNE (t-Stochastic Neighbor Embedding) on a PopData object, returning a DataFrame. Converts the
PopData object into a matrix of allele frequencies with missing values replaced with
the global mean frequency of that allele. First performs PCA on that matrix, retaining
`reduce_dims` dimensions of the PCA prior to t-SNE analysis. The positional and keyword arguments
are the same as `tsne` from `TSne.jl`.
### Arguments
- `data`: a `PopData` object
- `ndims`: Dimension of the embedded space (default: `2`)
- `reduce_dims` the number of the first dimensions of X PCA to use for t-SNE, if 0, all available dimension are used (default: `0`)
- `max_iter`: Maximum number of iterations for the optimization (default: `1000`)
- `perplexity`: The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50. Different values can result in significantly different results (default: `30`)
### Keyword Arguments (optional)
- `distance`: type `Function` or `Distances.SemiMetric`, specifies the function to
use for calculating the distances between the rows
- `pca_init`: whether to use the first `ndims` of the PCA as the initial t-SNE layout,
if `false` (the default), the method is initialized with the random layout
- `max_iter`: how many iterations of t-SNE to do
- `verbose`: output informational and diagnostic messages
- `progress`: display progress meter during t-SNE optimization (default: `true`)
- `min_gain`: `eta`: `initial_momentum`, `final_momentum`, `momentum_switch_iter`,
`stop_cheat_iter`: `cheat_scale` low-level parameters of t-SNE optimization
- `extended_output`: if `true`, returns a tuple of embedded coordinates matrix,
point perplexities and final Kullback-Leibler divergence
"""
# t-SNE on a PopData object: PCA-reduced allele frequency matrix (missing
# values replaced with per-allele means) embedded via TSne.tsne.
function tsne(data::PopData, ndim = 2, reduce_dims = 0, max_iter = 1000, perplexity = 30; kwargs...)
    # the positional arguments now carry the defaults the docstring already
    # promised (ndims = 2, reduce_dims = 0, max_iter = 1000, perplexity = 30);
    # previously all four were required — existing calls are unaffected
    mtx = freqmatrix_mean(data)
    _sne = tsne(mtx, ndim, reduce_dims, max_iter, perplexity; kwargs...)
    # label embedding axes dim1..dimN and prepend sample metadata
    res = DataFrame(_sne, String["dim"*"$i" for i in 1:size(_sne, 2)])
    insertcols!(res, 1, :name => data.sampleinfo.name, :population => data.sampleinfo.population)
    return res
end
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 2641 | ## experimental and not exported or documented!
# Experimental, not exported: one samples × unique-genotypes 0/1 adjacency
# matrix per locus (missing genotypes match nothing, so their row is all 0).
function _adjacency_matrix(data::PopData)
    data_loci = groupby(data.genodata, :locus)
    out_vec = Vector{Array{Int8,2}}(undef, data.metadata.loci)
    # renamed the loop variables: the original inner loop reused `j`,
    # shadowing the outer locus index — correct but highly confusing
    for (locidx, locus) in enumerate(data_loci)
        uniq = unique(skipmissing(locus.genotype))
        adj_mat = fill(Int8(0), data.metadata.samples, length(uniq))
        for (geno, row) in zip(locus.genotype, eachrow(adj_mat))
            # mark which unique genotype this sample carries (identity test,
            # so `missing` genotypes leave the row as zeros)
            row .= Ref(geno) .=== uniq
        end
        out_vec[locidx] = adj_mat
    end
    return out_vec
end
"""
_p_adjust(pvals::Vector{T}, method::String) where T <: Union{Missing, <:AbstractFloat}
Modification to `MultipleTesting.adjust` to include `missing` values in the
returned array. See MultipleTesting.jl docs for full more detailed information.
**Example**
```
julia> _p_adjust([0.1, 0.01, 0.005, 0.3], "bh")
```
### `correction` methods (case insensitive)
- `"bonferroni"` : Bonferroni adjustment
- `"holm"` : Holm adjustment
- `"hochberg"` : Hochberg adjustment
- `"bh"` : Benjamini-Hochberg adjustment
- `"by"` : Benjamini-Yekutieli adjustment
- `"bl"` : Benjamini-Liu adjustment
- `"hommel"` : Hommel adjustment
- `"sidak"` : Šidák adjustment
- `"forwardstop"` or `"fs"` : Forward-Stop adjustment
- `"bc"` : Barber-Candès adjustment
"""
@inline function _p_adjust(pvals::Vector{T}, method::String) where T <: Union{Missing, <:AbstractFloat}
    # make a dict of all possible tests and their respective functions
    d = Dict(
        "bonferroni" => Bonferroni(),
        "holm" => Holm(),
        "hochberg" => Hochberg(),
        "bh" => BenjaminiHochberg(),
        "by" => BenjaminiYekutieli(),
        "bl" => BenjaminiLiu(),
        "hommel" => Hommel(),
        "sidak" => Sidak(),
        "forwardstop" => ForwardStop(),
        "fs" => ForwardStop(),
        "bc" => BarberCandes(),
    )
    # robustness fix: an unknown method previously surfaced as a bare
    # KeyError; raise an informative ArgumentError instead
    adjuster = get(d, lowercase(method)) do
        throw(ArgumentError("\"$method\" is not a recognized correction method. See ?_p_adjust for the available options"))
    end
    # adjust only the non-missing p-values, leaving `missing` entries in place
    p_copy = copy(pvals)
    notmiss = .!ismissing.(p_copy)
    p_copy[notmiss] .= adjust(p_copy[notmiss] |> Vector{Float64}, adjuster)
    return p_copy
end
"""
    _relabel(arr::Vector{T}) where T<:AbstractString

Relabel a vector of strings into integers as though it is factors.
This is useful for reclassifying e.g. population names as integers.
Labels are numbered 1, 2, … in order of first appearance.
"""
function _relabel(arr::AbstractVector{T}) where T<:AbstractString
    # single pass with a Dict (O(n)) instead of findfirst-in-unique (O(n²));
    # get! assigns the next unseen code on first encounter
    codes = Dict{T,Int}()
    map(arr) do label
        get!(codes, label, length(codes) + 1)
    end
end
"""
    feature_req()

Return the canned message directing users to the issue tracker to request a feature.
"""
function feature_req()
    return "\nPlease open an Issue or Pull Request on https://www.github.com/biojulia/PopGen.jl if you would like this feature implemented"
end
"""
    _is_logging(io)

Returns `false` if in a normal session, `true` if in a CI/HPC environment
(i.e. `io` is not a TTY, or the `CI` environment variable is `"true"`).
"""
# idiom fix: `!isa(io, Base.TTY)` replaces `isa(io, Base.TTY) == false`
_is_logging(io) = !isa(io, Base.TTY) || get(ENV, "CI", nothing) == "true"
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 4051 | function _hudson_fst_lxl(data::PopData)
idx_pdata = groupby(data.genodata, :population)
pops = getindex.(keys(idx_pdata), :population)
nloci = data.metadata.loci
locnames = loci(data)
allpairs = pairwisepairs(pops)
npops = data.metadata.populations
npairs = Int64(npops * (npops-1) / 2)
results = Vector{Vector{Union{Missing, Float64}}}(undef, npairs)
p1 = PooledArray(repeat(getindex.(allpairs,1), inner = nloci), compress = true)
p2 = PooledArray(repeat(getindex.(allpairs,2), inner = nloci), compress = true)
locs = PooledArray(repeat(locnames, outer = npairs), compress = true)
@inbounds @sync for (i,j) in enumerate(allpairs)
Base.Threads.@spawn begin
@inbounds pop1 = reshape(idx_pdata[(population = j[1],)].genotype, :, nloci)
@inbounds pop2 = reshape(idx_pdata[(population = j[2],)].genotype, :, nloci)
@inbounds results[i] = _hudson_fst_lxl(pop1, pop2)
end
end
return PairwiseFST(
DataFrame(:population1 => p1, :population2 => p2,:locus => locs, :fst => reduce(vcat, results)),
"Hudson estimator"
)
end
# Per-locus Hudson FST: one estimate per column (locus) of the two
# samples × loci genotype matrices.
function _hudson_fst_lxl(population_1::T, population_2::T) where T<:AbstractMatrix
    Union{Float64,Missing}[
        Hudson(view(population_1, :, locus), view(population_2, :, locus))
        for locus in 1:size(population_1, 2)
    ]
end
# Locus-by-locus Nei (1987) FST between two populations.
# `population_k` is a samples x loci genotype matrix; returns one value per locus.
# Loci represented in fewer than two populations are reported as `missing` —
# previously the vector was built with `undef`, so skipped loci exposed
# uninitialized memory instead of `missing`.
function _nei_fst_lxl(population_1::T, population_2::T) where T<:AbstractMatrix
    result = Vector{Union{Missing,Float64}}(missing, size(population_1, 2))
    for (i,p1) in enumerate(eachcol(population_1))
        p2 = view(population_2, :, i)
        n1 = nonmissing(p1)
        n2 = nonmissing(p2)
        # number of populations represented per locus
        n_pop_per_loc = Float64((n1 > 0) + (n2 > 0))
        n_pop_per_loc < 2.0 && continue
        # corrected n for population size
        corr_n_per_loc = n_pop_per_loc / (reciprocal(n1) + reciprocal(n2))
        # observed heterozygosity
        het_obs_p1 = _hetero_obs(p1)
        het_obs_p2 = _hetero_obs(p2)
        # expected heterozygosity
        het_exp_p1 = _hetero_exp(p1)
        het_exp_p2 = _hetero_exp(p2)
        # genic diversity for population 1 and 2
        p1_nei = _genediversitynei87(het_exp_p1, het_obs_p1, corr_n_per_loc)
        p2_nei = _genediversitynei87(het_exp_p2, het_obs_p2, corr_n_per_loc)
        # mean genic diversity
        HS = (p1_nei + p2_nei) / n_pop_per_loc
        alle_freq_p1 = allelefreq(p1)
        alle_freq_p2 = allelefreq(p2)
        avg_freq = sum(values(avg_allelefreq([alle_freq_p1, alle_freq_p2],2)))
        Het_obs = (het_obs_p1 + het_obs_p2) / n_pop_per_loc
        Ht = 1.0 - avg_freq + (HS / corr_n_per_loc / n_pop_per_loc) - (Het_obs / 2.0 / corr_n_per_loc / n_pop_per_loc)
        DST = 2 * (Ht - HS)
        DST′ = DST * (n_pop_per_loc - 1)
        HT′ = HS + DST′
        @inbounds result[i] = round(DST′ / HT′, digits = 5)
    end
    return result
end
function _nei_fst_lxl(data::PopData)
# Locus-by-locus Nei (1987) FST for every pairwise combination of populations.
# Returns a long-format `PairwiseFST`: one row per (population1, population2, locus).
idx_pdata = groupby(data.genodata, :population)
pops = getindex.(keys(idx_pdata), :population)
npops = length(pops)
nloci = data.metadata.loci
locnames = loci(data)
allpairs = pairwisepairs(pops)
# number of unique unordered population pairs
npairs = Int64(npops * (npops-1) / 2)
results = Vector{Vector{Union{Missing,Float64}}}(undef, npairs)
# long-format label columns; pooled to keep memory low for repeated labels
@inbounds p1 = PooledArray(repeat(getindex.(allpairs,1), inner = nloci), compress = true)
@inbounds p2 = PooledArray(repeat(getindex.(allpairs,2), inner = nloci), compress = true)
locs = PooledArray(repeat(locnames, outer = npairs), compress = true)
# one task per population pair; each task writes only results[i], so no race
@inbounds @sync for (i,j) in enumerate(allpairs)
Base.Threads.@spawn begin
@inbounds pop1 = reshape(idx_pdata[(population = j[1],)].genotype, :, nloci)
@inbounds pop2 = reshape(idx_pdata[(population = j[2],)].genotype, :, nloci)
@inbounds results[i] = _nei_fst_lxl(pop1, pop2)
end
end
return PairwiseFST(
DataFrame(:population1 => p1, :population2 => p2,:locus => locs, :fst => reduce(vcat, results)),
"Nei estimator"
)
end | PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 9056 | function Hudson(data::PopData)
# Pairwise Hudson et al. (1992) FST across all population pairs.
# Estimates fill the lower triangle of the result matrix; the rest stays zero.
idx_pdata = groupby(data.genodata, :population)
pops = getindex.(keys(idx_pdata), :population)
npops = data.metadata.populations
n_loci = data.metadata.loci
results = zeros(Float64, npops, npops)
# one task per row of the lower triangle; cells are disjoint, so no race
@sync for i in 2:npops
Base.Threads.@spawn begin
for j in 1:(i-1)
@inbounds pop1 = reshape(idx_pdata[i].genotype, :, n_loci)
@inbounds pop2 = reshape(idx_pdata[j].genotype, :, n_loci)
@inbounds results[i,j] = Hudson(pop1,pop2)
end
end
end
return PairwiseFST(DataFrame(results, Symbol.(pops)), "Hudson et al. 1992")
end
# helper function to do the math for Hudson FST on a locus
# Hudson et al. (1992) FST for a single biallelic locus.
# `pop1`/`pop2` are the genotype columns at that locus for each population.
function Hudson(pop1::T, pop2::T) where T<:GenoArray
    frq1 = allelefreq(pop1)
    frq2 = allelefreq(pop2)
    # pick an arbitrary allele to play the role of "p"; with biallelic
    # data the other allele's frequency is q = 1 - p
    p_allele = first(keys(frq1))
    p1 = get(frq1, p_allele, 0.0)
    p2 = get(frq2, p_allele, 0.0)
    q1 = 1.0 - p1
    q2 = 1.0 - p2
    # 2N - 1 gene copies per population, N = non-missing genotypes
    df1 = (nonmissing(pop1) * 2.0) - 1.0
    df2 = (nonmissing(pop2) * 2.0) - 1.0
    numerator = (p1 - p2)^2 - (p1 * q1 / df1) - (p2 * q2 / df2)
    denominator = (p1 * q2) + (p2 * q1)
    return numerator / denominator
end
# Mean Hudson FST across loci: averages the per-locus estimates, skipping
# loci whose estimate is missing or NaN (e.g. monomorphic loci).
function Hudson(population_1::T, population_2::T) where T<:AbstractMatrix
    tot = 0.0
    n = 0.0
    for i in 1:size(population_1, 2)
        fst = Hudson(view(population_1,:,i), view(population_2,:,i))
        # check `ismissing` BEFORE `isnan`: `isnan(missing)` is `missing`,
        # which throws a TypeError when used as an `&&`/ternary condition
        if !ismissing(fst) && !isnan(fst)
            tot += fst
            n += 1.0
        end
    end
    return tot/n
end
## Nei 1987 FST ##
function Nei(population_1::T, population_2::T) where T<:AbstractMatrix
# Nei (1987) FST between two populations, combined across loci:
# per-locus D'ST and H'T are accumulated, and the ratio of their means
# is returned, rounded to 5 digits.
DSTtot = 0.0
HTtot = 0.0
Ntot = 0.0
for (i,p1) in enumerate(eachcol(population_1))
p2 = view(population_2, :, i)
n1 = nonmissing(p1)
n2 = nonmissing(p2)
# number of populations represented per locus
n_pop_per_loc = Float64((n1 > 0) + (n2 > 0))
# skip loci genotyped in fewer than two populations
n_pop_per_loc < 2.0 && continue
# corrected n for population size
corr_n_per_loc = n_pop_per_loc / (reciprocal(n1) + reciprocal(n2))
# observed heterozygosity
het_obs_p1 = _hetero_obs(p1)
het_obs_p2 = _hetero_obs(p2)
# expected heterozygosity
het_exp_p1 = _hetero_exp(p1)
het_exp_p2 = _hetero_exp(p2)
# genic diversity for population 1 and 2
p1_nei = _genediversitynei87(het_exp_p1, het_obs_p1, corr_n_per_loc)
p2_nei = _genediversitynei87(het_exp_p2, het_obs_p2, corr_n_per_loc)
# mean genic diversity
HS = (p1_nei + p2_nei) / n_pop_per_loc
alle_freq_p1 = allelefreq(p1)
alle_freq_p2 = allelefreq(p2)
avg_freq = sum(values(avg_allelefreq([alle_freq_p1, alle_freq_p2],2)))
Het_obs = (het_obs_p1 + het_obs_p2) / n_pop_per_loc
Ht = 1.0 - avg_freq + (HS / corr_n_per_loc / n_pop_per_loc) - (Het_obs / 2.0 / corr_n_per_loc / n_pop_per_loc)
DST = 2 * (Ht - HS)
DST′ = DST * (n_pop_per_loc - 1)
HT′ = HS + DST′
DSTtot += DST′
HTtot += HT′
Ntot += 1.0
end
return round((DSTtot/Ntot) / (HTtot/Ntot), digits = 5)
end
# Pairwise Nei (1987) FST for all population pairs; estimates fill the
# lower triangle of the matrix, the upper triangle and diagonal stay zero.
function Nei(data::PopData)
    grouped = groupby(data.genodata, :population)
    popnames = getindex.(keys(grouped), :population)
    npops = data.metadata.populations
    nloci = data.metadata.loci
    out = zeros(Union{Missing,Float64}, npops, npops)
    # one task per row of the lower triangle; cells are disjoint, so no race
    @inbounds @sync for row in 2:npops
        Base.Threads.@spawn begin
            @inbounds for col in 1:(row - 1)
                @inbounds genos_row = reshape(grouped[row].genotype, :, nloci)
                @inbounds genos_col = reshape(grouped[col].genotype, :, nloci)
                @inbounds out[row, col] = Nei(genos_row, genos_col)
            end
        end
    end
    return PairwiseFST(DataFrame(out, Symbol.(popnames)), "Nei 1987")
end
## Weir & Cockerham 1984 FST ##
function WeirCockerham(population_1::T, population_2::T) where T<:AbstractMatrix
# Weir & Cockerham (1984) variance-components FST between two populations.
# Rows are samples, columns are loci. Variance components are computed per
# allele per locus, summed across loci, and the multi-locus theta is
# returned rounded to 5 digits.
n_pops = 2
# get genotype counts
# reshape as a matrix of loci x pop (row x col)
n_per_locpop = hcat(map(nonmissing, eachcol(population_1)), map(nonmissing, eachcol(population_2)))
n_total = sum(n_per_locpop, dims = 2)
# screen for completely absent loci
present_loc = 0 .∉ eachrow(n_per_locpop)
if 0 ∈ present_loc
# index locus names by the missing indices
pop_1 = @view population_1[:,present_loc]
pop_2 = @view population_2[:,present_loc]
n_per_locpop = hcat(map(nonmissing, eachcol(pop_1)), map(nonmissing, eachcol(pop_2)))
n_total = sum(n_per_locpop, dims = 2)
else
pop_1 = population_1
pop_2 = population_2
end
merged = vcat(pop_1, pop_2)
n_pop_per_loc = map(countnonzeros, eachrow(n_per_locpop))
# global allele counts
glob_allelecounts = map(allelecount, eachcol(merged))
# global allele frequencies
glob_allelefreqs = map(allelefreq, eachcol(merged))
# allele freqs per locus per population
pop_1_freq = map(allelefreq, eachcol(pop_1))
pop_2_freq = map(allelefreq, eachcol(pop_2))
# expand out the n matrix to be same dimensions as uniquealleles x pop
n_expanded = reduce(hcat, repeat.(eachrow(n_per_locpop), 1, glob_allelecounts)) |> permutedims
# expand n_total matrix to be same dimensions as uniquealleles x pop
n_tot_expanded = reduce(vcat, repeat.(eachrow(n_total), glob_allelecounts))
# calculate corrected n per locus
corr_n_per_loc = (n_total .- (sum(n_per_locpop .^2, dims = 2) ./ n_total)) ./ (n_pop_per_loc .- 1)
# expand corr_n matrix to be same dimensions as uniquealleles x pop
corr_n_per_loc_exp = reduce(vcat, repeat.(eachrow(corr_n_per_loc), glob_allelecounts))
# list of alleles in each locus
_alleles_perloc = [sort(uniquealleles(i)) for i in eachcol(merged)]
# extremely convoluted, creates a big matrix of allele freqs per locus per population
# TODO are there too many reshapes going on?
#TODO move into its own function? This seems like it could be a recurring thing
afreq_tmp = hcat(pop_1_freq, pop_2_freq)
allelefreq_pop = reshape(
reduce(vcat,
map(zip(eachrow(afreq_tmp), _alleles_perloc)) do (_freqs_p, _alle)
reduce(hcat,
map(_freqs_p) do _freqs
[get(_freqs, i, 0.) for i in _alle] # query the dict of allele freqs
end
)
end
),
:, n_pops # reshape by these dimensions
)
# global allele freqs
_freqs = map(i -> values(sort(i)), glob_allelefreqs) |> Base.Iterators.flatten |> collect
#heterozygotes per allele per locus per population
# gets emptied from the popfirst! calls below(!)
_p1 = collect(eachcol(pop_1))
_p2 = collect(eachcol(pop_2))
genos_vec = permutedims([_p1 _p2])[:]
# create matrix of heterozygote occurrences per allele per pop
het_mtx = reduce(vcat, # vcat will concatenate the returned matrices into a single matrix
map(_alleles_perloc) do _alleles # map across the vector of alleles for each locus
reduce(hcat,
# each element in x is locus × population, so use a comprehension to
# do counthet() as many times as there are populations, popfirst!'ing
# the first element of x until it's ultimately empty
# then concatenate it into a matrix
[counthet(popfirst!(genos_vec), _alleles) for pop_n in 1: n_pops]
)
end
)
μ_het = (2 * n_expanded .* allelefreq_pop - het_mtx) / 2
# sums of squares: among genotypes (G), among individuals (i), among pops (P)
SSG = sum(n_expanded .* allelefreq_pop - μ_het, dims = 2)
SSi = sum(n_expanded .* (allelefreq_pop - 2 * allelefreq_pop .^ 2) + μ_het, dims = 2)
SSP = 2 .* sum(n_expanded .* reduce(hcat, map(i -> (i .- _freqs) .^ 2, eachcol(allelefreq_pop))), dims = 2)
n_correction = reduce(vcat, fill.(n_pop_per_loc, glob_allelecounts))
MSG = SSG ./ n_tot_expanded
MSP = SSP ./ (n_correction .- 1)
MSI = SSi ./ (n_tot_expanded - n_correction)
# variance components: within genotypes (w), between individuals (b), among pops (a)
σ_w = MSG
σ_b = 0.5 * (MSI - MSG)
σ_a = 0.5 ./ corr_n_per_loc_exp .* (MSP - MSI)
σ = hcat(σ_a, σ_b, σ_w)
# sum each component across alleles/loci, ignoring NaN/Inf entries
σ_total = map(i -> sum(skipinfnan(i)), eachcol(σ))
fst_total = round(σ_total[1] / sum(σ_total), digits = 5)
end
# Pairwise Weir & Cockerham (1984) FST across all population pairs;
# estimates fill the lower triangle, the rest of the matrix stays zero.
function WeirCockerham(data::PopData)
    grouped = groupby(data.genodata, :population)
    popnames = getindex.(keys(grouped), :population)
    npops = data.metadata.populations
    nloci = data.metadata.loci
    out = zeros(Float64, npops, npops)
    # one task per row of the lower triangle; cells are disjoint, so no race
    @sync for row in 2:npops
        Base.Threads.@spawn begin
            for col in 1:(row - 1)
                @inbounds genos_row = reshape(grouped[row].genotype, :, nloci)
                @inbounds genos_col = reshape(grouped[col].genotype, :, nloci)
                @inbounds out[row, col] = WeirCockerham(genos_row, genos_col)
            end
        end
    end
    return PairwiseFST(DataFrame(out, Symbol.(popnames)), "Weir & Cockerham")
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 2476 | """
    _fst_permute(data::AbstractMatrix, n1::Integer, n2::Integer)
Returns two matrices with rows (samples) shuffled between them. Respects the
number of rows of the original matrices (i.e. population sizes): `data` is the
row-wise concatenation of both populations, `n1`/`n2` their sample counts.
"""
function _fst_permute(data::T, n1::Integer, n2::Integer) where T<:AbstractMatrix
perm_matrix = @inbounds view(data, shuffle(1:(n1 +n2)), :)
#partition shuffled matrix into original sample sizes
new_pop_1, new_pop_2 = partitionarray(perm_matrix, [n1, n2])
return new_pop_1, new_pop_2
end
#= deprecated? #
function _fst_permutation(data::T) where T<:AbstractMatrix
@inbounds data[shuffle(1:size(data,1)), :]
end
=#
"""
    _fst_permutation(data::PopData, method::Function, iterations::Int64)
Returns a `PairwiseFST` object containing a dataframe of pairwise FST calculations. The contained
dataframe has FST values below the diagonal and P values above it. This method is used internally
and wrapped by the public API provided in `pairwisefst()`.
"""
function _fst_permutation(data::PopData, method::Function, iterations::Int64)
    idx_pdata = groupby(data.genodata, :population)
    pops = getindex.(keys(idx_pdata), :population)
    npops = data.metadata.populations
    n_loci = data.metadata.loci
    results = zeros(Float64, npops, npops)
    pbar = ProgressBar(;refresh_rate=90, transient = true)
    job = addjob!(pbar; description= "$(string(method)) FST: ", N = Int64((npops * (npops-1))/2))
    start!(pbar)
    @inbounds for i in 2:npops
        pop1 = reshape(idx_pdata[i].genotype, :, n_loci)
        n_pop1 = size(pop1, 1)
        @inbounds for j in 1:(i-1)
            pop2 = reshape(idx_pdata[j].genotype, :, n_loci)
            n_pop2 = size(pop2, 1)
            merged = vcat(pop1, pop2)
            fst_val = method(pop1,pop2)
            # count permutations with FST >= observed; `Threads.Atomic` avoids
            # the data race of an unsynchronized `pval += ...` across tasks
            pval = Threads.Atomic{Int}(0)
            @inbounds @sync for iter in 1:iterations-1
                Base.Threads.@spawn begin
                    @inbounds perm_p1, perm_p2 = _fst_permute(merged, n_pop1, n_pop2)
                    if fst_val <= method(perm_p1, perm_p2)
                        Threads.atomic_add!(pval, 1)
                    end
                end
            end
            # lower triangle: FST estimate; upper triangle: permutation P value
            @inbounds results[i,j] = fst_val
            @inbounds results[j,i] = (pval[] + 1) / iterations
            update!(job)
        end
    end
    stop!(pbar)
    println("Below diagonal: FST values | Above diagonal: P values")
    return PairwiseFST(DataFrame(results, Symbol.(pops)), "$method estimator (with p-values)")
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 2829 | # generate a wrapper struct so we can nicely print the results
"""
```julia
struct PairwiseFST
    results::DataFrame
    method::String
end
```
A convenience data structure which stores the `results` and `method` of a `pairwisefst` analysis.
The object pretty-prints a bit more information to the console, especially when doing a global pairwise FST.
"""
struct PairwiseFST
results::DataFrame
method::String
end
# pretty-printing of FST results
function Base.show(io::IO, data::PairwiseFST)
    # a square results table is a global pairwise matrix, so label the rows
    # with the same population names as the columns
    is_square = size(data.results, 1) == size(data.results, 2)
    labels = is_square ? names(data.results) : nothing
    show(io, data.results,
        show_row_number = false,
        rowlabel = Symbol(" "),
        eltypes = false,
        row_names = labels,
        title = "Pairwise FST: " * data.method)
end
#TODO update the docs with this implementation
"""
    pairwisefst(data::PopData; method::Function, by::String = "global", iterations::Int64)
Calculate pairwise FST between populations in a `PopData` object. Set `iterations`
to a value greater than `0` to perform a single-tailed permutation test to obtain
P-values of statistical significance. Use `by = "locus"` to perform a locus-by-locus FST for
population pairs (iterations and significance testing ignored). Returns a `PairwiseFST` object,
stores a `DataFrame` of the `results`, along with the `method` used to obtain the estimates.
#### Methods:
- `Hudson`: Hudson et al. (1992) method (only for biallelic data)
- `Nei`: Nei (1987) method
- `WeirCockerham` : Weir & Cockerham (1984) method (default)
**Examples**
```julia
data = @nancycats
wc = pairwisefst(data, method = WeirCockerham)
wc_sig = pairwisefst(data, iterations = 1000)
```
"""
function pairwisefst(data::PopData; method::Function = WeirCockerham, by::String = "global", iterations::Int64 = 0)
# sanity checks
mth = Symbol(method)
if mth ∉ [:Hudson, :Nei, :WeirCockerham]
throw(ArgumentError("The \`method\` keyword argument ($method) is not recognized and must be one of Hudson, Nei, or WeirCockerham. See ?pairwisefst for usage information"))
elseif mth == :Hudson
# NOTE(review): "to se the" typo in this user-facing message — should read "to use the"
isbiallelic(data) || throw(ArgumentError("Data must be biallelic to se the Hudson estimator"))
end
if by == "locus"
if mth == :Hudson
return _hudson_fst_lxl(data)
elseif mth == :Nei
return _nei_fst_lxl(data)
else
throw(ArgumentError("Method $mth is not (yet) supported for by-locus calculations."))
end
elseif by == "global"
# iterations == 0 means no permutation testing: call the estimator directly
if iterations == 0
method(data)
else
_fst_permutation(data, method, iterations)
end
else
throw(ArgumentError("The keyword argument \`by\` must be either \"global\" or \"locus\""))
end
end | PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 8475 |
using Convex
using ECOS
"""
    probability_state_table(x::Tuple, y::Tuple, alleles::Dict)
Calculate the probability of observing the particular allele state given each of
the 9 Jacquard Identity States for a single locus to create Table 1 from
Milligan 2003. `x`/`y` are the two genotypes and `alleles` maps each allele to
its population frequency; returns a 9-element vector Pr(L | S1..S9).
"""
function probability_state_table(x::Tuple, y::Tuple, alleles::Dict)
#TODO Improve how groups are decided based on how similar things are done with moments estimators
#=
Calculate Pr(Li | Sj)
If the allele identity falls into this class (L1-L9), generate the
probabilities of it belonging to each of the different classes and
return that array of 9 distinct probabilities
=#
## class L1 - AᵢAᵢ AᵢAᵢ ##
if x[1] == x[2] == y[1] == y[2]
p = alleles[x[1]]
[p, p^2, p^2, p^3, p^2, p^3, p^2, p^3, p^4]
## class L2 - AᵢAᵢ AⱼAⱼ ##
elseif (x[1] == x[2]) & (y[1] == y[2]) & (x[1] != y[1])
p = (alleles[x[1]], alleles[y[1]])
[0, prod(p), 0, prod(p) * p[2], 0, prod(p) * p[1], 0, 0, prod(p) * prod(p)]
## class L3a - AᵢAᵢ AᵢAⱼ ## - has issues because of allele order
elseif ((x[1] == x[2] == y[1]) & (x[1] != y[2]))
p = (alleles[x[1]], alleles[y[2]])
[0, 0, prod(p), 2 * prod(p) * p[1], 0, 0, 0, prod(p) * p[1], 2 * prod(p) * p[1]^2]
## class L3b - AᵢAᵢ AⱼAᵢ ## - has issues because of allele order
elseif ((x[1] == x[2] == y[2]) & (x[1] != y[1]))
p = (alleles[x[1]], alleles[y[1]])
[0, 0, prod(p), 2 * prod(p) * p[1], 0, 0, 0, prod(p) * p[1], 2 * prod(p) * p[1]^2]
## class L4 - AᵢAᵢ AⱼAₖ ##
elseif (x[1] == x[2]) & (y[1] != y[2]) & (x[1] != y[1]) & (x[1] != y[2])
p = (alleles[x[1]], alleles[y[1]], alleles[y[2]])
[0, 0, 0, 2 * prod(p), 0, 0, 0, 0, 2 * prod(p) * p[1]]
## L5a - AiAj AiAi ## - has issues because of allele order
elseif ((x[1] == y[1] == y[2]) & (x[1] != x[2]))
p = (alleles[x[1]], alleles[x[2]])
[0, 0, 0, 0, prod(p), 2 * prod(p) * p[1], 0, prod(p) *p[1], 2 * prod(p) * p[1]^2]
## L5b - AjAi AiAi ## - has issues because of allele order
elseif (x[2] == y[1] == y[2] & (x[1] != x[2]))
p = (alleles[x[2]], alleles[x[1]])
[0, 0, 0, 0, prod(p), 2 * prod(p) * p[1], 0, prod(p) *p[1], 2 * prod(p) * p[1]^2]
## L6 - AjAk AiAi ##
elseif (x[1] != x[2]) & (y[1] == y[2]) & (x[1] != y[1]) & (x[2] != y[1])
p = (alleles[y[1]], alleles[x[1]], alleles[x[2]])
[0, 0, 0, 0, 0, 2 * prod(p), 0, 0, 2 * prod(p) * p[1]]
## L7 - AiAj AiAj ##
elseif (x[1] == y[1]) & (x[2] == y[2]) & (x[1] != x[2])
p = (alleles[x[1]], alleles[x[2]])
[0, 0, 0, 0, 0, 0, 2 * prod(p), prod(p) * sum(p), 4 * prod(p) * prod(p)]
## L8a - AiAj AiAk ## - has issues because of allele order
elseif ((x[1] == y[1]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[2] != y[2]))
p = (alleles[x[1]], alleles[x[2]], alleles[y[2]])
[0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
## L8b - AjAi AkAi ## - has issues because of allele order
elseif ((x[2] == y[2]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[1] != y[1]))
p = (alleles[x[2]], alleles[x[1]], alleles[y[1]])
[0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
## L8c - AjAi AiAk ## - has issues because of allele order
elseif ((x[2] == y[1]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[1] != y[2]))
p = (alleles[x[2]], alleles[x[1]], alleles[y[2]])
[0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
## L8d - AiAj AkAi ## - has issues because of allele order
elseif ((x[1] == y[2]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[1] != y[1]))
p = (alleles[x[1]], alleles[x[2]], alleles[y[1]])
[0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
## L9 - AiAj AkAl ##
elseif (x[1] != x[2]) & (x[1] != y[1]) & (x[1] != y[2]) & (x[2] != y[1]) & (x[2] != y[2]) & (y[1] != x[2])
p = (alleles[x[1]], alleles[x[2]], alleles[y[1]], alleles[y[2]])
[0, 0, 0, 0, 0, 0, 0, 0, 4 * prod(p)]
else
# sentinel: the allele configuration matched no identity class
[-9, -9, -9, -9, -9, -9, -9, -9, -9]
end
end
"""
    Δ_optim(Pr_L_S::Array{Float64,2}, verbose::Bool = false)
Takes the probability of the allelic state given the identity by descent from
all available loci (allowing for inbreeding) and calculates the
maximum likelihood Δ coefficients. Returns `(Δ.value, problem.status)`.
"""
function Δ_optim(Pr_L_S::Array{Float64,2}, verbose::Bool = false)
#Δ is what needs to be optimized
#consist of 9 values between 0 and 1 which must also sum to 1
#is then used to calculate relatedness
Δ = Variable(9)
problem = maximize(sum(log(Pr_L_S * Δ)))
problem.constraints += 0 <= Δ[1:9]
problem.constraints += Δ[1:9] <= 1
problem.constraints += sum(Δ) <= 1
problem.constraints += 0 <= sum(Δ)
problem.constraints += 2 * (Δ[1] + 0.5 * (Δ[3] + Δ[5] + Δ[7]) + 0.25 * Δ[8]) <= 1
problem.constraints += 0 <= 2 * (Δ[1] + 0.5 * (Δ[3] + Δ[5] + Δ[7]) + 0.25 * Δ[8])
Convex.solve!(problem, ECOS.Optimizer(verbose = verbose, maxit=100), verbose = verbose) #maxit=100,
#Convex.solve!(problem, ECOSSolver(verbose = verbose, maxit=100, feastol=5e-6, reltol = 1e-3, reltol_inacc = 5e-2), verbose = verbose)
#Convex.solve!(problem, SCSSolver(verbose = verbose, max_iters = 100), verbose = verbose)
Δ.value, problem.status
# Should probably include some output that confirms that it did in fact
# converge and/or use multiple random starts to confirm not a local maxima
end
#### No inbreeding assumption
"""
    Δ_optim_noInbreeding(Pr_L_S::Array{Float64,2}, verbose::Bool = false)
Same as `Δ_optim`, but restricted to the three Jacquard identity states
(S7-S9) that are possible when neither individual is inbred.
Returns `(Δ.value, problem.status)`.
"""
function Δ_optim_noInbreeding(Pr_L_S::Array{Float64,2}, verbose::Bool = false)
#Δ is what needs to be optimized
#consist of 3 values between 0 and 1 which must also sum to 1
#is then used to calculate relatedness
Δ = Variable(3)
problem = maximize(sum(log(Pr_L_S * Δ)))
problem.constraints += 0 <= Δ[1:3]
problem.constraints += Δ[1:3] <= 1
problem.constraints += sum(Δ) <= 1
problem.constraints += 0 <= sum(Δ)
problem.constraints += 2 * (0.5 * Δ[1] + 0.25 * Δ[2]) <= 1
problem.constraints += 0 <= 2 * (0.5 * Δ[1] + 0.25 * Δ[2])
Convex.solve!(problem, ECOS.Optimizer(verbose = verbose, maxit=100), verbose = verbose) #maxit=100,
#Convex.solve!(problem, ECOSSolver(verbose = verbose, maxit=100, feastol=5e-6, reltol = 1e-3, reltol_inacc = 5e-2), verbose = verbose)
#Convex.solve!(problem, SCSSolver(verbose = verbose, max_iters = 100), verbose = verbose)
Δ.value, problem.status
# Should probably include some output that confirms that it did in fact
# converge and/or use multiple random starts to confirm not a local maxima
end
"""
    dyadicLikelihood(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the maximum likelihood based kinship using all available loci following following Milligan (2002)
-Single Locus Equation:
-How to combine multiple loci: NA inherently multi-locus
-Assumes inbreeding can be present (toggled via the `inbreeding` keyword)
Milligan, B. G. (2003). Maximum-likelihood estimation of relatedness. Genetics, 163(3), 1153-1167.
"""
function dyadicLikelihood(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
kw_dict = Dict(kwargs...)
# Pr(L | S) for each locus (rows) across the 9 Jacquard states (columns)
Pr_Ls = Array{Float64}(undef, length(locus_names), 9)
idx = 0
@inbounds for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
idx += 1
Pr_Ls[idx, :] = probability_state_table(gen1, gen2, alleles[loc])
end
# θ is the coefficient of coancestry; kinship is 2θ
if kw_dict[:inbreeding] == true
Δ = Δ_optim(Pr_Ls)
θ = Δ[1][1] + 0.5 * (Δ[1][3] + Δ[1][5] + Δ[1][7]) + 0.25 * Δ[1][8]
else
# without inbreeding only states S7-S9 are possible
Pr_Ls = Pr_Ls[:, 7:9]
Δ = Δ_optim_noInbreeding(Pr_Ls)
θ = (0.5 * Δ[1][1] + 0.25 * Δ[1][2])
end
return 2 * θ#, Δ[1], Δ[2] #secondary outputs for convergence & Δvalues if we want
end
#= should be redundant now
function dyadicLikelihood_noInbreeding(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
#TODO Add inbreeding toggle
Pr_Ls = Array{Float64}(undef, length(locus_names), 9)
idx = 0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
idx += 1
Pr_Ls[idx, :] = probability_state_table(gen1, gen2, allelefrequencies[loc])
end
Pr_Ls = Pr_Ls[:, 7:9]
Δ = Δ_optim_noInbreeding(Pr_Ls)
θ = (0.5 * Δ[1][1] + 0.25 * Δ[1][2])
return 2 * θ#, Δ[1], Δ[2] #secondary outputs for convergence & Δvalues if we want
end
=# | PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 8318 | ######## Moments-based Estimator Methods ###########
# Blouin (1996) single-locus allele sharing: how many of the two allele
# positions are mutually shared between the genotypes (0.0, 1.0, or 2.0).
function _blouin(geno1::NTuple{2,T}, geno2::NTuple{2,T})::Float64 where T<:Union{Int16, Int8}
    @inbounds first_pos = (geno1[1] in geno2) & (geno2[1] in geno1)
    @inbounds second_pos = (geno1[2] in geno2) & (geno2[2] in geno1)
    return first_pos + second_pos
end
# Blouin (1996) relatedness across loci: mean per-locus allele sharing
# divided by the ploidy (2). Loci with a missing genotype are skipped.
function Blouin(ind1::GenoArray, ind2::GenoArray)::Float64
    total = 0.0
    nloci = 0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        total += _blouin(g1, g2)
        nloci += 1
    end
    return total / nloci / 2.0
end
# Li & Horvitz (1953) single-locus similarity: number of equal allele
# pairings among the four cross-genotype comparisons (0.0 to 4.0).
function _lihorvitz(geno1::NTuple{2,T}, geno2::NTuple{2,T})::Float64 where T<:Union{Int16, Int8}
    @inbounds sum(a == b for a in geno1, b in geno2)
end
# Li & Horvitz (1953) relatedness across loci: mean per-locus similarity
# scaled by the 4 possible comparisons. Loci with a missing genotype are skipped.
function LiHorvitz(ind1::GenoArray, ind2::GenoArray)::Float64
    total = 0.0
    nloci = 0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        total += _lihorvitz(g1, g2)
        nloci += 1
    end
    return total / nloci / 4.0
end
# Lynch (1988) single-locus sharing: for each allele of each genotype,
# count whether it appears in the other genotype (0.0 to 4.0).
function _lynch(geno1::NTuple{2,T}, geno2::NTuple{2,T})::Float64 where T<:Union{Int16, Int8}
    @inbounds in_second = (geno1[1] in geno2) + (geno1[2] in geno2)
    @inbounds in_first = (geno2[1] in geno1) + (geno2[2] in geno1)
    return in_second + in_first
end
# Lynch (1988) relatedness across loci: mean per-locus allele sharing
# scaled by the 4 possible memberships. Loci with a missing genotype are skipped.
function Lynch(ind1::GenoArray, ind2::GenoArray)::Float64
    total = 0.0
    nloci = 0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        total += _lynch(g1, g2)
        nloci += 1
    end
    return total / nloci / 4.0
end
# Loiselle numerator for one locus: sum over alleles of the product of each
# individual's dosage deviation (0, 0.5, or 1 minus the allele frequency).
function _loiselle_num(geno1::NTuple{2,T}, geno2::NTuple{2,T}, frqdict::Dict{T, Float64})::Float64 where T<:Union{Int16, Int8}
    total = 0.0
    for (allele, frq) in pairs(frqdict)
        dev1 = ((geno1[1] == allele) + (geno1[2] == allele)) / 2.0 - frq
        dev2 = ((geno2[1] == allele) + (geno2[2] == allele)) / 2.0 - frq
        total += dev1 * dev2
    end
    return total
end
# Loiselle denominator for one locus: Σ p(1 - p) over allele frequencies.
function _loiselle_denom(freqs)::Float64
    total = 0.0
    for fq in freqs
        total += fq * (1.0 - fq)
    end
    return total
end
# Loiselle et al. (1995) estimator: ratio of summed locus numerators to
# summed locus denominators, plus the sample-size bias correction term
# (requires `n_samples` as a keyword). Loci with a missing genotype are skipped.
function Loiselle(ind1::GenoArray, ind2::GenoArray, allelefrq::U; kwargs...) where U <: Tuple
    num = 0.0
    den = 0.0
    @inbounds for locus in eachindex(ind1)
        g1 = ind1[locus]
        g2 = ind2[locus]
        ((g1 === missing) | (g2 === missing)) && continue
        @inbounds locus_frqs = allelefrq[locus]
        num += _loiselle_num(g1, g2, locus_frqs)
        den += _loiselle_denom(values(locus_frqs))
    end
    return num / den + 2.0 / (2.0 * kwargs[:n_samples] - 1.0)
end
##BUG THIS DOESNT AGREE WITH THE ORIGINAL
## CHECK AGAINST COANCESTRY
# Lynch/Li single-locus similarity: the four cross-genotype matches,
# normalized separately for each individual's homozygosity, then averaged.
function _lynchli(geno1::NTuple{2,T}, geno2::NTuple{2,T})::Float64 where T<:Union{Int16, Int8}
    a, b = geno1
    c, d = geno2
    matches = (a == c) + (a == d) + (b == c) + (b == d)
    return 0.5 * (matches / (2.0 * (1.0 + (a == b))) + matches / (2.0 * (1.0 + (c == d))))
end
# S0 term for LynchLi: 2·Σp² − Σp³ over a locus' allele frequencies
# (the expected similarity of two random genotypes).
function _lynchliS0(alleles)::Float64
    sum_sq = 0.0
    sum_cube = 0.0
    @inbounds for frq in alleles
        sum_sq += frq^2
        sum_cube += frq^3
    end
    return 2.0 * sum_sq - sum_cube
end
# Lynch (1988) / Li (1993) estimator: (Sxy - S0) summed over loci divided by
# (1 - S0) summed over loci. Loci with a missing genotype are skipped.
function LynchLi(ind1::T, ind2::T, alleles::U; kwargs...) where T <: GenoArray where U <: Tuple
    #TODO Change to unbiased formulation (eq 25)
    num = 0.0
    den = 0.0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        # S0: expected similarity of two random genotypes at this locus
        S0 = _lynchliS0(values(alleles[idx]))
        num += _lynchli(g1, g2) - S0
        den += 1.0 - S0
    end
    return num / den
end
#BUG not consistent with Coancestry
function _lynchritland(geno1::NTuple{2,T}, geno2::NTuple{2,T}, frqdict::Dict{T, Float64}) where T<:Union{Int16, Int8}
# Lynch & Ritland (1999) locus contribution: returns (numerator, denominator)
# where each individual's per-locus estimate (n/d) is weighted by its locus
# weight WL. Frequencies are looked up per observed allele.
a,b = geno1
c,d = geno2
fq_a = frqdict[a]
fq_b = frqdict[b]
fq_c = frqdict[c]
fq_d = frqdict[d]
n1 = fq_a * ((b == c) + (b == d)) + fq_b * ((a == c) + (a == d)) - 4.0 * fq_a * fq_b
n2 = fq_c * ((d == a) + (d == b)) + fq_d * ((c == a) + (c == b)) - 4.0 * fq_c * fq_d
d1 = 2.0 * (1.0 + (a == b)) * (fq_a + fq_b) - 8.0 * fq_a * fq_b
d2 = 2.0 * (1.0 + (c == d)) * (fq_c + fq_d) - 8.0 * fq_c * fq_d
WL1 = ((1.0 + (a == b)) * (fq_a + fq_b) - 4.0 * fq_a * fq_b) / (2.0 * fq_a * fq_b)
WL2 = ((1.0 + (c == d)) * (fq_c + fq_d) - 4.0 * fq_c * fq_d) / (2.0 * fq_c * fq_d)
numer = ((n1 / d1) * WL1 + (n2 / d2) * WL2)
denom = WL1 + WL2
return (numer, denom)
end
# Lynch & Ritland (1999) estimator: ratio of summed weighted locus numerators
# to half the summed locus weights. Loci with a missing genotype are skipped.
function LynchRitland(ind1::GenoArray, ind2::GenoArray, allelefrq::U; kwargs...) where U <: Tuple
    num = 0.0
    den = 0.0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        @inbounds locus_num, locus_den = _lynchritland(g1, g2, allelefrq[idx])
        num += locus_num
        den += locus_den
    end
    return num / (den / 2.0)
end
function _moran(geno1::NTuple{2,T}, geno2::NTuple{2,T}, frqdict::Dict{T, Float64}) where T<:Union{Int16, Int8}
# Moran's I-style locus contribution: (numerator, denominator) built from the
# per-allele deviations of each genotype's dosage (0, 0.5, 1) from the
# population allele frequency.
num = 0.0 ; denom = 0.0
@inbounds for (allele, fq) in frqdict
g1 = ((((geno1[1] == allele) + (geno1[2] == allele)) / 2.0) - fq)
g2 = ((((geno2[1] == allele) + (geno2[2] == allele)) / 2.0) - fq)
num += g1 * g2
denom += ((g1^2) + (g2^2))
end
return (num, denom)
end
# Moran's I style estimator: ratio of summed locus numerators to half the
# summed locus denominators. Loci with a missing genotype are skipped.
function Moran(ind1::GenoArray, ind2::GenoArray, allelefrq::U; kwargs...) where U <: Tuple
    #TODO NEED TO CHECK TO CONFIRM EQUATIONS
    num = 0.0
    den = 0.0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        @inbounds locus_num, locus_den = _moran(g1, g2, allelefrq[idx])
        num += locus_num
        den += locus_den
    end
    return num / (den / 2.0)
end
#BUG not consistent with Coancestry
# Queller & Goodnight (1989) locus contribution: returns the two numerators
# and two denominators (one pair per reference individual) for this locus.
function _quellergoodnight(geno1::NTuple{2,T}, geno2::NTuple{2,T}, frqdict::Dict{T, Float64}) where T<:Union{Int16, Int8}
    a, b = geno1
    c, d = geno2
    fq_a = frqdict[a]
    fq_b = frqdict[b]
    fq_c = frqdict[c]
    fq_d = frqdict[d]
    # number of identical allele pairings across the two genotypes (0-4)
    ident = ((a == c) + (a == d) + (b == c) + (b == d))
    return (
        ident - 2.0 * (fq_a + fq_b),
        ident - 2.0 * (fq_c + fq_d),
        2.0 * (1.0 + (a == b) - fq_a - fq_b),
        2.0 * (1.0 + (c == d) - fq_c - fq_d),
    )
end
# Queller & Goodnight (1989) estimator: each individual's ratio is computed
# from locus sums, then the two ratios are averaged. Missing loci are skipped.
function QuellerGoodnight(ind1::GenoArray, ind2::GenoArray, allelefrq::U; kwargs...) where U <: Tuple
    num1 = 0.0
    den1 = 0.0
    num2 = 0.0
    den2 = 0.0
    @inbounds for idx in eachindex(ind1)
        g1 = ind1[idx]
        g2 = ind2[idx]
        ((g1 === missing) | (g2 === missing)) && continue
        @inbounds n1, n2, d1, d2 = _quellergoodnight(g1, g2, allelefrq[idx])
        num1 += n1
        den1 += d1
        num2 += n2
        den2 += d2
    end
    return (num1 / den1 + num2 / den2) / 2.0
end
#BUG Math not consistent with Coancestry
function _ritland(geno1::NTuple{2,T}, geno2::NTuple{2,T}, frqdict::Dict{T, Float64}) where T<:Union{Int16, Int8}
# Ritland (1996) locus contribution: returns (R, A), where A = #alleles - 1
# is the locus weight and R the weighted locus estimate.
a,b = geno1
c,d = geno2
A = length(frqdict) - 1.0
R = 0.0
for (allele, frq) in frqdict
R += ((((a == allele) + (b == allele)) * ((c == allele) + (d == allele))) / (4.0 * frq))
end
# NOTE(review): the trailing `* A` cancels the `2.0 / A` divisor, so this is
# just 2.0 * (R - 1.0); flagged above as inconsistent with Coancestry —
# verify against the published formula before simplifying.
R = ((2.0 / A) * (R - 1.0)) * A
return (R, A)
end
function Ritland(ind1::GenoArray, ind2::GenoArray, allelefrq::U; kwargs...) where U <: Tuple
# Ritland (1996) estimator: ratio of summed locus contributions, each locus
# weighted by its allele count (see `_ritland`). Missing loci are skipped.
numer = 0.0
denom = 0.0
@inbounds for i in eachindex(ind1)
@inbounds i1 = ind1[i]
@inbounds i2 = ind2[i]
@inbounds freqs = allelefrq[i]
if (i1 === missing) | (i2 === missing)
continue
else
n, d = _ritland(i1, i2, freqs)
numer += n
denom += d
end
end
return numer / denom
end | PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 17304 | """
Blouin(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Allele sharing index described by Blouin (1996)
- Single Locus Equation: The number of alleles shared between individuals over ploidy.
- If both allele positions are shared (e.g. AA x AA or AB x AB) then 1
- If one allele position is shared (e.g. AB x AC) then 0.5
- If neither allele position is shared (e.g. AB x CD) then 0
- How to combine multiple loci: Single locus estimates are simply averaged together
- Assumes no inbreeding
Blouin, M. S., Parsons, M., Lacaille, V., & Lotz, S. (1996). Use of microsatellite loci to classify individuals by relatedness. Molecular ecology, 5(3), 393-401.
"""
function Blouin(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
    isempty(locus_names) && return missing
    shared = Vector{Float64}(undef, length(locus_names))
    for (idx, (loc, gen1, gen2)) in enumerate(zip(locus_names, ind1, ind2))
        i, j = gen1
        k, l = gen2
        # mutually shared allele positions at this locus, scaled by ploidy (2)
        shared[idx] = (((i ∈ gen2) & (k ∈ gen1)) + ((j ∈ gen2) & (l ∈ gen1))) / 2
    end
    return mean(shared)
end
"""
LiHorvitz(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Allele sharing index described by Li and Horvitz (1953)
-Single Locus Equation: If all alleles are the same between individuals (eg. AA x AA) then 1.
- If two alleles are shared between individuals (eg. AA x AB or AB x AB) then 0.5.
- If only one allele is shared between individuals (eg. AB x AC) then 0.25.
- If no alleles are shared (eg. AB x CD) then 0.
- How to combine multiple loci: Single locus estimates are simply averaged together
- Assumes no inbreeding
Li, C. C., & Horvitz, D. G. (1953). Some methods of estimating the inbreeding coefficient. American journal of human genetics, 5(2), 107.
"""
function LiHorvitz(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
Bxy = Vector{Float64}(undef, length(locus_names))
loc_id = 0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
loc_id += 1
i,j = gen1
k,l = gen2
Bxy[loc_id] = sum([i, j] .∈ [k,l]') / 4
end
return mean(Bxy)
end
"""
Loiselle(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the moments based estimator of pairwise relatedness using the estimator propsed by
Loiselle et al (1995) and modified to individual dyads by Heuertz et al. (2003).
- Multiple Locus Equation:
- Assumes no inbreeding
See equations 22 in: Wang(2017) for variant of estimator used
Loiselle, B. A., Sork, V. L., Nason, J., & Graham, C. (1995). Spatial genetic structure of a tropical understory shrub, <i>Psychotria officinalis</i> (Rubiaceae). American journal of botany, 82(11), 1420-1425.
Heuertz, M., Vekemans, X., Hausman, J. F., Palada, M., & Hardy, O. J. (2003). Estimating seed vs. pollen dispersal from spatial genetic structure in the common ash. Molecular Ecology, 12(9), 2483-2495.
Wang, J. (2017). Estimating pairwise relatedness in a small sample of individuals. Heredity, 119(5), 302-313.
"""
function Loiselle(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
d_kw = Dict(kwargs...)
numerator1 = 0.0
denominator1 = 0.0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
for allele in keys(alleles[loc])
fq = alleles[loc][allele]
numerator1 += ((sum(gen1 .== allele) / 2.0) - fq) * ((sum(gen2 .== allele) / 2.0) - fq)
denominator1 += fq * (1.0 - fq)
end
end
return numerator1 / denominator1 + 2.0 / (2 * d_kw[:n_samples] - 1)
end
"""
Lynch(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Allele sharing index described by Lynch (1988)
- Single Locus Equation: If all alleles are the same between individuals (eg. AA x AA) then 1.
- If both individuals are heterozygous with the same alleles or one is homozygous for the shared allele (eg. AB x AB or AA x AB) then 0.75.
- If only one allele is shared between individuals (eg. AB x AC) then 0.5.
- If no alleles are shared (eg. AB x CD) then 0.
- How to combine multiple loci: Single locus estimates are simply averaged together
- Assumes no inbreeding
Lynch, M. (1988). Estimation of relatedness by DNA fingerprinting. Molecular biology and evolution, 5(5), 584-599.
"""
function Lynch(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
Sxy = Vector{Float64}(undef, length(locus_names))
loc_id = 0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
loc_id += 1
i,j = gen1
k,l = gen2
Sxy[loc_id] = ((i ∈ gen2) + (j ∈ gen2) + (k ∈ gen1) + (l ∈ gen1)) / 4
end
return mean(Sxy)
end
"""
LynchLi(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the moments based estimator of pairwise relatedness by Lynch (1988) & improved by Li et al. (1993).
- Single Locus Equation:
- How to combine multiple loci: Sum the difference between observed and expected similarity across all loci and then divide by the sum of 1 - the expected similarity
- Assumes no inbreeding
See equations 13 - 16 in Wang (2017) for variant of estimator used
Li, C. C., Weeks, D. E., & Chakravarti, A. (1993). Similarity of DNA fingerprints due to chance and relatedness. Human heredity, 43(1), 45-52.
Wang, J. (2017). Estimating pairwise relatedness in a small sample of individuals. Heredity, 119(5), 302-313.
"""
function LynchLi(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
numerator1 = 0.0
denominator1 = 0.0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
a,b = gen1
c,d = gen2
Sxy = (0.5) * (((a == c) + (a == d) + (b == c) + (b == d)) / (2.0 * (1.0 + (a == b))) + ((a == c) + (a == d) + (b == c) + (b == d)) / (2.0 * (1.0 + (c == d))))
#TODO Change to unbiased formulation (eq 25)
S0 = 2.0 * sum(values(alleles[loc]) .^ 2) - sum(values(alleles[loc]) .^ 3)
numerator1 += Sxy - S0
denominator1 += 1.0 - S0
end
return numerator1 / denominator1
end
"""
LynchRitland(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the moments based estimator of pairwise relatedness by Lynch and Ritland (1999).
- Single Locus Equation:
- How to combine multiple loci: Weighted average of each term seperately weighted by the sample variance (assuming zero relatedness) and subsequently divided by the average sampling variance
- Assumes no inbreeding
See equation 10 in Wang (2017) for variant of estimator used
Lynch, M., & Ritland, K. (1999). Estimation of pairwise relatedness with molecular markers. Genetics, 152(4), 1753-1766.
Wang, J. (2017). Estimating pairwise relatedness in a small sample of individuals. Heredity, 119(5), 302-313.
"""
function LynchRitland(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
numerator1 = 0.0
denominator1 = 0.0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
a,b = gen1
c,d = gen2
fq_a, fq_b, fq_c, fq_d = map(i -> alleles[loc][i], (a,b,c,d))
n1 = fq_a * ((b == c) + (b == d)) + fq_b * ((a == c) + (a == d)) - 4.0 * fq_a * fq_b
n2 = fq_c * ((d == a) + (d == b)) + fq_d * ((c == a) + (c == b)) - 4.0 * fq_c * fq_d
d1 = 2.0 * (1.0 + (a == b)) * (fq_a + fq_b) - 8.0 * fq_a * fq_b
d2 = 2.0 * (1.0 + (c == d)) * (fq_c + fq_d) - 8.0 * fq_c * fq_d
WL1 = ((1 + (a == b)) * (fq_a + fq_b) - 4 * fq_a * fq_b) / (2 * fq_a * fq_b)
WL2 = ((1 + (c == d)) * (fq_c + fq_d) - 4 * fq_c * fq_d) / (2 * fq_c * fq_d)
numerator1 += ((n1 / d1) * WL1 + (n2 / d2) * WL2)
denominator1 += (WL1 + WL2) / 2.0
end
return numerator1 / denominator1
end
"""
Moran(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Reinterpretation of Moran's I (commonly used for spatial autocorrelation) to estimate genetic relatedness
by Hardy and Vekemans (1999)
- Multiple Locus Equation:
- Assumes no inbreeding
Hardy, O. J., & Vekemans, X. (1999). Isolation by distance in a continuous population: reconciliation between spatial autocorrelation analysis and population genetics models. Heredity, 83(2), 145-154.
"""
function Moran(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
#TODO NEED TO CHECK TO CONFIRM EQUATIONS
isempty(locus_names) && return missing
numerator1 = 0.0
denominator1 = 0.0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
for allele in keys(alleles[loc])
fq = alleles[loc][allele]
numerator1 += ((sum(gen1 .== allele) / 2.0) - fq) * ((sum(gen2 .== allele) / 2.0) - fq)
#denominator1 += ((sum(gen1 .== allele) / 2.0) - fq)^2
denominator1 += (((sum(gen1 .== allele) / 2.0) - fq)^2 + ((sum(gen2 .== allele) / 2.0) - fq)^2) / 2.0
end
#denominator1 += (1 / (length(alleles[loc]) - 1))
end
return (numerator1 / denominator1)
end
# Experimental per-locus variant of `Moran`: averages per-locus ratios instead of
# pooling a single numerator/denominator across loci. Not part of the public estimator set.
function Moran_experimental(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
    #TODO NEED TO CHECK TO CONFIRM EQUATIONS
    isempty(locus_names) && return missing
    numerator1 = Vector{Float64}(undef, length(locus_names))
    denominator1 = similar(numerator1)
    # zero-initialize the accumulators
    # NOTE(review): multiplying undef memory by 0.0 yields NaN if the garbage bits are
    # NaN/Inf — `fill!(numerator1, 0.0)` would be safer; confirm intent
    numerator1 = numerator1 .* 0.0
    denominator1 = denominator1 .* 0.0
    idx = 0
    for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
        idx += 1
        for allele in keys(alleles[loc])
            fq = alleles[loc][allele]
            # per-locus covariance of allele-frequency deviations
            numerator1[idx] += ((sum(gen1 .== allele) / 2.0) - fq) * ((sum(gen2 .== allele) / 2.0) - fq)
            # per-locus variance term uses only individual 1 (alternate form commented out)
            denominator1[idx] += (((sum(gen1 .== allele) / 2.0) - fq)^2) #+ ((sum(gen2 .== allele) / 2.0) - fq)^2) / 2
        end
        denominator1[idx] += (1 / (length(alleles[loc]) - 1))
    end
    # average the per-locus ratios
    return mean(numerator1 ./ denominator1)
end
"""
QuellerGoodnight(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the moments based estimator of pairwise relatedness developed by Queller & Goodnight (1989).
- Single Locus Equation:
- How to combine multiple loci:
- Multiple loci are combined by independently summing the two numerator and two denominator terms before performing the final division and averaging the two components.
- Assumes no inbreeding
See equation 3 in Wang(2017) for variant of estimator used.
Queller, D. C., & Goodnight, K. F. (1989). Estimating relatedness using genetic markers. Evolution, 43(2), 258-275.
Wang, J. (2017). Estimating pairwise relatedness in a small sample of individuals. Heredity, 119(5), 302-313.
"""
function QuellerGoodnight(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
numerator1 = 0.0
numerator2 = 0.0
denominator1 = 0.0
denominator2 = 0.0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
a,b = gen1
c,d = gen2
ident = ((a == c) + (a == d) + (b == c) + (b == d))
fq_a, fq_b, fq_c, fq_d = map(i -> alleles[loc][i], (a,b,c,d))
numerator1 += ident - 2.0 * (fq_a + fq_b)
numerator2 += ident - 2.0 * (fq_c + fq_d)
denominator1 += (2.0 * (1.0 + (a==b) - fq_a - fq_b))
denominator2 += (2.0 * (1.0 + (c==d) - fq_c - fq_d))
end
return (numerator1/denominator1 + numerator2/denominator2)/2.0
end
"""
Ritland(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the moments based estimator of pairwise relatedness proposed by Li and Horvitz (1953) and implemented/made popular by Ritland (1996).
- Single Locus Equation:
- How to combine multiple loci: A weighted average of individual locus specific estimates weighted by sampling variance
- Assumes no inbreeding
See equation 7 in: Wang (2017) for variant of estimator used
Ritland, K. (1996). Estimators for pairwise relatedness and individual inbreeding coefficients. Genetics Research, 67(2), 175-185.
Wang, J. (2017). Estimating pairwise relatedness in a small sample of individuals. Heredity, 119(5), 302-313.
"""
function Ritland(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
isempty(locus_names) && return missing
numerator1 = 0.0
denominator1 = 0.0
for (loc,gen1,gen2) in zip(locus_names, ind1, ind2)
a,b = gen1
c,d = gen2
A = ((alleles[loc] |> length) - 1)
R = 0.0
for i in unique((a,b,c,d))
# Individual locus relatedness value (eq 7 in paper)
R += ((((a == i) + (b == i)) * ((c == i) + (d == i))) / (4.0 * alleles[loc][i]))
end
R = (2.0 / A) * (R - 1.0)
# numerator for weighted combination of loci
numerator1 += (R * A)
# denominator for weighted combination of loci
denominator1 += A
end
return numerator1 / denominator1
end
### Wang 2002 helper functions ###
# Sum of the m-th powers of the allele frequencies stored as values of `alleles`.
function _a_wang_base(m::Int, alleles::Dict)
    return sum(freq -> freq^m, values(alleles))
end
"""
    _a_wang(N::Int, alleles::Dict)

Return the vector `[a, b, c, d]` of sample-size-corrected (unbiased) estimators of
the 2nd-4th moments of the allele frequency distribution (Wang 2002), where `N` is
the number of sampled genes (the caller in `Wang` passes 2 × the per-locus sample
count) and `alleles` maps each allele to its frequency. `a` is a placeholder and
is always `0.0`.
"""
function _a_wang(N::Int, alleles::Dict)
    # raw moments of the observed frequencies; sum(f, itr) avoids the temporary
    # arrays allocated by the broadcast form `values(alleles) .^ m`
    a2 = sum(fq -> fq^2, values(alleles))
    a3 = sum(fq -> fq^3, values(alleles))
    a4 = sum(fq -> fq^4, values(alleles))
    a = 0.0
    # unbiased estimators correcting for sampling N genes (Wang 2002)
    b = (N * a2 - 1) / (N - 1)
    c = (N^2 * a3 - 3 * (N - 1) * b - 1) / ((N - 1) * (N - 2))
    d = (N^3 * a4 - 6 * (N - 1) * (N - 2) * c - 7 * (N - 1) * b - 1) / (N^3 - 6 * N^2 + 11 * N - 6)
    return [a, b, c, d]
end
"""
Wang(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}; alleles::NamedTuple)
Calculates the moments based estimator of pairwise relatedness by Wang (2002).
-Single Locus Equation:
-How to combine multiple loci: Each individual locus subcomponent (b-g) and each genotypic state (P1-P3) is averaged weighted by the average similarity of unrelated dyads at each locus. Then the values of V, Φ, Δ, and r are calculated
-Assumes no inbreeding
-Corrected for sampling bias in allele frequencies to get an unbiased estimator
Wang, J. (2002). An estimator for pairwise relatedness using molecular markers. Genetics, 160(3), 1203-1215.
"""
function Wang(ind1::T, ind2::T, locus_names::Vector{Symbol}, alleles::U; kwargs...) where T <: GenoArray where U <: NamedTuple
#TODO NEED TO CHECK TO CONFIRM EQUATIONS
isempty(locus_names) && return missing
kw_dict = Dict(kwargs...)
P1 = Vector{Float64}(undef, length(locus_names))
P2, P3, P4, u, b, c, d, e, f, g = map(i -> similar(P1), 1:10)
loc_id = 0
for (loc,gen1,gen2, N) in zip(locus_names, ind1, ind2, kw_dict[:loc_n])
loc_id += 1
i,j = gen1
k,l = gen2
#N = nonmissing(data.genodata[data.genodata.locus .== string(loc), :genotype])
a = _a_wang(2 * N, alleles[loc])
a2_sq = a[2] ^ 2
u[loc_id] = 2 * a[2] - a[3]
# Which category of dyad
Sxy = ((i ∈ gen2) + (j ∈ gen2) + (k ∈ gen1) + (l ∈ gen1)) / 4
# Both alleles shared between individuals either the same or different
P1[loc_id] = 1.0 * (Sxy == 1)
# One allele shared between individuals and one is homozygous for that allele
P2[loc_id] = 1.0 * (Sxy == (3/4))
# One allele shared with the other two being unique
P3[loc_id] = 1.0 * (Sxy == (1/2))
P4[loc_id] = 1.0 * ((P1 + P2 + P3) == 0)
b[loc_id] = (2.0 * a2_sq - a[4])
c[loc_id] = (a[2] - 2.0 * a2_sq + a[4])
d[loc_id] = (4.0 * (a[3] - a[4]))
e[loc_id] = (2.0 * (a[2] - 3.0 * a[3] + 2.0 * a[4]))
f[loc_id] = (4.0 * (a[2] - a2_sq - 2.0 * a[3] + 2.0 * a[4]))
g[loc_id] = (1.0 - 7.0 * a[2] + 4.0 * a2_sq + 10.0 * a[3] - 8.0 * a[4])
end
#return (1 / (sum(1/u) * u)) * r
w = (1 / (sum(1/u) * u))
P1 = w * P1
P2 = w * P2
P3 = w * P3
b = w * b
c = w * c
d = w * d
e = w * e
f = w * f
g = w * g
#Eq 11
V = (1.0 - b)^2 * (e^2 * f + d * g^2) -
(1.0 - b) * (e * f - d * g)^2 +
2.0 * c * d * f * (1.0 - b) * (g + e) +
c^2 * d * f * (d + f)
#Eq 9
Φ = (d * f * ((e + g) * (1.0 - b) + c * (d + f)) * (P1 - 1.0) +
d * (1.0 - b) * (g * (1.0 - b - d) + f * (c + e)) * P3 +
f * (1.0 - b) * (e * (1.0 - b - f) + d * (c + g)) * P2) / V
#Eq 10
Δ = (c * d * f * (e + g) * (P1 + 1.0 - 2 * b) +
((1.0 - b) * (f * e^2 + d * g^2) - (e * f - d * g)^2) * (P1 - b) +
c * (d * g - e * f) * (d * P3 - f * P2) - c^2 * d * f * (P3 + P2 - d - f) -
c * (1.0 - b) * (d * g * P3 + e * f * P2)) / V
r = (Φ/2.0 + Δ)
return r
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 10212 | #=
function testkin(x, y)
sym = Symbol(y)
old = kinship(x, method = y)
new = kinshiptotable(kinship_new(x, method = y))
old[:, sym] = round.(old[:, sym], digits = 5)
new[:, :kinship] = round.(new.kinship, digits = 5)
if old[:, sym] == new.kinship
return true
else
insertcols!(old, :new => new.kinship)
return old[old[:, sym] .!= old.new, :]
end
end
=#
# Pairwise kinship for all sample combinations in `data` using estimator `method`.
# With iterations > 0, bootstraps the estimate and returns summary statistics.
function kinship(data::PopData; method::Function, iterations::Int = 0, interval::Vector{Float64} = [0.025, 0.975])
    # sanity checks: only diploids are supported and the CI needs exactly two bounds
    data.metadata.ploidy != 2 && error("kinship analyses currently only support diploid samples")
    length(interval) != 2 && throw(ArgumentError("Keyword argument \`interval\` must be a vector with 2 elements."))
    estimator = Symbol(method)
    # allele-sharing estimators need no allele frequencies; moments estimators do
    sharing_only = estimator ∈ (:Blouin, :LiHorvitz, :Lynch)
    needs_freqs = estimator ∈ (:Loiselle, :LynchLi, :LynchRitland, :Moran, :QuellerGoodnight, :Ritland)
    if (iterations == 0) && sharing_only
        _kinship_noboot_nofreq(data, method)
    elseif (iterations == 0) && needs_freqs
        _kinship_noboot_freq(data, method)
    elseif (iterations > 0) && sharing_only
        _kinship_boot_nofreq(data, method, iterations, interval)
    elseif (iterations > 0) && needs_freqs
        _kinship_boot_freq(data, method, iterations, interval)
    else
        throw(ArgumentError("Invalid method provided: $method. See the docstring \`?kinship\` for usage information."))
    end
end
# Same as `kinship(data; ...)` but restricted to the samples listed in `samplenames`.
function kinship(data::PopData, samplenames::AbstractVector{T}; method::Function, iterations::Int = 0, interval::Vector{Float64} = [0.025, 0.975]) where T<:AbstractString
    # sanity checks: only diploids are supported and the CI needs exactly two bounds
    data.metadata.ploidy != 2 && error("kinship analyses currently only support diploid samples")
    length(interval) != 2 && throw(ArgumentError("Keyword argument \`interval\` must be a vector with 2 elements."))
    # subset the PopData to just the requested samples
    newdata = data[data.genodata.name .∈ Ref(samplenames)]
    estimator = Symbol(method)
    # allele-sharing estimators need no allele frequencies; moments estimators do
    sharing_only = estimator ∈ (:Blouin, :LiHorvitz, :Lynch)
    needs_freqs = estimator ∈ (:Loiselle, :LynchLi, :LynchRitland, :Moran, :QuellerGoodnight, :Ritland)
    if (iterations == 0) && sharing_only
        _kinship_noboot_nofreq(newdata, method)
    elseif (iterations == 0) && needs_freqs
        _kinship_noboot_freq(newdata, method)
    elseif (iterations > 0) && sharing_only
        _kinship_boot_nofreq(newdata, method, iterations, interval)
    elseif (iterations > 0) && needs_freqs
        _kinship_boot_freq(newdata, method, iterations, interval)
    else
        throw(ArgumentError("Invalid method provided: $method. See the docstring \`?kinship\` for usage information."))
    end
end
### Internal implementations ###
#### Non-Bootstrapped ####
# Returns a NamedMatrix
# Non-bootstrapped kinship for allele-sharing estimators (no allele frequencies needed).
# Returns a symmetric NamedMatrix indexed by sample name; the diagonal is left unset.
function _kinship_noboot_nofreq(data::PopData, method::Function)
    genomtx = locimatrix(data)
    ids = samplenames(data)
    nsamples = length(ids)
    result = NamedArray{Float64}(nsamples, nsamples)
    setnames!(result, String.(ids), 1)
    setnames!(result, String.(ids), 2)
    # fill the upper triangle and mirror each estimate into the lower triangle
    @inbounds for i in 1:(nsamples - 1)
        rowi = view(genomtx, i, :)
        @inbounds for j in (i + 1):nsamples
            rowj = view(genomtx, j, :)
            result[j, i] = result[i, j] = method(rowi, rowj)
        end
    end
    return result
end
# Non-bootstrapped kinship for frequency-based (moments) estimators.
# Returns a symmetric NamedMatrix indexed by sample name; the diagonal is left unset.
function _kinship_noboot_freq(data::PopData, method::Function)
    genomtx = locimatrix(data)
    ids = samplenames(data)
    nsamples = length(ids)
    result = NamedArray{Float64}(nsamples, nsamples)
    setnames!(result, String.(ids), 1)
    setnames!(result, String.(ids), 2)
    # population allele frequencies per locus, computed once up front
    freqs = @inbounds Tuple(allelefreq(locus) for locus in eachcol(genomtx))
    @inbounds for i in 1:(nsamples - 1)
        rowi = view(genomtx, i, :)
        @inbounds for j in (i + 1):nsamples
            rowj = view(genomtx, j, :)
            result[j, i] = result[i, j] = method(rowi, rowj, freqs, n_samples = nsamples)
        end
    end
    return result
end
#### Bootstrapped ####
# Returns a DataFrame
# Includes a progress bar from Term.jl
# Uses OnlineStats.jl to calculate mean/variance/CI without additional allocations
# Bootstrapped kinship for allele-sharing estimators (no allele frequencies needed).
# Resamples loci with replacement `iterations` times per pair, summarizing the
# bootstrap distribution online (OnlineStats) to avoid storing all replicates.
# Returns a DataFrame with the point estimate plus bootstrap mean/std/CI per pair.
function _kinship_boot_nofreq(data::PopData, method::Function, iterations::Int, interval::Vector{Float64} = [0.025, 0.975])
    locmtx = locimatrix(data)
    ids = samplenames(data)
    n = length(ids)
    nloc = size(locmtx, 2)
    idxrange = 1:nloc
    # point estimates, mirrored into both triangles
    result = NamedArray{Float64}(n, n)
    setnames!(result, String.(ids),1)
    setnames!(result, String.(ids),2)
    # bootstrap summaries: only the upper triangle is written
    bresult = Matrix{Float64}(undef, n,n)
    b_sdev = similar(bresult)
    b_ci = Matrix{Vector{Float64}}(undef, n,n)
    # progress bar (Term.jl); one tick per sample pair
    pbar = ProgressBar(;refresh_rate=90, transient = true)
    job = addjob!(pbar; description= "Kinship: ", N= Int64((n * (n-1))/2))
    start!(pbar)
    # one task per row i; each task reuses a single bootstrap index buffer
    @inbounds @sync for i in 1:n-1
        Base.Threads.@spawn begin
            @inbounds v1 = view(locmtx,i,:)
            boot_idx = Vector{Int64}(undef, nloc)
            sizehint!(boot_idx, nloc)
            @inbounds for j in i+1:n
                @inbounds v2 = view(locmtx,j,:)
                @inbounds result[j,i] = result[i,j] = method(v1, v2)
                # online accumulators: mean/variance and the requested quantiles
                bootstats = Series(Variance(), Quantile(interval))
                k = 1
                # runs exactly `iterations` times; NaN estimates are skipped,
                # so fewer than `iterations` replicates may be fitted
                while k <= iterations
                    k += 1
                    @inbounds boot_idx .= rand(idxrange, nloc)
                    v1b = @inbounds view(locmtx, i,boot_idx)
                    v2b = @inbounds view(locmtx, j, boot_idx)
                    b_est = method(v1b, v2b)
                    isnan(b_est) ? continue : fit!(bootstats, b_est)
                end
                @inbounds bresult[i,j] = bootstats.stats[1].μ
                @inbounds b_sdev[i,j] = sqrt(bootstats.stats[1].σ2)
                @inbounds b_ci[i,j] = value(bootstats.stats[2])
                update!(job)
            end
        end
    end
    stop!(pbar)
    # flatten the upper-triangular summaries into pair-ordered vectors
    ci = uppertri2vec(b_ci)
    cilow = getindex.(ci, 1)
    cihi = getindex.(ci,2)
    out = kinshiptotable(result, Symbol(method))
    insertcols!(out, :bootmean => uppertri2vec(bresult), :std => uppertri2vec(b_sdev), :CI_lower => cilow, :CI_upper => cihi)
    return out
end
# Bootstrapped kinship for frequency-based (moments) estimators. Identical structure
# to `_kinship_boot_nofreq`, but precomputes per-locus allele frequencies and passes
# them (plus `n_samples`) to the estimator.
# Returns a DataFrame with the point estimate plus bootstrap mean/std/CI per pair.
function _kinship_boot_freq(data::PopData, method::Function, iterations::Int, interval::Vector{Float64} = [0.025, 0.975])
    locmtx = locimatrix(data)
    ids = samplenames(data)
    n = length(ids)
    nloc = size(locmtx, 2)
    idxrange = 1:nloc
    # point estimates, mirrored into both triangles
    result = NamedArray{Float64}(n, n)
    setnames!(result, String.(ids),1)
    setnames!(result, String.(ids),2)
    # bootstrap summaries: only the upper triangle is written
    bresult = Matrix{Float64}(undef, n,n)
    b_sdev = similar(bresult)
    b_ci = Matrix{Vector{Float64}}(undef, n,n)
    pbar = ProgressBar(;refresh_rate=90, transient = true)
    job = addjob!(pbar; description= "Kinship: ", N= Int64((n * (n-1))/2))
    start!(pbar)
    # population allele frequencies per locus, computed once up front
    allelefrequencies = @inbounds Tuple(allelefreq(i) for i in eachcol(locmtx))
    # one task per row i; each task reuses a single bootstrap index buffer
    @inbounds @sync for i in 1:n-1
        Base.Threads.@spawn begin
            @inbounds v1 = view(locmtx,i,:)
            boot_idx = Vector{Int64}(undef, nloc)
            sizehint!(boot_idx, nloc)
            @inbounds for j in i+1:n
                @inbounds v2 = view(locmtx,j,:)
                @inbounds result[j,i] = result[i,j] = method(v1, v2, allelefrequencies, n_samples = n)
                # online accumulators: mean/variance and the requested quantiles
                bootstats = Series(Variance(), Quantile(interval))
                k = 1
                # runs exactly `iterations` times; NaN estimates are skipped
                while k <= iterations
                    k += 1
                    @inbounds boot_idx .= rand(idxrange, nloc)
                    v1b = @inbounds view(locmtx, i,boot_idx)
                    v2b = @inbounds view(locmtx, j, boot_idx)
                    b_est = method(v1b, v2b, allelefrequencies, n_samples = n)
                    isnan(b_est) ? continue : fit!(bootstats, b_est)
                end
                @inbounds bresult[i,j] = bootstats.stats[1].μ
                @inbounds b_sdev[i,j] = sqrt(bootstats.stats[1].σ2)
                @inbounds b_ci[i,j] = value(bootstats.stats[2])
                update!(job)
            end
        end
    end
    stop!(pbar)
    # flatten the upper-triangular summaries into pair-ordered vectors
    ci = uppertri2vec(b_ci)
    cilow = getindex.(ci, 1)
    cihi = getindex.(ci,2)
    out = kinshiptotable(result, Symbol(method))
    insertcols!(out, :bootmean => uppertri2vec(bresult), :std => uppertri2vec(b_sdev), :CI_lower => cilow, :CI_upper => cihi)
    return out
end
"""
kinshiptotable(kinshipresults::T, methd::Symbol) where T<:NamedMatrix
Converts the `NamedMatrix` result from the non-bootstrapped `kinship()` results into a `DataFrame`.
The second positonal argument (`methd`) is the name of the value column (default: `kinship`). For
better analysis workflow, it would be useful to specify the method for this column, to
keep track of which estimator was used (e.g., `Blouin`, `LynchLi`, etc.)
**Example**
```julia
julia> cats = @nancycats ; kin = kinship(cats, method = Moran) ;
julia> kinshiptotable(kin, :Moran)
22366×3 DataFrame
Row │ sample1 sample2 Moran
│ String String Float64
───────┼────────────────────────────────
1 │ cc_001 cc_002 0.00688008
2 │ cc_001 cc_003 -0.0286812
3 │ cc_001 cc_005 -0.000749142
4 │ cc_001 cc_007 0.0516361
5 │ cc_001 cc_008 0.0261128
6 │ cc_001 cc_009 -0.00187027
7 │ cc_001 cc_010 0.0182852
⋮ │ ⋮ ⋮ ⋮
22361 │ seg_028 seg_029 -0.0472928
22362 │ seg_028 seg_030 -0.0172853
22363 │ seg_028 seg_031 -0.00240921
22364 │ seg_029 seg_030 -0.0278483
22365 │ seg_029 seg_031 0.0297876
22366 │ seg_030 seg_031 -0.0371295
22353 rows omitted
```
"""
function kinshiptotable(kinshipresults::T, mthd::Symbol=:nothing) where T<:NamedMatrix
n = size(kinshipresults,1)
n == size(kinshipresults,2) || throw(DimensionMismatch("The input matrix must be symmetrical, but has size $(size(kinshipresults))"))
ids = names(kinshipresults)[1]
vals = uppertri2vec(kinshipresults)
idpairs = pairwisepairs(ids)
meth = mthd == :nothing ? :kinship : mthd
DataFrame(:sample1 => first.(idpairs), :sample2 => getindex.(idpairs, 2), meth => vals)
end
#TODO update kinshipposthoc for this new interface | PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 22152 | """
_bootstrapsummary(::Vector{Union{Missing, Float64}}, width::Tuple{Float64, Float64})
Return the mean, median, standard error, and quantiles (given by `witdth`) of kinship resampling.
"""
@inline function _bootstrapsummary(boot_out::Vector{Union{Missing, Float64}}, width::Tuple{Float64, Float64})
    # every replicate missing: nothing to summarize
    isallmissing(boot_out) == true && return missing, missing, missing, missing
    boot_skipmissing = collect(skipmissing(boot_out))
    n_nonmiss = length(boot_skipmissing)
    Mean = mean(boot_skipmissing)
    Median = median(boot_skipmissing)
    # bootstrap standard error = sample standard deviation of the bootstrap replicates
    # (fixed: the original subtracted `boot_skipmissing / n_nonmiss` — each value
    # divided by n — instead of the mean, i.e. the `sum` was missing)
    SE = sqrt(sum((boot_skipmissing .- Mean) .^ 2) / (n_nonmiss - 1))
    # quantiles at the bounds given by `width` (e.g. (0.025, 0.975))
    quants = quantile(boot_skipmissing, width)
    return Mean, Median, SE, quants
end
"""
_bootstrapgenos_all(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}, n_per_loc::Vector{Int}, alleles::NamedTuple; method::Function, iterations::Int)
Perform `iterations` number of bootstrap resampling iterations of all genotypes between pair (`ind1` `ind2`). Returns a vector of length `iterations`
of the kinship estimate given by method `method`. This is an internal function with `locus_names`, `n_per_loc`, and `alleles` supplied by `_kinship_boot_all`.
"""
@inline function _bootstrapgenos_all(ind1::T, ind2::T, locus_names::Vector{Symbol}, n_per_loc::Vector{Int}, alleles::U; method::F, iterations::Int, inbreeding::Bool, n::Int) where T <: GenoArray where U <: NamedTuple where F
relate_vec_boot = Vector{Union{Missing,Float64}}(undef, iterations)
n_loc = length(locus_names)
@sync for iter in 1:iterations
Base.Threads.@spawn begin
# bootstrap the indices
boot_idx = rand(1:n_loc, n_loc)
# sample the source vectors with the resampled/bootstrapped indices
ind1_boot, ind2_boot, loc_boot, n_per_loci = map(i -> getindex(i, boot_idx), [ind1, ind2, locus_names, n_per_loc])
# get index for genotype appearing missing in at least one individual in the pair
keep_idx = nonmissings(ind1_boot, ind2_boot)
relate_vec_boot[iter] = method(ind1_boot[keep_idx], ind2_boot[keep_idx], loc_boot[keep_idx], alleles, loc_n = n_per_loci[keep_idx], n_samples = n_loc, inbreeding = inbreeding)
end
end
return relate_vec_boot
end
"""
_bootstrapgenos_nonmissing(ind1::GenoArray, ind2::GenoArray, locus_names::Vector{Symbol}, n_per_loc::Vector{Int}, alleles::NamedTuple; method::Function, iterations::Int)
Perform `iterations` number of bootstrap resampling iterations of only shared (nonmissing) genotypes between pair (`ind1` `ind2`). Returns a vector of length `interatotions`
of the kinship estimate given by method `method`. This is an internal function with `locus_names`, `n_per_loc`, and `alleles` supplied by `_kinship_boot_nonmissing`.
"""
@inline function _bootstrapgenos_nonmissing(ind1::T, ind2::T, locus_names::Vector{Symbol}, n_per_loc::Vector{Int}, alleles::U; method::F, iterations::Int, inbreeding::Bool) where T <: GenoArray where U <: NamedTuple where F
relate_vec_boot = Vector{Union{Missing,Float64}}(undef, iterations)
n_loc = length(locus_names)
@sync for iter in 1:iterations
Base.Threads.@spawn begin
# bootstrap the indices
boot_idx = rand(1:n_loc, n_loc)
# sample the source vectors with the resampled/bootstrapped indices
ind1_boot, ind2_boot, loc_boot, n_per_loci = map(i -> getindex(i, boot_idx), [ind1, ind2, locus_names, n_per_loc])
# faster/cheaper n counting
relate_vec_boot[iter] = method(ind1_boot, ind2_boot, loc_boot, alleles, loc_n = n_per_loci, n_samples = n_loc, inbreeding = inbreeding)
end
end
return relate_vec_boot
end
#FEATURE namedtuple output
"""
_kinship_boot_all(::PopData, sample_names::Vector{String}; method::Function, iterations::Int, interval::Tuple{Float64, Float64})
Calculate pairwise kinship between all combinations of the provided `sample_names` for each `method` provided. Bootstrapping resamples using
the `all` method, where resampling occurs over all loci. This is an internal function with all arguments provided by `kinship`.
"""
function _kinship_boot_all(data::PopData, sample_names::Vector{T}; method::F, iterations::Int = 100, interval::Tuple{Float64, Float64} = (0.025, 0.975), inbreeding::Bool) where F where T<:AbstractString
loci_names = Symbol.(loci(data))
uniq_pops = unique(data.metadata.sampleinfo.population)
if first(uniq_pops) ∈ ["fullsib", "halfsib", "unrelated", "parent_offspring"]
sample_pairs = simpairs(sample_names) |> collect
else
sample_pairs = pairwisepairs(sample_names) |> collect
end
n_pairs = length(sample_pairs)
n_samples = data.metadata.samples
allelefrequencies = allelefreq(data)
n_per_loci = DataFrames.combine(groupby(data.genodata, :locus), :genotype => nonmissing => :n)[:, :n]
relate_vecs = map(i -> Vector{Union{Missing,Float64}}(undef, n_pairs), 1:length(method))
boot_means, boot_medians, boot_ses = map(i -> deepcopy(relate_vecs), 1:3)
boot_CI = map(i -> Vector{Union{Missing,Tuple{Float64,Float64}}}(undef, n_pairs), 1:length(method))
shared_loci = Vector{Int}(undef, n_pairs)
#p = Progress(n_pairs*length(method), dt = 1, color = :blue, barglyphs = BarGlyphs("|=> |"), barlen = 30)
popdata_idx = groupby(data.genodata, :name)
@inbounds for i in 1:n_pairs
@inbounds geno1 = popdata_idx[(sample_pairs[i][1],)].genotype
@inbounds geno2 = popdata_idx[(sample_pairs[i][2],)].genotype
# get index for genotype appearing missing in at least one individual in the pair
keep_idx = nonmissings(geno1, geno2)
# generate nonmissing genotype data
gen1, gen2, loc, n_per_loc = (i[keep_idx] for i in (geno1, geno2, loci_names, n_per_loci))
@inbounds shared_loci[i] = length(keep_idx)
@sync @inbounds for (j, mthd) in enumerate(method)
Base.Threads.@spawn begin
@inbounds relate_vecs[j][i] = mthd(gen1, gen2, loc, allelefrequencies, loc_n = n_per_loci, n_samples = n_samples, inbreeding = inbreeding)
boot_out = _bootstrapgenos_all(geno1, geno2, loci_names, n_per_loci, allelefrequencies, method = mthd, iterations = iterations, inbreeding = inbreeding, n = j+1)
@inbounds boot_means[j][i], boot_medians[j][i], boot_ses[j][i], boot_CI[j][i] = _bootstrapsummary(boot_out, interval)
#pair_text = sample_pairs[i][1] * " × " * sample_pairs[i][2] * " ($i" * " of " * "$(n_pairs)" * ")"
#ProgressMeter.next!(p; showvalues = [(:Pair, pair_text), (:Method, mthd)])
end
end
end
method_colnames = [Symbol("$i") for i in method]
boot_mean_colnames = [Symbol("$i"*"_mean") for i in method]
boot_median_colnames = [Symbol("$i"*"_median") for i in method]
boot_se_colnames = [Symbol("$i"*"_SE") for i in method]
CI_percent = convert(Int64, round(interval[2] - interval[1], digits = 2) * 100)
boot_CI_colnames = [Symbol("$i"*"_CI_"*"$CI_percent") for i in method]
out_dfs = map(method_colnames) do mthod
DataFrame(:sample_1 => map(i -> i[1], sample_pairs), :sample_2 => map(i -> i[2], sample_pairs), :n_loci => shared_loci)
end
@inbounds for (i, mth) in enumerate(method_colnames)
out_dfs[i][:, mth] = relate_vecs[i]
out_dfs[i][:, boot_mean_colnames[i]] = boot_means[i]
out_dfs[i][:, boot_median_colnames[i]] = boot_medians[i]
out_dfs[i][:, boot_se_colnames[i]] = boot_ses[i]
out_dfs[i][:, boot_CI_colnames[i]] = boot_CI[i]
end
return (; (method_colnames .=> out_dfs)...)
end
"""
    _kinship_boot_nonmissing(::PopData, sample_names::Vector{String}; method::F, iterations::Int, interval::Tuple{Float64, Float64}) where F

Calculate pairwise kinship between all combinations of the provided `sample_names` for each `method` provided. Bootstrapping resamples using
the `nonmissing` method, where resampling occurs over only shared non-missing loci. This is an internal function with all arguments provided by `kinship`.
Returns a NamedTuple of DataFrames, one per estimator method.
"""
function _kinship_boot_nonmissing(data::PopData, sample_names::Vector{T}; method::F, iterations::Int, interval::Tuple{Float64, Float64} = (0.025, 0.975), inbreeding::Bool) where F where T<: AbstractString
    loci_names = Symbol.(loci(data))
    uniq_pops = unique(data.metadata.sampleinfo.population)
    # PopGenSims-style simulated sibship data is only compared within simulated pairs
    if first(uniq_pops) ∈ ["fullsib", "halfsib", "unrelated", "parent_offspring"]
        sample_pairs = simpairs(sample_names) |> collect
    else
        sample_pairs = pairwisepairs(sample_names) |> collect
    end
    n_pairs = length(sample_pairs)
    n_samples = data.metadata.samples
    # number of nonmissing genotypes per locus
    n_per_loci = DataFrames.combine(groupby(data.genodata, :locus), :genotype => nonmissing => :n)[:, :n]
    allelefrequencies = allelefreq(data)
    # one result vector per estimator method
    relate_vecs = map(i -> Vector{Union{Missing,Float64}}(undef, n_pairs), 1:length(method))
    boot_means, boot_medians, boot_ses = map(i -> deepcopy(relate_vecs), 1:3)
    boot_CI = map(i -> Vector{Union{Missing,Tuple{Float64,Float64}}}(undef, n_pairs), 1:length(method))
    shared_loci = Vector{Int}(undef, n_pairs)
    popdata_idx = groupby(data.genodata, :name)
    @inbounds for i in 1:n_pairs
        @inbounds geno1 = popdata_idx[(sample_pairs[i][1],)].genotype
        @inbounds geno2 = popdata_idx[(sample_pairs[i][2],)].genotype
        # get index for genotype appearing missing in at least one individual in the pair
        keep_idx = nonmissings(geno1, geno2)
        # generate nonmissing genotype data
        gen1, gen2, loc, n_per_loc = (i[keep_idx] for i in (geno1, geno2, loci_names, n_per_loci))
        @inbounds shared_loci[i] = length(keep_idx)
        # estimators are independent of each other, so run them on separate tasks
        @inbounds @sync for (j, mthd) in enumerate(method)
            Base.Threads.@spawn begin
                @inbounds relate_vecs[j][i] = mthd(gen1, gen2, loc, allelefrequencies, loc_n = n_per_loc, n_samples = n_samples, inbreeding = inbreeding)
                boot_out = _bootstrapgenos_nonmissing(gen1, gen2, loc, n_per_loc, allelefrequencies, method = mthd, iterations = iterations, inbreeding = inbreeding)
                @inbounds boot_means[j][i], boot_medians[j][i], boot_ses[j][i], boot_CI[j][i] = _bootstrapsummary(boot_out, interval)
            end
        end
    end
    method_colnames = [Symbol("$i") for i in method]
    boot_mean_colnames = [Symbol("$i"*"_mean") for i in method]
    boot_median_colnames = [Symbol("$i"*"_median") for i in method]
    boot_se_colnames = [Symbol("$i"*"_SE") for i in method]
    CI_percent = convert(Int64, round(interval[2] - interval[1], digits = 2) * 100)
    boot_CI_colnames = [Symbol("$i"*"_CI_"*"$CI_percent") for i in method]
    out_dfs = map(method_colnames) do mthod
        DataFrame(:sample_1 => map(i -> i[1], sample_pairs), :sample_2 => map(i -> i[2], sample_pairs), :n_loci => shared_loci)
    end
    # BUG FIX: the original wrote the result columns into an undefined `out_df`;
    # each estimator's columns belong in its own table `out_dfs[i]`
    @inbounds for (i, mth) in enumerate(method_colnames)
        out_dfs[i][:, mth] = relate_vecs[i]
        out_dfs[i][:, boot_mean_colnames[i]] = boot_means[i]
        out_dfs[i][:, boot_median_colnames[i]] = boot_medians[i]
        out_dfs[i][:, boot_se_colnames[i]] = boot_ses[i]
        out_dfs[i][:, boot_CI_colnames[i]] = boot_CI[i]
    end
    return (; (method_colnames .=> out_dfs)...)
end
"""
    _kinship_noboot(::PopData, sample_names::Vector{String}; method::F) where F

Calculate pairwise kinship between all combinations of the provided `sample_names` for each `method` provided.
This is an internal function with arguments provided by `kinship`. Returns a single DataFrame with one
estimate column per method.
"""
function _kinship_noboot(data::PopData, sample_names::Vector{T}; method::F, inbreeding::Bool) where F where T <: AbstractString
    locnames = Symbol.(loci(data))
    nsamples = data.metadata.samples
    pops = unique(data.metadata.sampleinfo.population)
    # PopGenSims-style simulated sibship data is only compared within simulated pairs
    sample_pairs = first(pops) ∈ ["fullsib", "halfsib", "unrelated", "parent_offspring"] ?
        collect(simpairs(sample_names)) :
        collect(pairwisepairs(sample_names))
    npairs = length(sample_pairs)
    # number of nonmissing genotypes per locus
    loc_n = DataFrames.combine(groupby(data.genodata, :locus), :genotype => nonmissing => :n)[:, :n]
    freqs = allelefreq(data)
    # one result vector per estimator method
    estimates = [Vector{Union{Missing,Float64}}(undef, npairs) for _ in 1:length(method)]
    nshared = Vector{Int}(undef, npairs)
    geno_by_name = groupby(data.genodata, :name)
    # each pair is independent, so compute pairs on separate tasks
    @sync for idx in 1:npairs
        Base.Threads.@spawn begin
            g1 = geno_by_name[(sample_pairs[idx][1],)].genotype
            g2 = geno_by_name[(sample_pairs[idx][2],)].genotype
            # keep only loci nonmissing in both individuals
            keep = nonmissings(g1, g2)
            nshared[idx] = length(keep)
            for (m_idx, estimator) in enumerate(method)
                estimates[m_idx][idx] = estimator(g1[keep], g2[keep], locnames[keep], freqs, loc_n = loc_n[keep], n_samples = nsamples, inbreeding = inbreeding)
            end
        end
    end
    colnames = [Symbol("$m") for m in method]
    out = DataFrame(:sample_1 => getindex.(sample_pairs, 1), :sample_2 => getindex.(sample_pairs, 2), :n_loci => nshared)
    for (i, col) in enumerate(colnames)
        out[:, col] = estimates[i]
    end
    return out
end
"""
```
# compare all samples
kinship(::PopData; method::Function, iterations::Int64, interval::Tuple{Float64, Float64}, resample::String, inbreeding::Bool = false)
# to compare specific samples
kinship(::PopData, samples::Vector{String}; method::F, iterations::Int64, interval::Tuple{Float64, Float64}, resample::String, inbreeding::Bool = false)
```
Return a dataframe of pairwise kinship estimates for all or select pairs of `samples` in a `PopData` object using
method(s) `F` where `F` is one or several of the methods listed below. If no bootstrapping is required, then the only
necessary keyword to provide is `method = ` and `inbreeding = ` for the `dyadicLikelihood` method (see examples below).
**Note:** samples must be diploid.
### Estimator methods
The available estimators are listed below and are functions themselves. `kinship` takes the
function names as arguments (**case sensitive**), therefore do not use quotes or colons
in specifying the methods. Multiple methods can be supplied as a vector. All of these methods will tab-autocomplete.
For more information on a specific method, please see the respective docstring (e.g. `?Loiselle`).
- `Blouin`
- `LiHorvitz`
- `Loiselle`
- `Lynch`
- `LynchLi`
- `LynchRitland`
- `Moran`
- `QuellerGoodnight`
- `Ritland`
### Simulated siblingship comparison
If validating the estimators using `PopGenSims.jl` to simulate sibship relationships, `kinship`
will recognize `PopData` generated in that manner (the `population` column) and only compare siblingship
pairs.
### Inbreeding
Use the `inbreeding` keyword to specify whether to allow inbreeding (`true`) or not (`false`, default).
This is only relevant for the `dyadicLikelihood` method (not yet released)
### Bootstrapping
To calculate means, medians, standard errors, and confidence intervals using bootstrapping,
set `iterations = n` where `n` is an integer greater than `0` (the default) corresponding to the number
of bootstrap iterations you wish to perform for each pair. The default confidence interval is `(0.025, 0.975)` (i.e. 95%),
however that can be changed by supplying the keyword `interval = (low, high)` where `low` and `high` are the intervals you want
(as `AbstractFloat`). Performing bootstrapping will return a NamedTuple of DataFrames, with each field in the NamedTuple
corresponding to the estimator `Method` it describes, which can be merged into one large dataframe using `mergekinship()`.
#### Resampling methods
There are two available resampling methods, `"all"` (default & recommended) and `"nonmissing"`.
- `"all"` : resamples all loci for a pair of individuals and then drops missing loci between them
- speed: slower
- pro: better resampling variation
- con: by chance some iterations may have a lot of missing loci that have to be dropped
- `"nonmissing"` : resamples only the shared non-missing loci between the pair
- speed: faster
- pro: every iteration guarantees the same number of loci compared between the pair
- con: too-tight confidence intervals due to less possible variation
**Examples**
```
julia> cats = @nancycats;
julia> kinship(cats, method = Ritland)
27966×4 DataFrame
Row │ sample_1 sample_2 n_loci Ritland
│ String String Int64 Float64?
───────┼─────────────────────────────────────────
1 │ N215 N216 8 0.258824
2 │ N215 N217 8 0.193238
3 │ N215 N218 8 0.127497
4 │ N215 N219 8 0.0453471
⋮ │ ⋮ ⋮ ⋮ ⋮
27963 │ N297 N290 7 0.189647
27964 │ N281 N289 8 0.0892068
27965 │ N281 N290 7 0.104614
27966 │ N289 N290 7 0.0511663
27958 rows omitted
julia> kinship(cats, ["N7", "N111", "N115"], method = [Ritland, Loiselle])
3×5 DataFrame
Row │ sample_1 sample_2 n_loci Ritland Loiselle
│ String String Int64 Float64? Float64?
─────┼────────────────────────────────────────────────────
1 │ N7 N111 9 -0.129432 -0.101618
2 │ N7 N115 9 -0.0183925 -0.0428898
3 │ N111 N115 9 0.0240152 0.13681
julia> rel_out = kinship(cats, ["N7", "N111", "N115"], method = [Loiselle, Moran], iterations = 100, interval = (0.025, 0.975));
julia> rel_out.Loiselle
3×8 DataFrame
Row │ sample_1 sample_2 n_loci Loiselle Loiselle_mean Loiselle_median Loiselle_SE Loiselle_CI_95
│ String String Int64 Float64? Float64? Float64? Float64? Tuple…?
─────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────
1 │ N7 N111 9 -0.101618 0.0164593 0.0168983 0.0511942 (-0.0670707, 0.104008)
2 │ N7 N115 9 -0.0428898 0.0771664 0.0880589 0.0982462 (-0.0428068, 0.183625)
3 │ N111 N115 9 0.13681 0.255124 0.24991 0.267319 (0.103864, 0.421979)
# merge results into one big dataframe
julia> mergekinship(rel_out)
3×13 DataFrame
Row │ sample_1 sample_2 n_loci Loiselle Loiselle_mean Loiselle_ ⋯
│ String String Int64 Float64? Float64? Float64? ⋯
─────┼───────────────────────────────────────────────────────────────────
1 │ N7 N111 9 -0.101618 0.0164593 0.0 ⋯
2 │ N7 N115 9 -0.0428898 0.0771664 0.0
3 │ N111 N115 9 0.13681 0.255124 0.2
8 columns omitted
```
"""
function kinship(data::PopData, sample_names::Vector{T}; method::F, iterations::Int64 = 0, interval::Tuple{Float64, Float64} = (0.025, 0.975), resample::String = "all", inbreeding::Bool = false) where F where T<:AbstractString
    data.metadata.ploidy != 2 && error("kinship analyses currently only support diploid samples")
    errs = ""
    all_samples = string.(samplenames(data))
    # validate that every requested sample exists in the PopData
    if sample_names != all_samples
        [errs *= "$i," for i in sample_names if i ∉ all_samples]
        errs != "" && error("Samples not found in the PopData: " * errs)
    end
    # normalize `method` to a collection of estimator functions.
    # BUG FIX: the original test `eltype(method) != Function` re-wrapped a
    # single-element vector such as [Ritland] (its eltype is the concrete
    # typeof(Ritland), not Function), producing a nested vector that then
    # failed the method-name validation below
    if !(eltype(method) <: Function)
        method = [method]
    end
    relate_mthds = [:QuellerGoodnight, :Ritland, :Lynch, :LynchLi, :LynchRitland, :Wang, :Loiselle, :Blouin, :Moran, :LiHorvitz, :dyadicLikelihood]
    [errs *= "$i is not a valid method\n" for i in Symbol.(method) if i ∉ relate_mthds]
    errs != "" && throw(ArgumentError(errs * "Methods are case-sensitive. Please see the docstring (?kinship) for additional help."))
    # dispatch to the bootstrapped or plain internal implementation
    if iterations > 0
        if resample == "all"
            _kinship_boot_all(data, sample_names, method = method, iterations = iterations, interval = interval, inbreeding = inbreeding)
        elseif resample == "nonmissing"
            _kinship_boot_nonmissing(data, sample_names, method = method, iterations = iterations, interval = interval, inbreeding = inbreeding)
        else
            throw(ArgumentError("Invalid resample method. Please choose from resample methods \"all\" or \"nonmissing\""))
        end
    else
        _kinship_noboot(data, sample_names, method = method, inbreeding = inbreeding)
    end
end
function kinship(data::PopData; method::F, iterations::Int64 = 0, interval::Tuple{Float64, Float64} = (0.025, 0.975), resample::String = "all", inbreeding::Bool = false) where F
    # convenience method: compare every sample against every other sample
    kinship(data, collect(samplenames(data)), method = method, iterations = iterations, interval = interval, resample = resample, inbreeding = inbreeding)
end
"""
    mergekinship(data::NamedTuple)

A convenience function that takes the `NamedTuple` output from `kinship` performed with bootstrapping
and returns one large DataFrame. The `:n_loci` column is kept only once.
"""
function mergekinship(data::NamedTuple)
    estimator_keys = keys(data)
    # start from a copy of the first estimator's table
    merged = deepcopy(data[first(estimator_keys)])
    # join each remaining estimator table on the sample-pair columns
    for est in estimator_keys[2:end]
        merged = innerjoin(
            merged,
            select(data[est], Not(:n_loci)),
            on = [:sample_1, :sample_2]
        )
    end
    return merged
end
|
[
"MIT"
"""
    sig_within(data::PopData, results::DataFrame, population::String, iterations::Int = 20000)

Permutation test of whether `population` has higher mean within-population kinship
than between-population kinship, using the estimates in `results` (generated from
`data` by `kinship`). Returns a vector of one-tailed P-values, one per estimator
column in `results`. Internal helper for `kinshipposthoc`.
"""
function sig_within(data::PopData, results::DataFrame, population::String, iterations::Int = 20000)
    # add extra columns of population names for both members of each pair
    pop_names = select(data.metadata, :name => :sample_1 ,:population => :pop_1)
    tmp_res = innerjoin(results, pop_names, on = :sample_1)
    select!(pop_names, :sample_1 => :sample_2,:pop_1 => :pop_2)
    tmp_res = innerjoin(tmp_res, pop_names, on = :sample_2)
    tmp_res.groups = collect(zip(tmp_res.pop_1, tmp_res.pop_2))
    tmp_res.group = tmp_res.pop_1 .* tmp_res.pop_2
    select!(tmp_res, Not([:sample_1, :sample_2, :n_loci, :pop_1, :pop_2]))
    size(tmp_res,1) == 0 && error("Samples in the kinship results not found in the provided PopData.")
    estimators = Symbol.(names(tmp_res)[begin:end-2])
    # extract the values for within-population estimates
    within_coeff = [tmp_res[tmp_res.group .== population^2, i] for i in estimators]
    mean_within = mean.(within_coeff)
    # extract the values of the between-population estimates involving this population
    among_coeff = [tmp_res[(population .∈ tmp_res.groups) .& (tmp_res.group .!= population^2), i] for i in estimators]
    mean_among = mean.(among_coeff)
    # observed test statistic: mean(within) - mean(among)
    differ = mean_within - mean_among
    # setup permutations
    n_within = length(within_coeff[1])
    n_among = length(among_coeff[1])
    n_tot = n_within + n_among
    n_range = collect(1:n_tot)
    n_iter = iterations - 1
    bootstrapped = [Vector{Float64}(undef, n_iter) for i in 1:length(estimators)]
    for i in 1:length(estimators)
        # BUG FIX: the null distribution must come from permuting the pooled
        # within + among values for this population. The original sampled rows
        # 1:n_tot of the full `results` table, which are arbitrary pairs
        # unrelated to this population's comparison. Also removed the
        # unsynchronized `iter_count += 1` (a data race across spawned tasks
        # that only fed a commented-out progress meter).
        pooled = vcat(within_coeff[i], among_coeff[i])
        @sync @inbounds for j in 1:n_iter
            Base.Threads.@spawn begin
                idx_within = sample(Xoroshiro128Star(), 1:n_tot, n_within, replace = false)
                idx_among = n_range[Not(idx_within)]
                bootstrapped[i][j] = mean(pooled[idx_within]) - mean(pooled[idx_among])
            end
        end
    end
    # one-tailed permutation P-value per estimator
    [(sum(differ[i] .<= bootstrapped[i]) + 1) / iterations for i in 1:length(estimators)]
end
"""
kinshipposthoc(::PopData, results::DataFrame; iterations::Int)
Performs a posthoc analysis using the resulting DataFrame or NamedTuple
from kinship(). This analysis uses permutations to test if a population has
significantly higher within-population kinship than between-population kinship.
The `results` object must have been generated from the provided `PopData`. Use `iterations = `
to specify the number of iterations for the permutation tests (default = `20000`). **Recommended**
that you use `MultipleTesting.jl` to correct resulting P-values.
**Example**
```
julia> cats = @nancycats ;
julia> rel_out = kinship(cats, method = [Ritland, Moran], iterations = 100);
julia> kinshipposthoc(cats, rel_out)
17x3 DataFrame
Row │ population Ritland_P Moran_P
│ String Float64 Float64
─────┼────────────────────────────────
1 │ 1 5.0e-5 5.0e-5
2 │ 2 5.0e-5 5.0e-5
3 │ 3 5.0e-5 5.0e-5
4 │ 4 5.0e-5 5.0e-5
5 │ 5 5.0e-5 5.0e-5
6 │ 6 5.0e-5 5.0e-5
7 │ 7 5.0e-5 5.0e-5
8 │ 8 5.0e-5 5.0e-5
9 │ 9 5.0e-5 5.0e-5
10 │ 10 5.0e-5 5.0e-5
11 │ 11 5.0e-5 5.0e-5
12 │ 12 5.0e-5 5.0e-5
13 │ 13 5.0e-5 5.0e-5
14 │ 14 5.0e-5 5.0e-5
15 │ 15 5.0e-5 5.0e-5
16 │ 16 5.0e-5 5.0e-5
17 │ 17 5.0e-5 5.0e-5
```
"""
function kinshipposthoc(data::PopData, results::DataFrame; iterations::Int = 20000)
    populations = unique(data.metadata.sampleinfo.population)
    # one P-value column per estimator column present in the results table
    pcols = Symbol.(names(results)[names(results) .∉ Ref(["sample_1", "sample_2", "n_loci"])] .* "_P")
    # run the permutation test once per population
    pvals = [sig_within(data, results, pop, iterations) for pop in populations]
    out = DataFrame(:population => populations)
    for (i, col) in enumerate(pcols)
        out[:, col] = getindex.(pvals, i)
    end
    return out
end
function kinshipposthoc(data::PopData, results::NamedTuple; iterations::Int = 20000)
    # flatten the per-estimator NamedTuple of DataFrames into one table,
    # then defer to the DataFrame method
    estimators = keys(results)
    df = select(results[1], "sample_1", "sample_2", "n_loci")
    for (i, est) in enumerate(estimators)
        df[:, est] = results[i][:, est]
    end
    kinshipposthoc(data, df, iterations = iterations)
end
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 1298 | using MultivariateStats: MulticlassLDA
function dapc(data::PopData, labels::T = [nothing]) where T<:AbstractVector#, covestimator_between=SimpleCovariance(), covestimator_within=SimpleCovariance())
    # use user-supplied labels when given, otherwise group by population
    grouping = labels != [nothing] ? _relabel(labels) : _relabel(data.sampleinfo.population)
    nclasses = length(unique(grouping))
    # PCA scores, transposed to (features × samples) as LDA expects
    pca_scores = permutedims(pca(data).proj)
    lda_model = fit(MulticlassLDA, nclasses, pca_scores, grouping)
    predict(lda_model, pca_scores)
end
function subspacedapc(data::PopData, labels::T = [nothing]; classes::Int64, nda::Int64 = 0, covestimator_between=SimpleCovariance(), covestimator_within=SimpleCovariance()) where T<:AbstractVector
    # use user-supplied labels when given, otherwise group by population
    grouping = labels != [nothing] ? _relabel(labels) : _relabel(data.sampleinfo.population)
    lablen = length(unique(grouping))
    classes > lablen && error("Number of classes ($classes) must be equal or fewer than number of unique labels ($lablen)")
    # PCA scores, transposed to (features × samples) as LDA expects
    pca_mtx = permutedims(pca(data).proj)
    # default output dimension: as many discriminant axes as the data allow
    outdims = iszero(nda) ? min(size(pca_mtx, 1), size(pca_mtx, 2) - 1) : nda
    MultivariateStats.fit(SubspaceLDA, classes, pca_mtx, grouping, outdim = outdims, covestimator_between = covestimator_between, covestimator_within = covestimator_within)
end
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 21197 | ## THIS FILE IS DEPRECATED ##
export relatedness, pairwise_relatedness, kinship
#=TODO
Solve dyadic optimization issues
combined backtracking failure - need to isolate and find cause
#N111 N83
#N63 N64
~1/3 - 1/5 not with optimal solution in cats dataset
Multithread
=#
#= Would be good to include
Implement alternative relatedness metrics
Warning if a not implemented (or typo) of method included
Streamline output
=#
"""
    pr_l_s(x::Tuple, y::Tuple, alleles::Dict)

Calculate the probability of observing the particular allele state given each of
the 9 Jacquard Identity States for a single locus to create Table 1 from
Milligan 2003. `x` and `y` are the two genotypes (allele tuples) and `alleles`
maps the *string form* of each allele to its population frequency. Returns a
9-element vector of Pr(Lᵢ | Sⱼ); a vector of -9s flags an allele configuration
that matched no identity class.
"""
function pr_l_s(x::Tuple, y::Tuple, alleles::Dict)
    # frequency lookup — the Dict is keyed by the stringified allele, which is
    # why the original converted x/y with string.() in every branch
    f(allele) = alleles[string(allele)]
    ## class L1 - AᵢAᵢ AᵢAᵢ ##
    if x[1] == x[2] == y[1] == y[2]
        p = f(x[1])
        [p, p^2, p^2, p^3, p^2, p^3, p^2, p^3, p^4]
    ## class L2 - AᵢAᵢ AⱼAⱼ ##
    elseif (x[1] == x[2]) & (y[1] == y[2]) & (x[1] != y[1])
        p = (f(x[1]), f(y[1]))
        [0, prod(p), 0, prod(p) * p[2], 0, prod(p) * p[1], 0, 0, prod(p) * prod(p)]
    ## class L3a - AᵢAᵢ AᵢAⱼ ## - has issues because of allele order
    elseif ((x[1] == x[2] == y[1]) & (x[1] != y[2]))
        p = (f(x[1]), f(y[2]))
        [0, 0, prod(p), 2 * prod(p) * p[1], 0, 0, 0, prod(p) * p[1], 2 * prod(p) * p[1]^2]
    ## class L3b - AᵢAᵢ AⱼAᵢ ## - has issues because of allele order
    elseif ((x[1] == x[2] == y[2]) & (x[1] != y[1]))
        p = (f(x[1]), f(y[1]))
        [0, 0, prod(p), 2 * prod(p) * p[1], 0, 0, 0, prod(p) * p[1], 2 * prod(p) * p[1]^2]
    ## class L4 - AᵢAᵢ AⱼAₖ ##
    elseif (x[1] == x[2]) & (y[1] != y[2]) & (x[1] != y[1]) & (x[1] != y[2])
        p = (f(x[1]), f(y[1]), f(y[2]))
        [0, 0, 0, 2 * prod(p), 0, 0, 0, 0, 2 * prod(p) * p[1]]
    ## L5a - AiAj AiAi ## - has issues because of allele order
    elseif ((x[1] == y[1] == y[2]) & (x[1] != x[2]))
        p = (f(x[1]), f(x[2]))
        [0, 0, 0, 0, prod(p), 2 * prod(p) * p[1], 0, prod(p) *p[1], 2 * prod(p) * p[1]^2]
    ## L5b - AjAi AiAi ## - has issues because of allele order
    elseif (x[2] == y[1] == y[2] & (x[1] != x[2]))
        p = (f(x[2]), f(x[1]))
        [0, 0, 0, 0, prod(p), 2 * prod(p) * p[1], 0, prod(p) *p[1], 2 * prod(p) * p[1]^2]
    ## L6 - AjAk AiAi ##
    elseif (x[1] != x[2]) & (y[1] == y[2]) & (x[1] != y[1]) & (x[2] != y[1])
        p = (f(y[1]), f(x[1]), f(x[2]))
        [0, 0, 0, 0, 0, 2 * prod(p), 0, 0, 2 * prod(p) * p[1]]
    ## L7 - AiAj AiAj ##
    elseif (x[1] == y[1]) & (x[2] == y[2]) & (x[1] != x[2])
        p = (f(x[1]), f(x[2]))
        [0, 0, 0, 0, 0, 0, 2 * prod(p), prod(p) * sum(p), 4 * prod(p) * prod(p)]
    ## L8a - AiAj AiAk ## - has issues because of allele order
    elseif ((x[1] == y[1]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[2] != y[2]))
        p = (f(x[1]), f(x[2]), f(y[2]))
        [0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
    ## L8b - AjAi AkAi ## - has issues because of allele order
    elseif ((x[2] == y[2]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[1] != y[1]))
        p = (f(x[2]), f(x[1]), f(y[1]))
        [0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
    ## L8c - AjAi AiAk ## - has issues because of allele order
    elseif ((x[2] == y[1]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[1] != y[2]))
        p = (f(x[2]), f(x[1]), f(y[2]))
        [0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
    ## L8d - AiAj AkAi ## - has issues because of allele order
    elseif ((x[1] == y[2]) & (x[1] != x[2]) & (y[1] != y[2]) & (x[1] != y[1]))
        p = (f(x[1]), f(x[2]), f(y[1]))
        [0, 0, 0, 0, 0, 0, 0, prod(p), 4 * prod(p) * p[1]]
    ## L9 - AiAj AkAl ##
    elseif (x[1] != x[2]) & (x[1] != y[1]) & (x[1] != y[2]) & (x[2] != y[1]) & (x[2] != y[2]) & (y[1] != x[2])
        p = (f(x[1]), f(x[2]), f(y[1]), f(y[2]))
        [0, 0, 0, 0, 0, 0, 0, 0, 4 * prod(p)]
    else
        # sentinel for an unclassifiable allele configuration
        [-9, -9, -9, -9, -9, -9, -9, -9, -9]
    end
end
"""
    all_loci_Pr_L_S(data::PopData, ind1::String, ind2::String, alleles::Dict)

Calculate the probability of observing the particular allele state given each of
the 9 Jacquard Identity States across all loci (Table 1 from Milligan 2002).
Loci where one or both individuals have missing genotypes are skipped.
Returns a (loci × 9) transposed matrix of probabilities.
"""
function all_loci_Pr_L_S(data::PopData, ind1::String, ind2::String, alleles::Dict)
    Pr_L_S = Vector{Vector{Float64}}()
    for locus in String.(names(data.genodata))
        # extract the pair of interest's genotypes
        gen1 = genotype(data, sample = ind1, locus = locus)
        gen2 = genotype(data, sample = ind2, locus = locus)
        # skip loci where one or both individuals have missing data
        if gen1 !== missing && gen2 !== missing
            # BUG FIX: a leftover debugging `return tmp` here short-circuited
            # the loop after the first nonmissing locus, making the `push!`
            # unreachable so only a single locus was ever accumulated
            push!(Pr_L_S, pr_l_s(gen1, gen2, alleles[locus]))
        end
    end
    return transpose(hcat(Pr_L_S...))
end
#Pr_L_S_inbreeding = all_loci_Pr_L_S(ind1, ind2, data, allelefrequencies)
"""
    no_inbreeding(Pr_L_S::LinearAlgebra.Transpose{Float64,Array{Float64,2}})

Zero out the probabilities of the Jacquard identity states that can only arise
through inbreeding (states S1-S6, columns 1-6), leaving only the non-inbred
states (S7-S9). Mutates `Pr_L_S` in place and returns it.
"""
function no_inbreeding(Pr_L_S::Transpose{Float64,Array{Float64,2}})
    # in-place broadcast assignment avoids the per-column temporary arrays
    # allocated by the original `0 .* column` loop
    Pr_L_S[:, 1:6] .= 0.0
    return Pr_L_S
end
#Pr_L_S_noinbreeding = dyadic_ML(data, allelefrequencies) |> no_inbreeding
## Calculate Δ coefficients
#Need to either maximize this sum or use it as the likelihood in a bayesian model and sample from the posterior.
#currently only maximum likelihood optimization
"""
    Δ_optim(Pr_L_S::Transpose{Float64,Array{Float64,2}}, verbose::Bool)

Takes the probability of the allelic state given the identity by descent from
all available loci (either allowing for inbreeding or not) and calculated the
maximum likelihood Δ coefficients. Returns `(Δ::Vector, status)` where Δ has 9
entries (Δ1 is derived as `1 - sum(Δ2:Δ9)`).

NOTE(review): a second `Δ_optim` method with an identical signature (the JuMP
variant) is defined later in this file and silently replaces this one when the
file is loaded — confirm which implementation is intended.
"""
function Δ_optim(Pr_L_S::Transpose{Float64,Array{Float64,2}}, verbose::Bool = true)
    #Δ is what needs to be optimized
    #consist of 9 values between 0 and 1 which must also sum to 1
    #is then used to calculate relatedness
    # Only Δ2..Δ9 are free variables; Δ1 is implied by the sum-to-one constraint
    Δ = Variable(8)
    #problem = maximize(sum(log(Pr_L_S * vcat(1 - sum(Δ), Δ))))
    # NOTE(review): maximizes the sum of per-locus likelihoods, not the
    # log-likelihood (the log form above is commented out) — confirm intended
    problem = maximize(sum(Pr_L_S * vcat(1 - sum(Δ), Δ)))
    # 0 ≤ Δ1 ≤ 1 (expressed via 1 - sum(Δ2:Δ9))
    problem.constraints += 0 <= 1 - sum(Δ)
    problem.constraints += 1 - sum(Δ) <= 1
    # 0 ≤ Δ2..Δ9 ≤ 1
    problem.constraints += 0 <= Δ[1:8]
    problem.constraints += Δ[1:8] <= 1
    #shifted from actual relatedness calculations because the 1 - sum(Δ) goes at beginning
    # problem.constraints += 2 * ((1 - sum(Δ)) + 0.5 * (Δ[2] + Δ[4] + Δ[6]) + 0.25 * Δ[7]) <= 1
    # problem.constraints += 0 <= 2 * ((1 - sum(Δ)) + 0.5 * (Δ[2] + Δ[4] + Δ[6]) + 0.25 * Δ[7])
    Convex.solve!(problem, ECOSSolver(verbose = verbose, maxit = 100), verbose = verbose) #maxit=100,
    #Convex.solve!(problem, ECOSSolver(verbose = verbose, maxit=100, feastol=5e-6, reltol = 1e-3, reltol_inacc = 5e-2), verbose = verbose)
    #Convex.solve!(problem, SCSSolver(verbose = verbose, max_iters = 100), verbose = verbose)
    # prepend the implied Δ1 and return alongside the solver status
    vcat(1-sum(Δ.value), Δ.value), problem.status
    # Should probably include some output that confirms that it did in fact
    # converge and/or use multiple random starts to confirm not a local maxima
end
# JuMP variant
# NOTE(review): this stub is INCOMPLETE and, because it has the same signature,
# it silently overwrites the working Convex-based Δ_optim defined above.
# It never calls optimize!, never reads the solution, and implicitly returns
# the last @constraint instead of (Δ values, status). The objective also
# applies scalar `log` to a vector expression, which will not run as written.
function Δ_optim(Pr_L_S::Transpose{Float64,Array{Float64,2}}, verbose::Bool = true)
    #Δ is what needs to be optimized
    #consist of 9 values between 0 and 1 which must also sum to 1
    #is then used to calculate relatedness
    jacquard = Model(ECOS.Optimizer)
    # all 9 Δ coefficients as free variables
    @variable(jacquard, Δ[i=1:9])
    @objective(jacquard, Max, sum(log(Pr_L_S * Δ)))
    # Δ must be a probability simplex: sum to 1, each in [0, 1]
    @constraint(jacquard, con, sum(Δ) == 1)
    @constraint(jacquard, con_bounds, 0 .<= Δ .<= 1)
end
## Calculate theta and r
"""
    relatedness_from_Δ(Δ::Array{Float64,2})

Convert the nine Jacquard Δ coefficients (with or without inbreeding allowed)
into the coefficient of relatedness r = 2θ, where θ is the coancestry coefficient.
"""
function relatedness_from_Δ(Δ::Array{Float64,2})
    # θ = Δ1 + (Δ3 + Δ5 + Δ7)/2 + Δ8/4
    coancestry = Δ[1] + 0.5 * (Δ[3] + Δ[5] + Δ[7]) + 0.25 * Δ[8]
    return 2 * coancestry
end
#relatedness_dyadicML(Δ_inbreeding)
#relatedness_dyadicML(Δ_noinbreeding)
#Relatedness R package appears to have a bug. When allow.inbreeding = TRUE the relatedness value is the same as when I assume no inbreeding
#when you set allow.inbreeding = FALSE then the relatedness calculated is the same as when I assume there is inbreeding
"""
    dyadicML_relatedness(data::PopObj, ind1::String, ind2::String; alleles::Dict, inbreeding::Bool = true, verbose::Bool = true)

Calculates the dyadic maximum likelihood relatedness using all available loci following
Milligan 2002 dyadic relatedness - https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1462494/pdf/12663552.pdf
Inbreeding can either be assumed not to occur (inbreeding = false) or be
included in the calculation of the relatedness coefficient. `verbose` controls the
verbosity of the optimization used to find the maximum likelihood Δ coefficients.
Returns `(relatedness, Δ, solver_status)`.
"""
function dyadicML_relatedness(data::PopObj, ind1::String, ind2::String; alleles::Dict, inbreeding::Bool = true, verbose::Bool = true)
    #TODO - calculate inbreeding coeffificients Fx & Fy and include in r calculation
    #Coancestry Manual
    #Fx = sum(Δ[1:4])
    #Fy = sum(Δ[1,2,5,6])
    #rxy = (2Δ[1] + Δ[3] + Δ[5] + Δ[7] + (1/2)Δ[8]) / sqrt((1 + Fx) * (1 + Fy))
    state_probs = all_loci_Pr_L_S(data, ind1, ind2, alleles)
    # drop the inbreeding-only identity states when inbreeding is disallowed
    inbreeding || no_inbreeding(state_probs)
    Δ, status = Δ_optim(state_probs, verbose)
    return relatedness_from_Δ(Δ), Δ, status
end
"""
    qg_relatedness(data::PopData, ind1::String, ind2::String; alleles::NamedTuple)

Calculates the moments based estimator of pairwise relatedness developed by Queller & Goodnight (1989).
- Bases allele frequencies on entire population
- Inbreeding can only be assumed not to exist.

See equation 3 in: https://www.nature.com/articles/hdy201752 for variant of estimator used
"""
function qg_relatedness(data::PopData, ind1::String, ind2::String; alleles::T) where T <: NamedTuple
    #NEED TO CHECK TO CONFIRM EQUATIONS
    num1 = 0.0
    num2 = 0.0
    den1 = 0.0
    den2 = 0.0
    for loc in loci(data)
        # extract the pair of interest's genotypes
        g1 = genotype(data, sample = ind1, locus = loc)
        g2 = genotype(data, sample = ind2, locus = loc)
        # skip loci missing in either individual
        (g1 === missing || g2 === missing) && continue
        a, b = g1
        c, d = g2
        locsym = Symbol(loc)
        # count of shared alleles between the two genotypes
        shared = sum((a == c, a == d, b == c, b == d))
        num1 += shared - 2.0 * (alleles[locsym][a] + alleles[locsym][b])
        num2 += shared - 2.0 * (alleles[locsym][c] + alleles[locsym][d])
        den1 += 2.0 * (1.0 + (a == b) - alleles[locsym][a] - alleles[locsym][b])
        den2 += 2.0 * (1.0 + (c == d) - alleles[locsym][c] - alleles[locsym][d])
    end
    # average of the two reciprocal estimates
    return (num1 / den1 + num2 / den2) / 2.0
end
"""
    ritland_relatedness(data::PopData, ind1::String, ind2::String; alleles::NamedTuple)

Calculates the moments based estimator of pairwise relatedness proposed by Li and Horvitz (1953) and implemented/made popular by Ritland (1996).
- Bases allele frequencies on entire population
- Inbreeding can only be assumed not to exist.

See equation 7 in: https://www.nature.com/articles/hdy201752 for variant of estimator used
Ritland original citation: https://www.cambridge.org/core/journals/genetics-research/article/estimators-for-pairwise-relatedness-and-individual-inbreeding-coefficients/9AE218BF6BF09CCCE18121AA63561CF7
"""
function ritland_relatedness(data::PopData, ind1::String, ind2::String; alleles::T) where T <: NamedTuple
    #NEED TO CHECK TO CONFIRM EQUATIONS
    # BUG FIX: the original used `n`/`d` as accumulators, but `c, d = gen2`
    # clobbered `d` with the second allele every iteration, corrupting the
    # weighted denominator. Renamed accumulators to avoid the shadowing.
    numer = 0.0
    denom = 0.0
    for loc in loci(data)
        # extract the pair of interest's genotypes
        gen1 = genotype(data, sample = ind1, locus = loc)
        gen2 = genotype(data, sample = ind2, locus = loc)
        # skip loci missing in either individual
        if gen1 !== missing && gen2 !== missing
            a, b = gen1
            c, d = gen2
            sym_loc = Symbol(loc)
            # locus weight: number of alleles minus one
            A = length(alleles[sym_loc]) - 1
            R = 0.0
            for allele in keys(alleles[sym_loc])
                # individual locus relatedness value (eq 7 in paper)
                R += (((a == allele) + (b == allele)) * ((c == allele) + (d == allele))) / (4.0 * alleles[sym_loc][allele])
            end
            R = (2 / A) * (R - 1.0)
            # numerator for weighted combination of loci
            numer += R * A
            # denominator for weighted combination of loci
            denom += A
        end
    end
    return numer / denom
end
"""
    lr_relatedness(data::PopData, ind1::String, ind2::String; alleles::NamedTuple)

Calculates the moments based estimator of pairwise relatedness by Ritland (1996).
- Bases allele frequencies on entire population
- Inbreeding can only be assumed not to exist.

See equation 10 in: https://www.nature.com/articles/hdy201752 for variant of estimator used
Ritland original citation: https://www.cambridge.org/core/journals/genetics-research/article/estimators-for-pairwise-relatedness-and-individual-inbreeding-coefficients/9AE218BF6BF09CCCE18121AA63561CF7
"""
function lr_relatedness(data::PopData, ind1::String, ind2::String; alleles::T) where T <: NamedTuple
    #NEED TO CHECK TO CONFIRM EQUATIONS
    # BUG FIX: the original accumulated into `n`/`d`, but `c, d = geno2`
    # clobbered `d` with the second allele of each genotype, so the final
    # denominator was (last allele value + last locus weight). Renamed the
    # accumulators so the destructured alleles cannot shadow them.
    numer = 0.0
    denom = 0.0
    # extract the pair of interest's genotypes
    gen1 = genotypes(data, ind1)
    gen2 = genotypes(data, ind2)
    # iterate only over loci where both genotypes are nonmissing
    for (loc, geno1, geno2) in zip(skipmissings(Symbol.(loci(data)), gen1, gen2)...)
        a, b = geno1
        c, d = geno2
        n1 = alleles[loc][a] * ((b == c) + (b == d)) + alleles[loc][b] * ((a == c) + (a == d)) - 4.0 * alleles[loc][a] * alleles[loc][b]
        n2 = alleles[loc][c] * ((d == a) + (d == b)) + alleles[loc][d] * ((c == a) + (c == b)) - 4.0 * alleles[loc][c] * alleles[loc][d]
        d1 = 2.0 * (1.0 + (a == b)) * (alleles[loc][a] + alleles[loc][b]) - 8.0 * alleles[loc][a] * alleles[loc][b]
        d2 = 2.0 * (1.0 + (c == d)) * (alleles[loc][c] + alleles[loc][d]) - 8.0 * alleles[loc][c] * alleles[loc][d]
        numer += (n1 / d1) + (n2 / d2) #JDS - CHECK THIS IS CORRECT
        # per-locus weight: number of alleles minus one
        denom += length(alleles[loc]) - 1
    end
    return numer / denom
end
"""
    pairwise_relatedness(data::PopData; method::String = "qg", inbreeding::Bool = true, verbose::Bool = true)

Calculates pairwise relatedness between all pairs of individuals, with allele
frequencies based on the entire sample population. Returns a DataFrame with
columns `:sample_1`, `:sample_2`, and `:relatedness_<method>`.

Available methods:
- `"qg"` : Queller & Goodnight 1989 (diploid only)

The `inbreeding` and `verbose` keywords are accepted for interface compatibility;
the `"qg"` estimator uses neither.
"""
function pairwise_relatedness(data::PopData; method::String = "qg", inbreeding::Bool = true, verbose::Bool = true)
    # check that dataset is entirely diploid
    all(data.metadata.sampleinfo.ploidy .== 2) == false && error("Relatedness analyses currently only support diploid samples")
    # per-locus allele frequencies keyed by locus name
    allelefrequencies = NamedTuple{Tuple(Symbol.(loci(data)))}(
        Tuple(allelefreq.(locus.(Ref(data), loci(data))))
    )
    sample_names = samplenames(data)
    # all unordered pairs, in deterministic (i < j) order
    sample_pairs = [tuple(sample_names[i], sample_names[j]) for i in 1:length(sample_names)-1 for j in i+1:length(sample_names)]
    relate_vec = zeros(length(sample_pairs))
    if method == "qg"
        # BUG FIX: the original incremented a shared, non-atomic `idx` counter
        # inside a @threads loop — a data race that could scramble which pair
        # each estimate was stored under (and drop/duplicate slots). Each task
        # now derives its destination index directly from the pair list.
        Base.Threads.@threads for idx in 1:length(sample_pairs)
            ind1, ind2 = sample_pairs[idx]
            relate_vec[idx] = qg_relatedness(data, ind1, ind2, alleles = allelefrequencies)
        end
    end
    method_colname = Symbol("relatedness_" * method)
    return DataFrame(:sample_1 => getindex.(sample_pairs, 1), :sample_2 => getindex.(sample_pairs, 2), method_colname => relate_vec)
end
end
# Multithreaded version - requires the "Combinatorics" package for `combinations`
function pairwise_relatedness(data::PopObj; method::String, inbreeding::Bool = true, verbose::Bool = true)
    # check that dataset is entirely diploid
    all(data.samples.ploidy .== 2) == false && error("Relatedness analyses currently only support diploid samples")
    # per-locus allele frequencies keyed by locus name
    allelefrequencies = Dict()
    for locus in names(data.genodata)
        allelefrequencies[String(locus)] = allelefreq(data.genodata[:, locus])
    end
    n = size(data.samples)[1]
    n = n*(n-1) ÷ 2    # number of unordered sample pairs
    if !verbose
        prog = Progress(n, 1)
    end
    # output layout depends on the estimator (e.g. convergence is only
    # meaningful for the maximum-likelihood estimator, not moments estimators)
    if method == "dyadml"
        output = DataFrame([String, String, Float64, Float64, Float64, Symbol], Symbol.(["Ind1", "Ind2", "I", "thread", "relatedness", "convergence"]), n)
    elseif method == "qg"
        output = DataFrame([String, String, Float64, Float64, Float64], Symbol.(["Ind1", "Ind2", "I", "thread", "relatedness"]), n)
    else
        # previously an unknown method left `output` undefined and errored later
        throw(ArgumentError("method \"$method\" is not recognized; available methods: \"qg\", \"dyadml\""))
    end
    pairs = combinations(data.samples.name, 2) |> collect    # requires Combinatorics
    Threads.@threads for i in 1:n
        ind1 = pairs[i][1]
        ind2 = pairs[i][2]
        output[i, :I] = i
        output[i, :Ind1] = ind1
        output[i, :Ind2] = ind2
        output[i, :thread] = Threads.threadid()
        if method == "qg"
            output[i, :relatedness] = qg_relatedness(data, ind1, ind2, alleles = allelefrequencies)
        elseif method == "dyadml"
            dyad_out = dyadicML_relatedness(data, ind1, ind2, alleles = allelefrequencies, inbreeding = inbreeding, verbose = verbose)
            output[i, :relatedness] = dyad_out[1]
            output[i, :convergence] = dyad_out[3]
        end
        # `prog` only exists when a progress bar was requested (verbose = false);
        # the old unconditional call raised UndefVarError when verbose = true
        !verbose && next!(prog)
    end
    return output
end
# convenience aliases so the estimator can be called by its common names
const relatedness = pairwise_relatedness
const kinship = pairwise_relatedness
# - `"dyadml"` : Milligan 2002 Dyadic Maximum Likelihood relatedness estimator
#=
cat_rel_noInbreeding = pairwise_relatedness(@nancycats, method = "dyadml", inbreeding = false, verbose = false)
cat_rel_Inbreeding = pairwise_relatedness(@nancycats, method = "dyadml", inbreeding = true, verbose = false)
cat_rel_qg = pairwise_relatedness(@nancycats, method = "qg", verbose = false)
=#
#=
Testing area
cat_rel_noInbreeding = pairwise_relatedness(@nancycats, method = "dyadml", inbreeding = false, verbose = false)
cat_rel_Inbreeding = pairwise_relatedness(@nancycats, method = "dyadml", inbreeding = true, verbose = false)
count(cat_rel_noInbreeding[i,4] == :Optimal for i in 1:27966)
count(cat_rel_Inbreeding[i,4] == :Optimal for i in 1:27966)
"N100"
"N106"
=#
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 11060 | ## Method 1
"""
    get_uncondensed_state(ind1::T, ind2::T) where T <: Tuple

Classify a pair of ordered diploid genotypes `(i, j)` and `(k, l)` into one of
the 15 uncondensed identity states, based solely on which of the four alleles
are equal to one another.
"""
function get_uncondensed_state(ind1::T, ind2::T) where T <: Tuple
    i, j = ind1
    k, l = ind2
    if i == j
        # first genotype is homozygous
        if k == l
            return i == k ? 1 : 6
        elseif i == k
            return 2
        elseif i == l
            return 3
        else
            return 7
        end
    elseif k == l
        # second genotype homozygous, first heterozygous
        if i == k
            return 4
        elseif j == k
            return 5
        else
            return 8
        end
    elseif i == k
        return j == l ? 9 : 10
    elseif i == l
        return j == k ? 12 : 13
    elseif j == l
        return 11
    elseif j == k
        return 14
    else
        # all four alleles pairwise distinct
        return 15
    end
end
# Pr(observed genotype pair | identity state) over the 9 condensed Jacquard
# states, expressed as functions of allele frequencies. sN corresponds to
# uncondensed state N returned by `get_uncondensed_state`; the trailing
# comment on each line maps it to its condensed-state/class label (D*/S*).
# `p` is the frequency vector [p_i, p_j, p_k, p_l] for the allele pair
# (i,j) of individual 1 and (k,l) of individual 2.
s1(p) = [p[1], p[1]^2, p[1]^2, p[1]^3, p[1]^2, p[1]^3, p[1]^2, p[1]^3, p[1]^4]
s2(p) = [0.0, 0.0, prod(p[[1,4]]), 2.0 * prod(p[[1,4]]) * p[1], 0.0, 0.0, 0.0, prod(p[[1,4]]) * p[1], 2.0 * prod(p[[1,4]]) * p[1]^2] #D3/S2
s3(p) = [0.0, 0.0, prod(p[[1,3]]), 2.0 * prod(p[[1,3]]) * p[1], 0.0, 0.0, 0.0, prod(p[[1,3]]) * p[1], 2.0 * prod(p[[1,3]]) * p[1]^2] #D3/S3
s4(p) = [0.0, 0.0, 0.0, 0.0, prod(p[[1,2]]), 2.0 * prod(p[[1,2]]) * p[1], 0.0, prod(p[[1,2]]) *p[1], 2.0 * prod(p[[1,2]]) * p[1]^2] #D5/S4
s5(p) = [0.0, 0.0, 0.0, 0.0, prod(p[[1,2]]), 2.0 * prod(p[[1,2]]) * p[2], 0.0, prod(p[[1,2]]) *p[2], 2.0 * prod(p[[1,2]]) * p[2]^2] #D5/S5
s6(p) = [0.0, prod(p[[1,3]]), 0.0, prod(p[[1,3]]) * p[3], 0.0, prod(p[[1,3]]) * p[1], 0.0, 0.0, prod(p[[1,3]])^2] #D2/S6
s7(p) = [0.0, 0.0, 0.0, 2.0 * prod(p[[1,3,4]]), 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,3,4]]) * p[1]] #D4/S7
s8(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,2,3]]), 0.0, 0.0, 2.0 * prod(p[[1,2,3]]) * p[3]] #D6/S8
s9(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,2]]), prod(p[[1,2]]) * sum(p[[1,2]]), 4.0 * prod(p[[1,2]])^2] #D7/S9
s10(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,4]]), 4.0 * prod(p[[1,2,4]]) * p[1]] #D8/S10
s11(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,3]]), 4.0 * prod(p[[1,2,3]]) * p[2]] #D8/S11
s12(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,2]]), prod(p[[1,2]]) * sum(p[[1,2]]), 4.0 * prod(p[[1,2]])^2] #D7/S12
s13(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,3]]), 4.0 * prod(p[[1,2,3]]) * p[1]] #D8/S13
s14(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,4]]), 4.0 * prod(p[[1,2,4]]) * p[2]] #D8/S14
s15(p) = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0 * prod(p)] #D9/S15
# dispatch table indexed by the uncondensed state number (1-15)
δ = (s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15)
"""
    probability_states(ind1::T, ind2::T, alleles::Dict) where T <: Tuple

Look up the allele frequencies of both genotypes and evaluate the
state-specific probability function (from `δ`) selected by
`get_uncondensed_state`, returning the 9-element probability vector.
"""
function probability_states(ind1::T, ind2::T, alleles::Dict) where T <: Tuple
    a1, a2 = ind1
    b1, b2 = ind2
    freqs = [alleles[a1], alleles[a2], alleles[b1], alleles[b2]]
    state = get_uncondensed_state(ind1, ind2)
    return δ[state](freqs)
end
# Tuple-returning variants of the per-state probability functions above.
# NOTE(review): these redefine the same names s1..s15 with the same
# signature, so they REPLACE the Vector-returning methods above — after this
# point `δ` (which holds the same function objects) also yields tuples.
# Rename one set (e.g. s1_t) if both behaviors are meant to coexist.
s1(p) = (p[1], p[1]^2, p[1]^2, p[1]^3, p[1]^2, p[1]^3, p[1]^2, p[1]^3, p[1]^4)
s2(p) = (0.0, 0.0, prod(p[[1,4]]), 2.0 * prod(p[[1,4]]) * p[1], 0.0, 0.0, 0.0, prod(p[[1,4]]) * p[1], 2.0 * prod(p[[1,4]]) * p[1]^2) #D3/S2
s3(p) = (0.0, 0.0, prod(p[[1,3]]), 2.0 * prod(p[[1,3]]) * p[1], 0.0, 0.0, 0.0, prod(p[[1,3]]) * p[1], 2.0 * prod(p[[1,3]]) * p[1]^2) #D3/S3
s4(p) = (0.0, 0.0, 0.0, 0.0, prod(p[[1,2]]), 2.0 * prod(p[[1,2]]) * p[1], 0.0, prod(p[[1,2]]) *p[1], 2.0 * prod(p[[1,2]]) * p[1]^2) #D5/S4
s5(p) = (0.0, 0.0, 0.0, 0.0, prod(p[[1,2]]), 2.0 * prod(p[[1,2]]) * p[2], 0.0, prod(p[[1,2]]) *p[2], 2.0 * prod(p[[1,2]]) * p[2]^2) #D5/S5
s6(p) = (0.0, prod(p[[1,3]]), 0.0, prod(p[[1,3]]) * p[3], 0.0, prod(p[[1,3]]) * p[1], 0.0, 0.0, prod(p[[1,3]])^2) #D2/S6
s7(p) = (0.0, 0.0, 0.0, 2.0 * prod(p[[1,3,4]]), 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,3,4]]) * p[1]) #D4/S7
s8(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,2,3]]), 0.0, 0.0, 2.0 * prod(p[[1,2,3]]) * p[3]) #D6/S8
s9(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,2]]), prod(p[[1,2]]) * sum(p[[1,2]]), 4.0 * prod(p[[1,2]])^2) #D7/S9
s10(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,4]]), 4.0 * prod(p[[1,2,4]]) * p[1]) #D8/S10
s11(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,3]]), 4.0 * prod(p[[1,2,3]]) * p[2]) #D8/S11
s12(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p[[1,2]]), prod(p[[1,2]]) * sum(p[[1,2]]), 4.0 * prod(p[[1,2]])^2) #D7/S12
s13(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,3]]), 4.0 * prod(p[[1,2,3]]) * p[1]) #D8/S13
s14(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p[[1,2,4]]), 4.0 * prod(p[[1,2,4]]) * p[2]) #D8/S14
s15(p) = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0 * prod(p)) #D9/S15
# dispatch table for the tuple-returning variants
δ_t = (s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15)
"""
    probability_states_tuple(ind1::T, ind2::T, alleles::Dict) where T <: Tuple

Tuple-returning counterpart of `probability_states`: look up the allele
frequencies of both genotypes and evaluate the state-specific probability
function (from `δ_t`) selected by `get_uncondensed_state`.
"""
function probability_states_tuple(ind1::T, ind2::T, alleles::Dict) where T <: Tuple
    a1, a2 = ind1
    b1, b2 = ind2
    freqs = [alleles[a1], alleles[a2], alleles[b1], alleles[b2]]
    return δ_t[get_uncondensed_state(ind1, ind2)](freqs)
end
## Method 2
# Edge lists (pairs of allele positions holding identical alleles)
# characterizing the 9 condensed Jacquard identity states. Positions 1,2 are
# individual one's two alleles; positions 3,4 are individual two's.
S1 = [1 2; 1 3; 1 4; 2 3; 2 4; 3 4]
S2 = [1 2; 3 4]
S3 = [1 2; 1 3; 2 3]
S4 = [1 2]
S5 = [1 3; 1 4; 3 4]
S6 = [3 4]
S7 = [1 3; 2 4]
S8 = [1 3]
# S9 = no identical pairs at all: filtering [1 3] on "first column != 1"
# deliberately yields an empty 0x2 matrix
S9 = [1 3][[1 3][:,1] .!= 1, :]
# reference patterns indexed by condensed state number (1-9)
ibd_states = [S1, S2, S3, S4, S5, S6, S7, S8, S9]
"""
    get_jacquard_state(ind1::T, ind2::T, ibd_states = ibd_states) where T <: Tuple

Determine which of the 9 condensed Jacquard states a pair of genotypes falls
into by building the matrix of identical allele-position pairs and matching
it against the reference patterns in `ibd_states`.
Returns 0 when the pattern matches none of the reference states.
"""
function get_jacquard_state(ind1::T, ind2::T, ibd_states = ibd_states) where T <: Tuple
# alleles present in both genotypes; 0 is appended as a sentinel so the
# broadcasted membership tests below always have a non-empty target
the_shared = [intersect(ind1, ind2)...,0]
the_lone = [setdiff(ind1, ind2)..., setdiff(ind2, ind1)...,0]
# re-sort each genotype so shared alleles come first — the reference
# patterns assume this canonical ordering
ind1_resort = [ind1[(sum(ind1 .∈ the_shared',dims=2) .>= 1)[:,1]]..., ind1[(sum(ind1 .∈ the_lone',dims=2) .>= 1)[:,1]]...]
ind2_resort = [ind2[(sum(ind2 .∈ the_shared',dims=2) .>= 1)[:,1]]..., ind2[(sum(ind2 .∈ the_lone',dims=2) .>= 1)[:,1]]...]
adj_mat = Array{Int8}(undef, 6, 2)
idx = 0
# record each position pair (i,j) holding identical alleles; pairs that do
# not match are zeroed out and filtered away below
for i in 1:3, j in i+1:4
idx += 1
adj_mat[idx,1:2] = [i,j] .* (1.0 * ([ind1_resort..., ind2_resort...][i] == [ind1_resort..., ind2_resort...][j]))
end
adj_mat = adj_mat[adj_mat[:,1] .> 0,:]
jaq_state = 0
# whole-array equality against each reference pattern; at most one matches,
# and `i * true` picks up that state number
for i in 1:9
jaq_state += i * (adj_mat == ibd_states[i])
end
return jaq_state
end
## Single type?
# A pair of genotypes bundled with their pre-computed Jacquard identity
# state (1-9; any other value hits the sentinel fallback in
# `probability_state(::JacquardPair, ...)`).
struct JacquardPair
genotype1::Genotype
genotype2::Genotype
state::Int
end
"""
    probability_state(genos::JacquardPair, alleles::Dict)::Vector{Float64}

Return the 9-element vector of Pr(observed genotype pair | condensed Jacquard
state) for the pre-classified pair in `genos`, using the allele-frequency
lookup `alleles`. An unrecognized state returns a vector of -9.0 sentinels.
"""
function probability_state(genos::JacquardPair, alleles::Dict)::Vector{Float64}
state = genos.state
if state == 1
# state 1: all four alleles identical
p = alleles[genos.genotype1[1]]
[p, p^2, p^2, p^3, p^2, p^3, p^2, p^3, p^4]
elseif state == 2
# state 2: each genotype homozygous for a different allele
p = (alleles[genos.genotype1[1]], alleles[genos.genotype2[1]])
[0.0, prod(p), 0.0, prod(p) * p[2], 0.0, prod(p) * p[1], 0.0, 0.0, prod(p)^2]
elseif state == 3
# state 3: genotype1 homozygous, genotype2 shares that allele
# NOTE(review): uses genotype2[2] as the non-shared allele — assumes the
# shared allele sits in position 1 of genotype2; confirm ordering upstream
p = (alleles[genos.genotype1[1]], alleles[genos.genotype2[2]])
[0.0, 0.0, prod(p), 2.0 * prod(p) * p[1], 0.0, 0.0, 0.0, prod(p) * p[1], 2.0 * prod(p) * p[1]^2]
elseif state == 4
# state 4: genotype1 homozygous, genotype2 heterozygous with two other alleles
p = (alleles[genos.genotype1[1]], alleles[genos.genotype2[1]], alleles[genos.genotype2[2]])
[0.0, 0.0, 0.0, 2.0 * prod(p), 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p) * p[1]]
elseif state == 5
# state 5: genotype2 homozygous, genotype1 shares that allele
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]])
[0.0, 0.0, 0.0, 0.0, prod(p), 2.0 * prod(p) * p[1], 0.0, prod(p) *p[1], 2.0 * prod(p) * p[1]^2]
elseif state == 6
# state 6: genotype2 homozygous, genotype1 heterozygous with two other alleles
p = (alleles[genos.genotype2[1]], alleles[genos.genotype1[1]], alleles[genos.genotype1[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p), 0.0, 0.0, 2.0 * prod(p) * p[1]]
elseif state == 7
# state 7: both heterozygous for the same two alleles
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p), prod(p) * sum(p), 4.0 * prod(p)^2]
elseif state == 8
# state 8: heterozygotes sharing exactly one allele
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]], alleles[genos.genotype2[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p), 4.0 * prod(p) * p[1]]
elseif state == 9
# state 9: four distinct alleles
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]], alleles[genos.genotype2[1]], alleles[genos.genotype2[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0 * prod(p)]
else
# sentinel for an unrecognized state
[-9.0, -9.0, -9.0, -9.0, -9.0, -9.0, -9.0, -9.0, -9.0]
end
end
## multiple types?
# Alternative design: one wrapper type per Jacquard identity state (plus
# JacquardZero for "no recognized state") so `probability_state` can select
# the per-state probability formula via multiple dispatch instead of the
# `if state == N` chain used for `JacquardPair` above.
struct JacquardZero
genotype1::Genotype
genotype2::Genotype
end
struct JacquardOne
genotype1::Genotype
genotype2::Genotype
end
struct JacquardTwo
genotype1::Genotype
genotype2::Genotype
end
struct JacquardThree
genotype1::Genotype
genotype2::Genotype
end
struct JacquardFour
genotype1::Genotype
genotype2::Genotype
end
struct JacquardFive
genotype1::Genotype
genotype2::Genotype
end
struct JacquardSix
genotype1::Genotype
genotype2::Genotype
end
struct JacquardSeven
genotype1::Genotype
genotype2::Genotype
end
struct JacquardEight
genotype1::Genotype
genotype2::Genotype
end
struct JacquardNine
genotype1::Genotype
genotype2::Genotype
end
# Dispatch-based Pr(genotype pair | state) methods, one per Jacquard-state
# wrapper type; each returns the 9-element condensed-state probability vector.
function probability_state(genos::JacquardZero, alleles::Dict)::Vector{Float64}
# sentinel "invalid/unknown state" vector
[-9.0, -9.0, -9.0, -9.0, -9.0, -9.0, -9.0, -9.0, -9.0]
end
function probability_state(genos::JacquardOne, alleles::Dict)::Vector{Float64}
p = alleles[genos.genotype1[1]]
[p, p^2, p^2, p^3, p^2, p^3, p^2, p^3, p^4]
end
function probability_state(genos::JacquardTwo, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[1]], alleles[genos.genotype2[1]])
[0.0, prod(p), 0.0, prod(p) * p[2], 0.0, prod(p) * p[1], 0.0, 0.0, prod(p) * prod(p)]
end
# NOTE(review): uses genotype2[1] here, while the equivalent state-3 branch of
# probability_state(::JacquardPair, ...) uses genotype2[2] — confirm which
# allele position is intended before relying on either.
function probability_state(genos::JacquardThree, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[1]], alleles[genos.genotype2[1]])
[0.0, 0.0, prod(p), 2.0 * prod(p) * p[1], 0.0, 0.0, 0.0, prod(p) * p[1], 2.0 * prod(p) * p[1]^2]
end
function probability_state(genos::JacquardFour, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[1]], alleles[genos.genotype2[1]], alleles[genos.genotype2[2]])
[0.0, 0.0, 0.0, 2.0 * prod(p), 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p) * p[1]]
end
# NOTE(review): allele order (genotype1[2], genotype1[1]) is swapped relative
# to the state-5 branch of probability_state(::JacquardPair, ...); verify.
function probability_state(genos::JacquardFive, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[2]], alleles[genos.genotype1[1]])
[0.0, 0.0, 0.0, 0.0, prod(p), 2.0 * prod(p) * p[1], 0.0, prod(p) *p[1], 2.0 * prod(p) * p[1]^2]
end
function probability_state(genos::JacquardSix, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype2[1]], alleles[genos.genotype1[1]], alleles[genos.genotype1[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p), 0.0, 0.0, 2.0 * prod(p) * p[1]]
end
function probability_state(genos::JacquardSeven, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0 * prod(p), prod(p) * sum(p), 4 * prod(p) * prod(p)]
end
function probability_state(genos::JacquardEight, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]], alleles[genos.genotype2[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, prod(p), 4 * prod(p) * p[1]]
end
function probability_state(genos::JacquardNine, alleles::Dict)::Vector{Float64}
p = (alleles[genos.genotype1[1]], alleles[genos.genotype1[2]], alleles[genos.genotype2[1]], alleles[genos.genotype2[2]])
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4 * prod(p)]
end
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 1057 | #Run all lines of code in PopGen.jl
#Then read in all functions within PairwiseRelatedness.jl
# Scratch/testing script: exercises the dyadic maximum-likelihood relatedness
# machinery on the nancycats dataset with the Convex.jl / ECOS solver.
verbose = true
data = @nancycats
alleles = Dict()
# population-wide allele frequencies per locus
for loc in loci(data)
alleles[loc] = allelefreq(locus(data, loc))
end
alleles
#Working pair
ind1 = "N100"
ind2 = "N106"
#Not working pair ex.
ind1 = "N111"
ind2 = "N83"
# per-locus likelihood matrix Pr(L | S) for the chosen pair
L = all_loci_Pr_L_S(data, ind1, ind2, alleles)
# Δ holds 8 of the 9 Jacquard coefficients; the 9th is 1 - sum(Δ)
Δ = Variable(8)
problem = maximize(sum(log(L * vcat(1 - sum(Δ), Δ))))
# coefficients must form a valid probability simplex
problem.constraints += 0 <= 1 - sum(Δ)
problem.constraints += 1 - sum(Δ) <= 1
problem.constraints += 0 <= Δ[1:8]
problem.constraints += Δ[1:8] <= 1
#shifted from actual relatedness calculations because the 1 - sum(Δ) goes at beginning
problem.constraints += 2 * ((1 - sum(Δ)) + 0.5 * (Δ[2] + Δ[4] + Δ[6]) + 0.25 * Δ[7]) <= 1
problem.constraints += 0 <= 2 * ((1 - sum(Δ)) + 0.5 * (Δ[2] + Δ[4] + Δ[6]) + 0.25 * Δ[7])
Convex.solve!(problem, ECOSSolver(verbose = verbose, maxit = 100), verbose = verbose)
# alternative softmax-style parameterization of the 9 coefficients
n = Variable(9)
problem2 = minimize(sum(log(L * exp(n)) / sum(exp(n))))
Convex.solve!(problem2, ECOSSolver())
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 268 |
"""
    pwsimilarhelper(x::Int8, y::Int8)

Compare two per-allele values (presumably counts from `matrix(data, "count")`
— see `pwsimilar`; -1 appears to mark missing data — TODO confirm) and return:
`missing` if either value is -1, `false` if both are 0, `true` if both are
nonzero, and `false` when exactly one is 0.
"""
function pwsimilarhelper(x::Int8, y::Int8)
    # NOTE: `|`/`&` bind tighter than `==` in Julia, so the original guards
    # like `x == -1 | y == -1` parsed as chained comparisons against `-1 | y`
    # rather than the intended "either value is -1" test. Fixed with `||`/`&&`.
    (x == -1 || y == -1) && return missing
    (x == 0 && y == 0) && return false
    x == y && return true
    (x != 0 && y != 0) && return true
    return false
end
# NOTE(review): stub — builds the sample-by-allele count matrix but never
# computes or returns the pairwise similarity (presumably meant to apply
# `pwsimilarhelper` across rows); finish or remove before release.
function pwsimilar(data::PopData)
allelemtx = matrix(data, "count")
end
|
[
"MIT"
"""
    probstates(ind1::Tuple, ind2::Tuple)

Classify a pair of diploid genotypes into one of the 9 condensed Jacquard
identity states by recording which of the four allele positions
(1,2 = individual one; 3,4 = individual two) carry identical alleles.
Returns 0 when the identity pattern matches none of the 9 reference states
(e.g. cross-matches such as position 1 == 4 with 2 == 3, which the patterns
below do not cover).
"""
function probstates(ind1::Tuple, ind2::Tuple)
    # the four alleles in canonical position order
    g = (ind1[1], ind1[2], ind2[1], ind2[2])
    # A[e] = 1 when the e-th position pair holds identical alleles, with the
    # six pairs enumerated as (1,2), (1,3), (1,4), (2,3), (2,4), (3,4).
    # The original built this edge index but hit an unconditional
    # `return Edge` before ever comparing alleles, leaving the classifier
    # below dead; it also compared tuple *values* against position indices.
    A = zeros(Int, 6)
    e = 0
    for i in 1:3, j in (i + 1):4
        e += 1
        A[e] = g[i] == g[j] ? 1 : 0
    end
    A == [1, 1, 1, 1, 1, 1] && return 1
    A == [1, 0, 0, 0, 0, 1] && return 2
    A == [1, 1, 0, 1, 0, 0] && return 3
    A == [1, 0, 0, 0, 0, 0] && return 4
    A == [0, 1, 1, 0, 0, 1] && return 5
    A == [0, 0, 0, 0, 0, 1] && return 6
    A == [0, 1, 0, 0, 1, 0] && return 7
    A == [0, 1, 0, 0, 0, 0] && return 8
    A == [0, 0, 0, 0, 0, 0] && return 9
    return 0
end
####################
####################
#=
Calculate Pr(Li | Sj)
If the allele identity falls into this class (L1-L9), generate the
probabilities of it belonging to each of the different classes and
return that array of 9 distinct probabilities
=#
"""
    probability_state_table(x::Tuple, y::Tuple, alleles::Dict)

Calculate Pr(Li | Sj): classify the allele-identity configuration of the
genotype pair `x`, `y` (classes L1-L9) and return the 9-element vector of
probabilities of observing it under each condensed Jacquard state.
Returns a vector of -9 sentinels for an unrecognized configuration.

Fixes over the original: the L3a/L3b/L5a/L5b branches referenced an
undefined `p` via `prod(p)`; the L5b guard `x2 == y1 == y2 != x2` was
self-contradictory (now `!= x1`); the L8d and L9 guards were broken by `&`
binding tighter than `==`/`!=` (now explicit `&&` chains, matching the
intent recorded in the original's comments); and the L1 branch returned a
tuple while every other branch returned a vector (now a vector throughout).
"""
function probability_state_table(x::Tuple, y::Tuple, alleles::Dict)
    #TODO Improve how groups are decided based on how similar things are done with moments estimators
    x1, x2 = x
    y1, y2 = y
    ## class L1 - AiAi AiAi ##
    if x1 == x2 == y1 == y2
        p = alleles[x1]
        [p, p^2, p^2, p^3, p^2, p^3, p^2, p^3, p^4]
    ## class L2 - AiAi AjAj ##
    elseif x1 == x2 != y1 == y2
        p1, p2 = alleles[x1], alleles[y1]
        prod_p = p1 * p2
        [0, prod_p, 0, prod_p * p2, 0, prod_p * p1, 0, 0, prod_p^2]
    ## class L3a - AiAi AiAj ##
    elseif x1 == x2 == y1 != y2
        p1, p2 = alleles[x1], alleles[y2]
        prod_p = p1 * p2
        [0, 0, prod_p, 2 * prod_p * p1, 0, 0, 0, prod_p * p1, 2 * prod_p * p1^2]
    ## class L3b - AiAi AjAi ##
    elseif x1 == x2 == y2 != y1
        p1, p2 = alleles[x1], alleles[y1]
        prod_p = p1 * p2
        [0, 0, prod_p, 2 * prod_p * p1, 0, 0, 0, prod_p * p1, 2 * prod_p * p1^2]
    ## class L4 - AiAi AjAk ##
    elseif (x1 == x2) && (y1 != y2) && (x1 != y1) && (x1 != y2)
        p = (alleles[x1], alleles[y1], alleles[y2])
        prod_p = prod(p)
        [0, 0, 0, 2 * prod_p, 0, 0, 0, 0, 2 * prod_p * p[1]]
    ## class L5a - AiAj AiAi ##
    elseif x1 == y1 == y2 != x2
        p1, p2 = alleles[x1], alleles[x2]
        prod_p = p1 * p2
        [0, 0, 0, 0, prod_p, 2 * prod_p * p1, 0, prod_p * p1, 2 * prod_p * p1^2]
    ## class L5b - AjAi AiAi ##
    elseif x2 == y1 == y2 != x1
        p1, p2 = alleles[x2], alleles[x1]
        prod_p = p1 * p2
        [0, 0, 0, 0, prod_p, 2 * prod_p * p1, 0, prod_p * p1, 2 * prod_p * p1^2]
    ## class L6 - AjAk AiAi ##
    elseif (x1 != x2) && (y1 == y2) && (x1 != y1) && (x2 != y1)
        p = (alleles[y1], alleles[x1], alleles[x2])
        prod_p = prod(p)
        [0, 0, 0, 0, 0, 2 * prod_p, 0, 0, 2 * prod_p * p[1]]
    ## class L7 - AiAj AiAj ##
    elseif (x1 == y1) && (x2 == y2) && (x1 != x2)
        p = (alleles[x1], alleles[x2])
        prod_p = prod(p)
        [0, 0, 0, 0, 0, 0, 2 * prod_p, prod_p * sum(p), 4 * prod_p * prod_p]
    ## class L8a - AiAj AiAk ##
    elseif (x1 == y1) && (x1 != x2) && (y1 != y2) && (x2 != y2)
        p = (alleles[x1], alleles[x2], alleles[y2])
        prod_p = prod(p)
        [0, 0, 0, 0, 0, 0, 0, prod_p, 4 * prod_p * p[1]]
    ## class L8b - AjAi AkAi ##
    elseif (x2 == y2) && (x1 != x2) && (y1 != y2) && (x1 != y1)
        p = (alleles[x2], alleles[x1], alleles[y1])
        prod_p = prod(p)
        [0, 0, 0, 0, 0, 0, 0, prod_p, 4 * prod_p * p[1]]
    ## class L8c - AjAi AiAk ##
    elseif (x2 == y1) && (x1 != x2) && (y1 != y2) && (x1 != y2)
        p = (alleles[x2], alleles[x1], alleles[y2])
        prod_p = prod(p)
        [0, 0, 0, 0, 0, 0, 0, prod_p, 4 * prod_p * p[1]]
    ## class L8d - AiAj AkAi ##
    elseif (x1 == y2) && (x1 != x2) && (y1 != y2) && (x1 != y1)
        p = (alleles[x1], alleles[x2], alleles[y1])
        prod_p = prod(p)
        [0, 0, 0, 0, 0, 0, 0, prod_p, 4 * prod_p * p[1]]
    ## class L9 - AiAj AkAl ##
    elseif (x1 != x2) && (x1 != y1) && (x1 != y2) && (x2 != y1) && (x2 != y2) && (y1 != y2)
        p = (alleles[x1], alleles[x2], alleles[y1], alleles[y2])
        [0, 0, 0, 0, 0, 0, 0, 0, 4 * prod(p)]
    else
        # sentinel for an unrecognized configuration
        [-9, -9, -9, -9, -9, -9, -9, -9, -9]
    end
end
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 1631 | module TestClustering
# Tests for the clustering wrappers (kmeans, kmedoids, hclust, fuzzy c-means,
# dbscan) on the nancycats example dataset; each test only checks that the
# wrapper returns the expected Clustering.jl result type.
using PopGen
using Test
using Clustering
using DataFrames
using Distances
cats = @nancycats;
@testset "Clustering.jl" begin
@testset "k-means" begin
@test kmeans(cats, k = 2) isa Clustering.KmeansResult
@test kmeans(cats, k = 2, matrixtype = :freq) isa Clustering.KmeansResult
@test kmeans(cats, k = 2, iterations = 30) isa Clustering.KmeansResult
end
@testset "k-medoids" begin
@test kmedoids(cats, k = 2) isa Clustering.KmedoidsResult
@test kmedoids(cats, k = 2, distance = sqeuclidean) isa Clustering.KmedoidsResult
@test kmedoids(cats, k = 2, distance = sqeuclidean, matrixtype = :freq) isa Clustering.KmedoidsResult
end
@testset "h-clust" begin
hcout = hclust(cats)
@test hcout isa Hclust
@test hclust(cats, matrixtype = :freq) isa Hclust
@test hclust(cats, distance = sqeuclidean) isa Hclust
@test cutree(cats, hcout, krange = 2:5) isa DataFrame
end
@testset "fuzzy-c" begin
@test fuzzycmeans(cats, c = 2) isa FuzzyCMeansResult
@test fuzzycmeans(cats, c = 2, fuzziness = 3) isa FuzzyCMeansResult
@test fuzzycmeans(cats, c = 2, iterations = 30) isa FuzzyCMeansResult
@test fuzzycmeans(cats, c = 2, matrixtype = :freq) isa FuzzyCMeansResult
end
@testset "dbscan" begin
@test dbscan(cats, radius = 0.5, minpoints = 2) isa DbscanResult
@test dbscan(cats, radius = 0.5, minpoints = 2, distance = sqeuclidean) isa DbscanResult
@test dbscan(cats, radius = 0.5, matrixtype = :freq) isa DbscanResult
end
end
end #module
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 1021 | module TestDataExploration
# Tests for the data-exploration API: missing-data summaries, pairwise
# identical-genotype matrices, and genotype/allele frequency tables.
using PopGen
using DataFrames
using Test
cats = @nancycats;
@testset "Data Exploration" begin
@testset "missing data" begin
@test missingdata(cats) isa DataFrame
@test missingdata(cats, by = "population") isa DataFrame
@test missingdata(cats, by = "locus") isa DataFrame
@test missingdata(cats, by = "locusxpopulation") isa DataFrame
end
@testset "pairwise identical" begin
pw_i = pairwiseidentical(cats)
@test pw_i isa AbstractMatrix
# nancycats has 237 samples, so the full matrix is 237x237
@test size(pw_i) == (237, 237)
pw_i_2 = pairwiseidentical(cats, cats.sampleinfo.name[1:10])
@test pw_i_2 isa AbstractMatrix
@test size(pw_i_2) == (10, 10)
end
@testset "frequency tables" begin
@test genofreqtable(cats) isa DataFrame
@test genofreqtable(cats, by = "population") isa DataFrame
@test allelefreqtable(cats) isa DataFrame
@test allelefreqtable(cats, by = "population") isa DataFrame
end
end
end # module
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 510 | module TestHardyWeinberg
# Tests for Hardy-Weinberg equilibrium: the per-locus chi-squared internals
# and the `hwetest` output shapes with/without multiple-testing correction.
using PopGen
using DataFrames
using Test
cats = @nancycats;
testarray = cats.genodata.genotype[1:10]
@testset "Hardy Weinberg" begin
# (chi-squared statistic, degrees of freedom, p-value)
@test PopGen._chisqlocus(testarray) == (5.135802469135802, 6, 0.5265173055755079)
tmp = hwetest(cats)
@test tmp isa DataFrame
@test size(tmp) == (9,4)
# "bh" (Benjamini-Hochberg) correction appends a corrected-p column
tmp = hwetest(cats, correction = "bh")
@test size(tmp) == (9,5)
tmp = hwetest(cats, by = "population", correction = "bh")
@test size(tmp) == (153,6)
end
end #module
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 1615 | module TestHeterozygosity
# Tests for heterozygosity: het/hom genotype counting, observed/expected
# heterozygosity, Nei (1987) gene diversity, and the `heterozygosity` API.
using PopGen
using DataFrames
using Test
cats = @nancycats;
testarray = cats.genodata.genotype[1:10]
@testset "Heterozygosity.jl" begin
@testset "counthet" begin
@test PopGen.counthet(testarray, 135) == 5
@test PopGen.counthet(testarray, 143) == 4
@test PopGen.counthet(testarray, [135,143]) == [5,4]
end
@testset "counthom" begin
@test PopGen.counthom(testarray, 135) == 2
@test PopGen.counthom(testarray, 143) == 0
@test PopGen.counthom(testarray, [135,143]) == [2,0]
end
@testset "het obs/exp" begin
@test PopGen._hetero_obs(testarray) == 0.75
@test PopGen._hetero_exp(testarray) == 0.6015625
end
@testset "Nei het" begin
# missing inputs must propagate to a missing result
@test ismissing(PopGen._genediversitynei87(missing,0.5,11))
@test ismissing(PopGen._genediversitynei87(0.5,missing,11))
@test ismissing(PopGen._genediversitynei87(missing,missing,11))
@test PopGen._genediversitynei87(0.5,0.5,11) == 0.525
@test PopGen._genediversitynei87(0.5,0.5,11, corr = false) == 0.4772727272727273
end
@testset "heterozygosity" begin
tmp = heterozygosity(cats)
@test tmp isa DataFrame
@test size(tmp) == (9,4)
tmp = heterozygosity(cats, by = "sample")
@test tmp isa DataFrame
@test size(tmp) == (237,3)
tmp = heterozygosity(cats, by = "population")
@test tmp isa DataFrame
@test size(tmp) == (17,4)
@test samplehet(cats, "N215") == 3/8
# unknown sample names must raise, not silently return
@test_throws ArgumentError samplehet(cats, "M115")
end
end
end #module
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 509 | module TestPCA
# Tests for PCA: projection matrix dimensions under the maxpc / center /
# scale / pratio keyword options.
using PopGen
using Test
cats = @nancycats;
@testset "PCA.jl" begin
@testset "pca" begin
@test size(pca(cats).proj) == (237,93)
@test size(pca(cats, maxpc = 25).proj) == (237,25)
# NOTE(review): duplicate of the previous test — was one of these meant to
# vary a keyword?
@test size(pca(cats, maxpc = 25).proj) == (237,25)
@test size(pca(cats, maxpc = 25, center = true).proj) == (237,25)
@test size(pca(cats, maxpc = 25, scale = false).proj) == (237,25)
@test size(pca(cats, pratio = 0.7).proj) == (237,39)
end
end
end #module
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 841 | fatalerrors = length(ARGS) > 0 && ARGS[1] == "-f"
# CLI flags: `-f` aborts on the first failing test file, `-q` suppresses
# stack traces for failures
quiet = length(ARGS) > 0 && ARGS[1] == "-q"
# running tally of test files that raised an error
anyerrors = 0
using PopGen
using Test
# test files executed in order; each is a self-contained module
all_tests = [
"dataexploration.jl",
"summaryinfo.jl",
"heterozygosity.jl",
"hardyweinberg.jl",
"clustering.jl",
"pca.jl"
]
println("Running tests:")
@testset "All Tests" begin
for a_test in all_tests
try
include(a_test)
println("\t\033[1m\033[32mPASSED\033[0m: $(a_test)")
catch e
# `global` because the loop body is a fresh scope inside the @testset block
global anyerrors += 1
println("\t\033[1m\033[31mFAILED\033[0m: $(a_test)")
if fatalerrors
rethrow(e)
elseif !quiet
showerror(stdout, e, backtrace())
println()
end
end
end
end
# NOTE(review): throws a raw String rather than an Exception — `error(...)`
# would be the idiomatic choice; kept as-is to preserve behavior
anyerrors > 0 && throw("$anyerrors files of tests failed :(")
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | code | 897 | module SummaryInfo
# Tests for summary statistics: mean allele counts, allelic richness, and the
# global / per-locus F- and D-statistic summary tables.
using PopGen
using DataFrames
using Test
cats = @nancycats;
@testset "alleleaverage" begin
@test length(alleleaverage(cats, rounding = true)) == 2
@test alleleaverage(cats, rounding = true) isa NamedTuple
@test length(alleleaverage(cats, rounding = false)) == 2
@test alleleaverage(cats, rounding = false) isa NamedTuple
end
@testset "richness" begin
@test richness(cats, by = "locus") isa DataFrame
@test size(richness(cats, by = "locus")) == (9, 2)
@test richness(cats, by = "population") isa DataFrame
@test size(richness(cats, by = "population")) == (153, 3)
end
@testset "summary F/D/etc. stats" begin
smry_glob = summary(cats)
smry_loc = summary(cats, by = "locus")
@test smry_glob isa DataFrame
@test smry_loc isa DataFrame
@test size(smry_glob) == (1, 10)
@test size(smry_loc) ==(9, 11)
end
end # module
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 5218 | # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 3212 |
# Contributing
We welcome contributions in the form of pull requests. For your code to be
considered it must meet the following guidelines.
* By making a pull request, you're agreeing to license your code under an MIT
license. See LICENSE.md.
* Types and functions must be documented using Julia's [docstrings](http://docs.julialang.org/en/latest/manual/documentation/).
Documentation regarding specific implementation details that aren't relevant
to users should be in the form of comments.
Documentation may be omitted if the function is not exported (i.e. only used
internally) and is short and obvious. E.g. `cube(x) = x^3`.
* All significant code must be tested. Tests should be organized into
contexts, and into separate files based on module.
* Contributions are included if the code has been reviewed by at least two
team members who are **not** the author of the proposed contribution,
and there is general consensus (or general lack of objections) that it's useful
and fits with the intended scope of PopGen.jl.
* Code must be consistent with the prevailing style in PopGen.jl, which includes,
but is not necessarily limited to the following style guide.
* Code contributed should be compatible with Julia v1.0.
## Style
* Indent with 4 spaces.
* Type names are camel case, with the first letter capitalized. E.g.
`SomeVeryUsefulType`.
* Module names are also camel case.
* Function names, apart from constructors, are all lowercase. Include
underscores between words only if the name would be hard to read without.
E.g. `start`, `stop`, `findletter` `find_last_digit`.
* Generally try to keep lines below 80-columns, unless splitting a long line
onto multiple lines makes it harder to read.
* Files that declare modules should only declare the module, and import any
modules that it requires. Any code should
be included from separate files. E.g.
```julia
module AwesomeFeatures
using IntervalTrees, JSON
include("feature1.jl")
include("feature2.jl")
end
```
* Files that declare modules should have the same name as the module, e.g
the module `SomeModule` is declared under the file `SomeModule.jl`.
* Separate logical blocks of code with one blank line, or two blank lines for
function/type definitions.
* When extending method definitions, explicitly import the method.
```julia
import Base: start, next, done
```
* Document functions using bare docstrings before a definition:
```julia
"This function foo's something"
foo(x) = 2*x
```
or
```julia
"""
This function foo's something
"""
foo(x) = 2*x
```
* Functions that get or set variables in a type should not be prefixed with 'get' or 'set'. The getter should be named for the variable it accesses, and the setter should have the same name as the getter, with the suffix `!`. For example, for the variable `names`:
```julia
name(node) # get node name
name!(node, "somename") # set node name
```
## Conduct
We adhere to the Julia [community standards](http://julialang.org/community/standards/).
 | PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 1146 | 
Population Genetics in Julia.
[](https://BioJulia.net/PopGen.jl/)
[](https://zenodo.org/badge/latestdoi/204318876)

### How to install:
Invoke the package manager by pressing `]` on an empty line, then type `add PopGen`

----
<h3 align="center"> Cite As </h3>
Pavel V. Dimens, & Jason Selwyn. (2022). BioJulia/PopGen.jl: v0.8.0 (v0.8.0). Zenodo. https://doi.org/10.5281/zenodo.6450254
----
<h3 align="center"> Authors </h3>
[](https://orcid.org/0000-0003-3823-0373) [](https://twitter.com/PVDimens) Pavel Dimens
[](http://orcid.org/0000-0002-9100-217X) [](https://twitter.com/JasonSelwyn) Jason Selwyn
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 2614 | # A clear and descriptive title (No issue numbers please)
> _This template is extensive, so fill out all that you can. If you are a new contributor or unsure about any section, leave it empty and a reviewer will help you_ :smile:. _If anything in this template isn't relevant, just delete or ignore it._
## Types of changes
This PR implements the following changes:
_(tick any that apply)_
* [ ] :sparkles: New feature (A non-breaking change which adds functionality).
* [ ] :bug: Bug fix (A non-breaking change, which fixes an issue).
* [ ] :boom: Breaking change (fix or feature that would cause existing functionality to change).
## :clipboard: Additional details
>_replace this block of text with your information_
- If you have implemented new features or behaviour
- **Provide a description of the addition** in as many details as possible.
- **Provide justification of the addition**.
- **Provide a runnable example of use of your addition**. This lets reviewers
and others try out the feature before it is merged or makes its way to release.
- If you have changed current behaviour...
- **Describe the behaviour prior to you changes**
- **Describe the behaviour after your changes** and justify why you have made the changes,
Please describe any breakages you anticipate as a result of these changes.
- **Does your change alter APIs or existing exposed methods/types?**
If so, this may cause dependency issues and breakages, so the maintainer
will need to consider this when versioning the next release.
- If you are implementing changes that are intended to increase performance, you
should provide the results of a simple performance benchmark exercise
demonstrating the improvement. Especially if the changes make code less legible.
## :ballot_box_with_check: Checklist
>_it's ok if not all the boxes are checked :smile:_
- [ ] :art: The changes implemented is consistent with the [julia style guide](https://docs.julialang.org/en/v1/manual/style-guide/).
- [ ] :blue_book: I have updated and added relevant docstrings, in a manner consistent with the [documentation styleguide](https://docs.julialang.org/en/v1/manual/documentation/).
- [ ] :blue_book: I have added or updated relevant user and developer manuals/documentation in `docs/src/`.
- [ ] :ok: There are unit tests that cover the code changes I have made.
- [ ] :ok: The unit tests cover my code changes AND they pass.
- [ ] :ok: All changes should be compatible with the latest stable version of Julia.
- [ ] :thought_balloon: I have commented liberally for any complex pieces of internal code.
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 246 | ---
name: Bug report
about: Create a report to help us improve
title: "[bug]"
labels: bug
assignees: ''
---
**description**
**minimal example to reproduce**
```julia
```
**expected behavior**
**screenshots (optional)**
**additional info**
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 300 | ---
name: Feature request
about: Suggest a new feature
title: "[feature]"
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem and which?**
**Describe the solution/feature you'd like (with examples)**
```julia
```
**are there alternatives?**
**additional info**
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"MIT"
] | 0.9.4 | 5ae136673280da008bcf55e551553a254fd463ca | docs | 165 | ---
name: Question
about: Questions about how/why things in PopGen.jl work
title: ''
labels: help wanted, question
assignees: ''
---
**what needs clarification?**
| PopGen | https://github.com/BioJulia/PopGen.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 804 | # Demonstrate MASH system
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import RSDeltaSigmaPort: BoundingBox
import Printf: @sprintf
j=im
#==
===============================================================================#
ABCD = [
1 0 0 0 1 -1 0;
1 1 0 0 0 -2 0;
0 1 1 0 0 0 -1;
0 0 1 1 0 0 -2;
0 1 0 0 0 0 0;
0 0 0 1 0 0 0;
]
nlev = [9 9]
(ntf, stf) = calculateTF(ABCD, [1,1])
println()
@error("Need to get minreal() calcualtion to work in calculateTF() to proceed.")
println()
@info("TODO: Port rest of code once this is done.")
throw(:ERROR)
ncf1 = -ntf[2,1] #2*(z-0.5)^2/z^4
ncf2 = ntf[1,1] #(z-1)^2/z^2
#stf_eff = stf[1,1]*ncf1 + stf[2,1]*ncf2
stf_eff = cancelPZ( _zpk(_tf(stf[1,1]*ncf1) + tf(stf[2,1]*ncf2)) )
ntf_eff = ntf(2,2)*ncf2 #(z-1)^4/z^4
:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 1275 | #Lowpass and Bandpass Demonstration Script
include("dsexample1_fn.jl")
#Reminder: nlev=M+1
function wait_for_user(dsm)
println("\nSimulation complete for modulator:\n\t$dsm")
println("\n*** Press ENTER to continue ***")
readline(stdin)
end
#2nd-order lowpass
dsm = RealDSM(order=2, OSR=16, M=8, Hinf=2.0, opt=0)
dsexample1(dsm, LiveDemo=true)
wait_for_user(dsm)
#5th-order lowpass
dsm = RealDSM(order=5, OSR=16, M=8, Hinf=2.0, opt=0)
dsexample1(dsm, LiveDemo=true)
wait_for_user(dsm)
#5th-order lowpass with optimized zeros
dsm = RealDSM(order=5, OSR=16, M=8, Hinf=2.0, opt=1)
dsexample1(dsm, LiveDemo=true)
wait_for_user(dsm)
#5th-order lowpass with optimized zeros and larger Hinf
dsm = RealDSM(order=5, OSR=16, M=8, Hinf=3.0, opt=1)
dsexample1(dsm, LiveDemo=true)
wait_for_user(dsm)
#7th-order lowpass; Hinf=2
dsm = RealDSM(order=7, OSR=8, M=16, Hinf=2.0, opt=1)
dsexample1(dsm, ampdB=-6.0, LiveDemo=true)
wait_for_user(dsm)
#7th-order lowpass; Hinf=8
dsm = RealDSM(order=7, OSR=8, M=16, Hinf=8.0, opt=1)
dsexample1(dsm, ampdB=-6.0, LiveDemo=true)
wait_for_user(dsm)
#6th-order bandpass
f0 = 1/6 #Normalized center frequency
dsm = RealDSM(order=6, OSR=16, M=8, f0=f0, Hinf=2.0, opt=1)
dsexample1(dsm, ampdB=-6.0, LiveDemo=true)
wait_for_user(dsm)
:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 2290 | # Demonstrate NTF synthesis (synthesizeNTF)
using RSDeltaSigmaPort
j=im
#==Baseband modulator
===============================================================================#
println("\n*** 5th order, 2-level, baseband modulator")
OSR = 32
@info("Synthesizing NTF without zero-optimization..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
NTF_noopt = synthesizeNTF(5, OSR, opt=0)
println("\tdone.")
plot = plotNTF(NTF_noopt, OSR, color=:blue)
plot.title = "5th-Order Modulator (No Zero-Optimization)"
saveimage(:png, "dsdemo1_o5_noopt.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Synthesizing NTF with optimized zeros..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
NTF_opt = synthesizeNTF(5, OSR, opt=1)
println("\tdone.")
plot = plotNTF(NTF_opt, OSR, color=:red)
plot.title = "5th-Order Modulator (Optimized Zeros)"
saveimage(:png, "dsdemo1_o5_zopt.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Plotting NTF comparison (overlay results)")
#-------------------------------------------------------------------------------
plot = plotNTF(NTF_noopt, OSR, color=:blue)
plot = plotNTF!(plot, NTF_opt, OSR, color=:red)
plot.title = "5th-Order Modulator (Optimized Zeros - Overlay)"
saveimage(:png, "dsdemo1_o5_cmp.png", plot, AR=2/1, width=900)
displaygui(plot)
#==Bandpass modulator
===============================================================================#
println("\n*** 8th order, 2-level, bandpass modulator")
OSR = 64
order = 8
f0 = 0.125 #fs/8
function calcSTF(order, OSR, NTF, f0)
G = _zpk(zeros(array_round(order/2)),NTF.p,1,1)
G.k = 1/abs(evalTF(G,exp(2π*j*f0)))
return G
end
@info("Synthesizing NTF..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
NTF = synthesizeNTF(order, OSR, opt=2, f0=f0)
println("\tdone.")
@info("Plotting NTF")
#-------------------------------------------------------------------------------
STF = calcSTF(order, OSR, NTF, f0)
plot = plotNTF(NTF, OSR, color=:blue, f0=f0, STF=STF)
plot.title = "8th-Order Bandpass Modulator"
saveimage(:png, "dsdemo1_o8_bp.png", plot, AR=2/1, width=900)
displaygui(plot)
:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 5989 | # Demonstrate simulateDSM, (simulateSNR and predictSNR) => calcSNRInfo
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import RSDeltaSigmaPort: BoundingBox
import Printf: @sprintf
j=im
#==Baseband modulator
===============================================================================#
println("\n*** 5th order, 2-level, baseband modulator")
OSR = 32
N = 8192
dsm = RealDSM(order=5, OSR=OSR, Hinf=1.5, opt=1)
@info("Performing ΔΣ simulation..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
ftest = dsm.f0 + 1/(3*OSR); fband = default_fband(dsm)
fband[1] += 2/N #Ignore first 2 low-frequency points
(u, iftest) = genTestTone(dsm, dbv(0.5), ftest, N=N)
NTF = synthesizeNTF(dsm)
simresult = simulateDSM(u, NTF)
println("\tdone.")
@info("Plotting modulator signals")
#-------------------------------------------------------------------------------
plot = plotModTransient(u, simresult.v)
set(plot, xyaxes=set(xmin=0, xmax=300, ymin=-1.2, ymax=1.2))
saveimage(:png, "dsdemo2_o5_sig.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Plotting output spectrum (simulated vs theory)")
#-------------------------------------------------------------------------------
specinfo = calcSpecInfo(simresult.v, NTF, fband, ftest)
plot = plotModSpectrum(specinfo)
plot.title="Modulator Output Spectrum @ OSR = $OSR."
saveimage(:png, "dsdemo2_o5_spec.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Plotting SNR vs input power")
#-------------------------------------------------------------------------------
snrinfo = calcSNRInfo(dsm, NTF=NTF)
plot = plotSNR(snrinfo, dsm)
plot.title="SNR curve- theory and simulation"
saveimage(:png, "dsdemo2_o5_snr.png", plot, AR=2/1, width=900)
displaygui(plot)
#==Bandpass modulator
===============================================================================#
println("\n*** 8th order, 2-level, bandpass modulator")
OSR=64
f0 = 1/8
N = 8192
dsm = RealDSM(order=8, OSR=OSR, f0=f0, Hinf=1.5, opt=1)
display(dsm)
@info("Performing ΔΣ simulation..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
ftest = dsm.f0 + 1/(6*OSR); fband = default_fband(dsm)
(u, iftest) = genTestTone(dsm, dbv(0.5), ftest, N=N) #Half-scale sine-wave input
NTF = synthesizeNTF(dsm)
simresult = simulateDSM(u, NTF)
println("\tdone.")
@info("Plotting modulator signals")
#-------------------------------------------------------------------------------
plot = plotModTransient(u, simresult.v)
set(plot, xyaxes=set(xmin=0, xmax=300, ymin=-1.2, ymax=1.2))
saveimage(:png, "dsdemo2_bp_o5_sig.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Plotting output spectrum (simulated vs theory)")
#-------------------------------------------------------------------------------
specinfo = calcSpecInfo(simresult.v, NTF, fband, ftest)
plot = plotModSpectrum(specinfo)
plot.title="Modulator Output Spectrum @ OSR = $OSR."
saveimage(:png, "dsdemo2_bp_o5_spec.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Plotting SNR vs input power")
#-------------------------------------------------------------------------------
snrinfo = calcSNRInfo(dsm, NTF=NTF)
plot = plotSNR(snrinfo, dsm)
plot.title="SNR curve- theory and simulation"
saveimage(:png, "dsdemo2_bp_o5_snr.png", plot, AR=2/1, width=900)
displaygui(plot)
#==15-step baseband modulator
===============================================================================#
println("\n*** 7th order, 15-step, baseband modulator")
OSR = 8
M = 16 #Shouldn'nt this be 15 (nlev=16?)
N = 8192
Hinf_list = [2.0, 8.0]
dsm_list = [RealDSM(order=7, OSR=OSR, M=M, opt=1, Hinf=Hinf) for Hinf in Hinf_list]
color_list = [:blue, :green]
@info("Performing ΔΣ simulation for H(∞)=$(Hinf_list)..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
id_list = [@sprintf("H(∞)=%.1f", Hinf) for Hinf in Hinf_list]
ftest = 1/(7*OSR); fband = default_fband(OSR)
fband[1] += 2/N #Ignore first 2 low-frequency points
(u, iftest) = genTestTone(dsm, dbv(0.5*M), ftest, N=N) #Half-scale sine-wave input
NTF_list = [synthesizeNTF(dsm) for dsm in dsm_list]
v_list = [simulateDSM(u, NTF, nlev=M+1).v for NTF in NTF_list] #simulateDSM(..).v is mod output, v.
println("\tdone.")
@info("Plotting input/output characteristics of ΔΣ simulations")
#-------------------------------------------------------------------------------
ioplotc = cons(:plot_collection, title="15-step / 7th-order / H(∞)=$(Hinf_list)")
#Plot input & output transients:
for (v, c) in zip(v_list, color_list) #Each simulated output
local plot = plotModTransient(u, v, color=c, legend=false)
set(plot, xyaxes=set(xmin=0, xmax=100, ymin=-16, ymax=16))
push!(ioplotc, plot)
end
#Append SNR vs input curves:
snrinfo = calcSNRInfo(dsm_list[1], NTF=NTF_list[1])
plot = plotSNR(snrinfo, dsm_list[1], color=color_list[1])
snrinfo = calcSNRInfo(dsm_list[2], NTF=NTF_list[2])
plotSNR!(plot, snrinfo, dsm_list[2], color=color_list[2])
set(plot, xyaxes=set(xmin=-100, xmax=0, ymin=0, ymax=120))
push!(ioplotc, plot)
#Specify plot locations to help readability:
ioplotc.bblist = [ #Format plot locations
BoundingBox(0, 0.5, 0, 0.5), #1st modulator transient
BoundingBox(0, 0.5, 0.5, 1), #2nd modulator transient
BoundingBox(0.5, 1, 0, 1), #SNR curve
]
saveimage(:png, "dsdemo2_o7_15s_io.png", ioplotc, AR=2/1, width=900)
displaygui(ioplotc)
@info("Plotting output spectrum (simulated vs theory)")
#-------------------------------------------------------------------------------
plot = plotModSpectrum()
plot.title="Modulator Output Spectrum @ OSR = $OSR."
set(plot, xyaxes=set(ymin=-160))
for i in keys(NTF_list)
local specinfo = calcSpecInfo(v_list[i], NTF_list[i], fband, ftest, M=M)
plotModSpectrum!(plot, specinfo, id=id_list[i], color=color_list[i])
end
saveimage(:png, "dsdemo2_o7_15s_spec.png", plot, AR=2/1, width=900)
displaygui(plot)
:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 2177 | # Realization and dynamic range scaling
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import Printf: @sprintf
j=im
function stringify(vA, digits=6)
#ctx=IOContext(stdout, :compact=>true)
#IOContext does not support # of digits
#_s(v) = @sprintf("%.6f", v) #Cannot parameterize format specifier
return [round(v, digits=6) for v in vA]
end
#==
===============================================================================#
println("\n*** Modulator realization and scaling")
OSR=42
order=5; opt=1
NTF = synthesizeNTF(order, OSR, opt=opt)
plot = plotNTF(NTF, OSR)
plot.title = string(order, "th-Order Modulator")
#saveimage(:png, "dsdemo3_ntf.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Realizing NTF..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
a,g,b,c = realizeNTF(NTF)
b = [b[1] zeros(1,length(b)-1)] #Use a single feed-in for the input
println("\nUnscaled modulator")
println(" DAC feedback coefficients = ", join(stringify(a), ", "))
println(" resonator feedback coefficients = ", join(stringify(g), ", "))
println()
@info("Calculating the state maxima...")
#-------------------------------------------------------------------------------
ABCD = stuffABCD(a,g,b,c)
u = range(0, stop=0.6, length=30)
N = 10^4
plot = plotStateMaxima(u, ABCD, N=N)
displaygui(plot)
println()
@info("Calculating the scaled coefficients...")
#-------------------------------------------------------------------------------
ABCDs, umax = scaleABCD(ABCD, N=N)
as, gs, bs, cs = mapABCD(ABCDs)
println("\nScaled modulator, umax=", @sprintf("%.2f", umax))
println(" DAC feedback coefficients = ", join(stringify(as), ", "))
println(" resonator feedback coefficients = ", join(stringify(gs), ", "))
println(" interstage coefficients = ", join(stringify(cs), ", "))
println(" feed-in coefficients = ", join(stringify(bs), ", "))
println()
@info("Calculating the state maxima...")
#-------------------------------------------------------------------------------
u = range(0, stop=umax, length=30)
N = 10^4
plot = plotStateMaxima(u, ABCDs, N=N)
displaygui(plot)
:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 9270 | #dsdemo4_audio.jl: A GUI-based demonstration of delta-sigma with sound (Currently GUI-less).
using RSDeltaSigmaPort
@warn("dsdemo4_audio requires additional libraries:\n - MAT.jl\n - WAV.jl")
if !@isdefined(DSAudioDemo) #Hack to avoid continuously appending docs
@doc """# `DSAudioDemo`: Delta-Sigma Audio Demo Module
- Set source, modulator and decimation filter paramters,
- then click "Go."
- Click on input or output waveforms to listen to them.
- Click on the output spectrum to listen to the amplified error.
## TODO
- Implement GUI, as originally intended.
""" DSAudioDemo
end
module DSAudioDemo
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
using RSDeltaSigmaPort: fft
using RSDeltaSigmaPort: linlin, loglin
import Printf: @sprintf
import MAT, WAV
#==Constants
===============================================================================#
const 𝑓ₛout = 8192 #Output sample rate is fixed
const j=im
"ABCD matrices of pre-defined modulators:"
const MODABCD_PRESETS = Dict{Int, Array}(
1 => [1 1 -1; 1 0 0],
2 => [1 0 1 -1; 1 1 1 -2; 0 1 0 0],
)
#==Types
===============================================================================#
abstract type AbstractSource; end
struct SrcRamp <: AbstractSource; end
mutable struct SrcRaw <: AbstractSource
u::Vector #{Float64} #Oversampled signal
u0::Vector #{Float64} #Decimated signal
end
mutable struct SrcSine <: AbstractSource
f::Int #Frequency (Hz)
A::Float64
end
SrcSine(;f=500, A=0.5) = SrcSine(Int(f), A)
struct Modulator
sys
fsout::Int #Output sample rate
OSR::Int
sincorder::Int
end
#==Constructors
===============================================================================#
function _Modulator(sys, 𝑓ₒₛ::Int, OSR::Nothing, sincorder::Int)
#Round 𝑓ₒₛ to a multiple of 𝑓ₛout:
OSR = round(Int, 𝑓ₒₛ/𝑓ₛout)
OSR = max(1, OSR)
𝑓ₒₛnew = 𝑓ₛout*OSR #𝑓ₛout is always hardcoded in dsdemo4
if 𝑓ₒₛ < 𝑓ₛout
@warn("Requested (over)-sampling frequency, 𝑓ₒₛ < 𝑓ₛout." *
"\nSetting 𝑓ₒₛ = 𝑓ₛout = $𝑓ₛout Hz."
)
elseif 𝑓ₒₛ != 𝑓ₒₛnew
@warn("Adjusting (over)-sampling frequency\nfrom 𝑓ₒₛ = $𝑓ₒₛ" *
"\nto 𝑓ₒₛ = $𝑓ₒₛnew (OSR = $OSR)."
)
end
return Modulator(sys, 𝑓ₛout, OSR, sincorder)
end
_Modulator(sys, 𝑓ₒₛ::Nothing, OSR::Int, sincorder::Int) =
Modulator(sys, 𝑓ₛout, OSR, sincorder) #𝑓ₛout is always hardcoded in dsdemo4
_Modulator(sys, 𝑓ₒₛ::Nothing, OSR::Nothing, sincorder::Int) =
_Modulator(sys, 𝑓ₒₛ, 32, sincorder) #Default OSR
_Modulator(sys, 𝑓ₒₛ, OSR, sincorder::Int) =
throw(ArgumentError("Modulator: Invalid type for 𝑓ₒₛ or OSR (or overspecified)."))
Modulator(preset; fos=nothing, OSR=nothing, sincorder::Int=2) =
_Modulator(MODABCD_PRESETS[preset], fos, OSR, sincorder)
#==Helper functions/ data generators
===============================================================================#
function validate(m::Modulator)
(m.OSR < 1) &&
throw("Modulator: Invalid OSR value: $(m.OSR).")
return
end
function generate(src::SrcSine, m::Modulator; N=10)
(src.f > m.fsout/2) &&
throw("Source 𝑓 = $(src.f) too high. Must be <= 𝑓ₛout/2.")
𝑓ₒₛ = m.fsout*m.OSR
𝜔₀ = 2π*src.f
tidx = (0:N-1); t = tidx/𝑓ₒₛ
u = src.A * sin.((𝜔₀/𝑓ₒₛ) * tidx) .* ds_hann(N)
u0 = u[1:m.OSR:end]
return (t, u, u0)
end
function generate(src::SrcRamp, m::Modulator; N=10)
𝑓ₒₛ = m.fsout*m.OSR
tidx = (0:N-1); t = tidx/𝑓ₒₛ
u = collect(range(-0.7, stop=0.7, length=N))
u0 = u[1:m.OSR:end]
return (t, u, u0)
end
function generate(src::SrcRaw, m::Modulator; N=10)
𝑓ₒₛ = m.fsout*m.OSR
N = length(src.u)
tidx = (0:N-1); t = tidx/𝑓ₒₛ
return (t, src.u, src.u0)
end
function play(data::Vector)
WAV.wavplay(data, 𝑓ₛout)
return :PLAYBACK_COMLETE
end
#==Main algorithms
===============================================================================#
function run(m::Modulator; Tsim::Float64=2.0, input=SrcSine(f=500))
validate(m)
(m.fsout != 𝑓ₛout) && throw("dsdemo4 ony supports modulation schemes where 𝑓ₛout = $𝑓ₛout.")
fos = 𝑓ₛout*m.OSR #(over)-sampling frequency
#Compute signal `u` depending on desired source:
N = round(Int, Tsim*fos) #Unless overwritten
(t, u, u0) = generate(input, m, N=round(Int, Tsim*fos))
OSR = m.OSR #Copy for string interpolation
#Plot signal `u0`
plot_tdec = cons(:plot, linlin, title="Decimated Signal (dec=$OSR)", legend=false,
xyaxes=set(ymin=-1, ymax=1),
labels=set(xaxis="Time [s]", yaxis="u₀(t), w(t)"),
)
N = length(u0)
t = (0:N-1)/𝑓ₛout
u0w = waveform(t, u0)
push!(plot_tdec,
cons(:wfrm, u0w, line=set(style=:solid, color=:blue, width=2), label="u₀"),
)
#Plot U(𝑓), from 0 to 𝑓ₛout/2
plot_𝑓dec = cons(:plot, linlin, title="Spectrum (dec=$OSR)", legend=false,
xyaxes=set(xmin=0, xmax=𝑓ₛout/2, ymin=-160, ymax=0),
labels=set(xaxis="Frequency [Hz]", yaxis="U₀(𝑓), W(𝑓)"),
)
if typeof(input) in [SrcSine, SrcRaw]
N = length(u0)
local U
if isa(input, SrcSine)
U = fft(u0)/(N/4)
else #SrcRaw
U = fft(applywnd(u0, ds_hann(N)))/(N/4)
end
𝑓 = range(0, stop=𝑓ₛout, length=N+1); 𝑓 = 𝑓[1:div(N,2)+1]
UdB = dbv.(U[keys(𝑓)])
UdBw = waveform(𝑓, UdB)
push!(plot_𝑓dec,
cons(:wfrm, UdBw, line=set(style=:solid, color=:blue, width=2), label="U₀"),
)
end
#Plot ΔΣ signals (time domain):
Nplot = 300 #Number of samples to plot
simresult = simulateDSM(u, m.sys)
v = simresult.v; y = simresult.y
q = v - y #Quantization error; assumes quantizer gain = 1.
N = length(v)
n = 1:N
if N>Nplot #Sample values from the middle:
n = floor.(Int, N/2-Nplot/2:N/2+Nplot/2-1)
end
plot_tos = cons(:plot, linlin, title="Signals @ Modulator Input/Output", legend=false,
xyaxes=set(ymin=-1.2, ymax=1.2),
labels=set(xaxis="samples", yaxis="v(t)"),
)
_x = collect(0:length(n)-1)
vw = wfrm_stairs(_x, v[n])
uw = waveform(_x, u[n])
push!(plot_tos,
cons(:wfrm, vw, line=set(style=:solid, color=:blue, width=2), label="v"),
cons(:wfrm, uw, line=set(style=:solid, color=:green, width=2), label="u"),
)
#Plot V(𝑓), from 0 to 𝑓ₒₛ/2. Use the middle Nfft points of v
N = length(v)
Nfft = min(N, 16*8192)
n = array_round((N-Nfft)/2+1):array_round((N+Nfft)/2)
V = fft(applywnd(v[n], ds_hann(Nfft)))/(Nfft/4)
inBin = ceil(Int, Nfft/1000)
if isa(input, SrcSine)
inBin = round(Int, input.f/fos*Nfft+1) #Bin of tone
end
(𝑓nrm, Vp) = logsmooth(V, inBin)
plot_𝑓os = cons(:plot, loglin, title="Spectrum (OSR=$OSR)", legend=false,
xyaxes=set(xmin=100, xmax=fos/2, ymin=-160, ymax=0),
labels=set(xaxis="Frequency [Hz]", yaxis="V(𝑓)"),
)
Vpw = waveform(𝑓nrm*fos, Vp)
nbw_str = @sprintf("NBW = %.1f Hz", fos*1.5/Nfft) #orig. coords: (Fs/2, -90); :cr
push!(plot_𝑓os,
cons(:wfrm, Vpw, line=set(style=:solid, color=:blue, width=2), label="V"),
cons(:atext, nbw_str, y=-90, reloffset=set(x=0.95), align=:cr),
)
#Compute w
w = sinc_decimate(v, m.sincorder, m.OSR)
filtered_q = sinc_decimate(q, m.sincorder, m.OSR)
N = length(w)
t = collect(0:N-1)/𝑓ₛout
ww = wfrm_stairs(t, w)
push!(plot_tdec,
cons(:wfrm, ww, line=set(style=:solid, color=:red, width=2), label="w"),
)
#Plot W(𝑓), from 0 to 𝑓ₛout/2
if typeof(input) in [SrcSine, SrcRaw]
Nfft = length(w)
local W
if isa(input, SrcSine)
W = fft(w)/(N/4)
else
W = fft(applywnd(w, ds_hann(N)))/(N/4)
end
𝑓 = range(0, stop=𝑓ₛout, length=Nfft+1); 𝑓 = 𝑓[1:div(Nfft,2)+1]
WdB = dbv.(W[keys(𝑓)])
WdBw = waveform(𝑓, WdB)
nbw_str = @sprintf("NBW = %.1f Hz", 𝑓ₛout*1.5/Nfft) #orig. coords: (10, -90); :cl
push!(plot_𝑓dec,
cons(:wfrm, WdBw, line=set(style=:solid, color=:red, width=2), label="W"),
cons(:atext, nbw_str, y=-90, reloffset=set(x=0.05), align=:cl),
)
end
pcoll = push!(cons(:plot_collection, ncolumns=2),
plot_tdec, plot_𝑓dec, plot_tos, plot_𝑓os,
)
return (plot=pcoll, u0=u0, w=w, input=input)
end
end #module DSAudioDemo
#==Auto-run test code:
===============================================================================#
println()
display(@doc(DSAudioDemo)) #Show user how to use DSAudioDemo
function load_demo4_audio_data(m::DSAudioDemo.Modulator)
srcpath = dirname(pathof(RSDeltaSigmaPort))
fpath = joinpath(srcpath, "..", "original_source", "delsig", "dsdemo4.mat")
alldata = DSAudioDemo.MAT.matread(fpath)
ds = alldata["ds"][:]
sd = alldata["sd"][:]
u0 = ds
u = interp(sd, m.OSR)
return DSAudioDemo.SrcRaw(u, u0)
end
function playresults(results)
if isa(results.input, DSAudioDemo.SrcRamp)
@warn("Will not playback ramp signal.")
return
end
println()
@info("Listening to ideally sampled/decimated signal...")
flush(stdout); flush(stderr)
@show DSAudioDemo.play(results.u0)
println()
@info("Listening to modulator output...")
flush(stdout); flush(stderr)
@show DSAudioDemo.play(results.w)
println("\nReplay results with:")
println("\tplayresults(results)")
return
end
#Inputs
dsm = DSAudioDemo.Modulator(1, OSR=32, sincorder=2)
#sig = load_demo4_audio_data(dsm)
sig = DSAudioDemo.SrcSine(f=500) #500, 4000, 4200
#sig = DSAudioDemo.SrcRamp()
println("\nUsing modulator:")
@show dsm
println()
@info("Performing ΔΣ audio simulation..."); flush(stdout); flush(stderr)
results = DSAudioDemo.run(dsm, Tsim=3.0, input=sig)
println("\tdone."); flush(stdout); flush(stderr)
println()
@info("Displaying results..."); flush(stdout); flush(stderr)
displaygui(results.plot)
playresults(results)
println()
:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 4562 | # Demonstrate the simulateMS function
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import RSDeltaSigmaPort: fft
import RSDeltaSigmaPort: BoundingBox
import Printf: @sprintf
j=im
#==Demo5Module: Module to define types, etc
===============================================================================#
module Demo5Module
struct SimConfig
A::Float64
f::Float64
mtf
dither::Float64
# dw
id::String
end
struct SimResult
fin::Int #Input frequency
sv::Array
Svv::Array
Sdd::Array
leg::String
end
end #Demo5Module
import .Demo5Module: SimConfig, SimResult

#==
===============================================================================#
show_usage = false #Set true to also plot element-usage patterns per comparison
OSR = 25 #Oversampling ratio
M = 16 #Number of DAC unit elements
N = 2^14 #Number of simulated samples
sigma_d = 0.01 #1% mismatch

#Delta-sigma modulator specs used to derive mismatch-shaping TFs:
dsm2b = RealDSM(order=2, OSR=OSR, M=M, opt=1, Hinf=2) #Second-order shaping
dsm4 = RealDSM(order=4, OSR=round(Int, OSR*0.9), M=M, opt=1, Hinf=1.3) #Fourth-order shaping
dsm6 = RealDSM(order=6, OSR=OSR, M=M, opt=1, Hinf=4)

#Candidate mismatch transfer functions (MTFs):
mtf1 = _zpk(1,0,1,1) #First-order shaping
mtf2 = _zpk([ 1 1 ], [ 0.3 0.3 ], 1, 1) #Second-order shaping
mtf2b = synthesizeNTF(dsm2b) #Second-order shaping
mtf4 = synthesizeNTF(dsm4) #Fourth-order shaping

#Simulation cases: (amplitude, frequency, MTF, dither, label):
cases = [
    SimConfig(undbv(-3), 0.01, [], 0, "Thermometer"),
    SimConfig(undbv(-3), 0.01, mtf1, 0, "Rotation"),
    SimConfig(undbv(-3), 0.01, mtf2, 0, "2^{nd}-order"),
    SimConfig(undbv(-30), 0.01, mtf1, 0, "Rotation"),
    SimConfig(undbv(-30), 0.01, mtf1, 0.5, "Rot + dither"),
    SimConfig(undbv(-3), 0.01, mtf2b, 0, "2^{nd}-order with zero"),
    SimConfig(undbv(-3), 0.01, mtf4, 0, "4^{th}-order"),
]

#Groups of case indices overlaid in each comparison plot:
comparisons = [ [1,2], [2,3], [4,5], [3,6], [6,7] ]

if false #Debug: trim the run down to a single comparison
    cases = cases[1:2]
    comparisons = comparisons[1:1]
end

nconfig = length(cases)
println("\nNumber of configurations to simulate: $nconfig"); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
NTF = synthesizeNTF(dsm6) #6th-order NTF used for all modulator simulations
window = ds_hann(N)'/(M*N/8) #Hann window, scaled for the M-element DAC
windowM = repeat(window, M) #One copy of the window per unit element

println()
@info("Running simulations..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
simresults = SimResult[] #Simulation results
for cfg in cases
    local u #Avoid complaints wrt "global u"
    local sv
    println("\t", cfg.id); flush(stdout); flush(stderr)
    fin = round(Int, cfg.f*N) #Input frequency as an FFT bin number
    #In-band bins, excluding dc, bin 1, and the test tone (±1 bin):
    inband = setdiff(2:1 .+ ceil(Int, 0.5*N/OSR), 1 .+ [0 1 fin .+ [-1 0 1]])
    w = (2π/N)*fin
    u = M*cfg.A*sin.(w*(0:N-1))
    v = simulateDSM(u, NTF, nlev=M+1).v #M unit elements requires an (M+1)-level quant.
    data = applywnd(v, window)
    #BUGFIX: reuse the already-windowed `data` (previously `applywnd(v, window)`
    #was recomputed here and `data` was a dead store):
    Svv = abs.(fft(data)) .^ 2
    if isa(cfg.mtf, Array) && isempty(cfg.mtf)
        sv = ds_therm(v, M) #No shaping: plain thermometer element selection
    else
        sv = simulateMS(v, M=M, mtf=cfg.mtf, d=cfg.dither).sv
    end
    #Mismatch-error PSD: per-element spectra summed, scaled by mismatch variance:
    Sdd = sigma_d^2 * sum( abs.(fft(applywnd(sv, windowM),2)) .^ 2 , dims=1)
    mnp = sum(Sdd[inband])/1.5 #In-band mismatch noise power (NBW = 1.5/N, see plot label)
    leg = @sprintf("%s (MNP= %.0f dBFS)", cfg.id, dbp(mnp))
    push!(simresults, SimResult(fin, sv, Svv, Sdd, leg))
end
println("\tdone.")
println()
@info("Plotting results..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
#One overlay plot per comparison group:
for case_nums in comparisons
    local plot #Avoid complaints wrt "global plot"
    nc = length(case_nums)
    #_cases = cases[case_nums]
    _results = simresults[case_nums]
    id1 = _results[1].leg; id2 = _results[2].leg #Only print out first 2
    println("Mismatch-Shaping Unit-Element DAC")
    println("\tComparing $id1 vs. $id2")

    if show_usage #Optional: element-usage pattern over the first T samples
        pcoll = cons(:plot_collection, ncolumns=1, title="Element Usage")
        T = 25
        for resi in _results
            plot = plotUsage(resi.sv[:,1:T])
            push!(pcoll, plot)
        end
        displaygui(pcoll)
    end

    colors = [:blue, :magenta, :red, :green, :orange] #One color per overlaid case
    plot = plotSpectrum()
#	plot.title = "Error Spectra"
    set(plot,
        xyaxes=set(xmin=1e-3, xmax=0.5, ymin=-140, ymax=-50),
        labels=set(yaxis="Error PSD"),
    )
    #Overlay each case's mismatch-error spectrum:
    for (i, resi) in enumerate(_results)
        plotSpectrum!(plot, sqrt.(resi.Sdd), resi.fin, lw=2, color=colors[i], id=resi.leg, n=4)
    end
    #Also show the last case's modulator-output spectrum for reference:
    resi = _results[end]
    A = dbp(resi.Svv[resi.fin])
    plotSpectrum!(plot, sqrt.(resi.Svv), resi.fin, color=:green, n=5)
    #In-band marker line + noise-bandwidth annotation:
    lwfrm = waveform([1e-3, 0.5/OSR], -140*[1, 1])
    nbw_str = @sprintf("NBW = %.1e", 1.5/N) #orig. coords: (0.5, -140)
    push!(plot,
        cons(:wfrm, lwfrm, line=set(style=:solid, color=:black, width=8)),
        cons(:atext, nbw_str, y=-140, reloffset=set(x=0.95), align=:br),
    )
    plot.title = @sprintf("A = %.0fdBFS", A)
    displaygui(plot)
end

if true #Quadrature example
    println()
    @warn("TODO: Quadrature example")
end

:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 1658 | #Demonstrate Saramaki half-band filter design
#dsdemo6: Saramäki half-band filter design demo
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import RSDeltaSigmaPort: csdsize2, linlin
import RSDeltaSigmaPort: BoundingBox
import RSDeltaSigmaPort: fft
import Printf: @sprintf
j=im #MATLAB-style shorthand for the imaginary unit

use_canned_example = false #true: use pre-computed coefficients from exampleHBF()

println("\n*** Half-band filter design (ALPHA VERSION)")
f1=f2=0 #Define variables
title=nothing
if use_canned_example
    (f1, f2) = exampleHBF(2)
else
    fp = 0.9*0.25 #Passband edge (90% of the quarter-band)
    @warn("Need firls() equivalent to converge for small dB values.")
    #delta = undbv( -100 )
    delta = undbv( -25 ) #Ripple spec (see warning above re: smaller values)
    title = "designHBF Iterations"
    (f1, f2, info) = designHBF(fp, delta=delta, debug=true)
end
n1 = length(f1); n2 = length(f2)
#Hardware complexity estimate from the CSD representations of f1/f2:
complexity = sum(csdsize2(f1)) + (2*n1-1)*(n2+sum(csdsize2(f2))-1) #VERIFYME
@show complexity

println()
@info("Running simulations..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
#interleave the even and odd decimated impulse responses
Nimp = 2^11 #Impulse-response length
imp = simulateHBF(vcat(1, zeros(Nimp-1)),f1,f2)
mag = abs.(fft(imp))
mag = mag[1:array_round(end/2+1)] #Keep only positive frequencies
t = range(0, stop=0.5, length=length(mag)) #Normalized frequency axis
println("\tdone.")

println()
@info("Plotting resulting filter"); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
plot = cons(:plot, linlin, title="designHBF() result", legend=false,
    xyaxes=set(xmin=0, xmax=0.5, ymin=-150, ymax=10),
    labels=set(xaxis="Normalized Frequency", yaxis="|H| [dB]"),
)
magdBw = waveform(t, dbv.(mag))
push!(plot,
    cons(:wfrm, magdBw, line=set(style=:solid, color=:blue, width=2), label=""),
)
displaygui(plot)

:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 736 | #Demonstrate the designLCBP function
#=WARNING:
designLCBP is very ill-behaved due to deficiencies in MATLAB's constr() and
minimax(). These functions frequently take a viable set of parameters as a
starting point and turn them into an unstable modulator. I should provide a
more robust implementation...
=#
#Demo: continuous-time LC bandpass modulator design via designLCBP
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import Printf: @sprintf
j=im #MATLAB-style shorthand for the imaginary unit

#==
===============================================================================#
println("\n*** Continuous-Time LC Modulator Design")
n = 3 #Modulator order
OSR = 64 #Oversampling ratio
opt = 2 #Zero-placement optimization flag
Hinf = 1.7 #Out-of-band NTF gain limit
f0 = 1/16 #Center frequency (normalized to fs)
t = [0.5 1] #DAC pulse timing
form = :FB #Feedback topology
dbg = true #Print/plot intermediate design info

(param, H, L0, ABCD) = designLCBP(n, OSR=OSR, opt=opt,
    Hinf=Hinf, f0=f0, t=t, form=form, dbg=dbg
)

:END_OF_DEMO
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 275 | #Example lowpass/bandpass real/quadrature modulator design
#Driver script: run the dsexample1() modulator-design example
include("dsexample1_fn.jl")
opt=2 #Zero-placement optimization flag
#dsm = RealDSM(order=5, OSR=32, form=:CRFB, Hinf=1.5, opt=2)
dsm = RealDSM(order=5, OSR=32, form=:CRFB, Hinf=1.5, opt=opt)
results = dsexample1(dsm, LiveDemo=true)
:END_OF_EXAMPLE
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 4039 | #Example lowpass/bandpass real/quadrature modulator design
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import Printf: @sprintf
j=im
"""`result = dsexample1(dsm; ampdB::Float64=-3.0, ftest=nothing, LiveDemo=false)`
Example lowpass/bandpass real/quadrature modulator design.
# Output `result`: `Dict` of:
- :nlev, :NTF, :ABCD, :umax, :amp, :snr, :peak_snr
- :coeff (a NamedTuple of: a, g, b c)
"""
function dsexample1(dsm::AbstractDSM; ampdB::Float64=-3.0, ftest=nothing, LiveDemo::Bool=false)
#Computed defaults
if isnothing(ftest)
ftest = default_ftest(dsm)
end
#Derived parameters
nlev = dsm.M + 1
dsmtypestr = str_modulatortype(dsm)
println("*** ", dsmtypestr, " example... ")
#TODO: support plot attributes?:
sizes = Dict{Symbol, Any}(
:lw => 1, #LineWidth
:ms => 5, #MarkerSize
:fs => 12, #FontSize
:fw => "normal", #FontWeight
)
if LiveDemo #Overwrite settings:
push!(sizes,
:lw => 2,
:ms => 6,
:fw => "bold",
)
end
LiveDemo && (@info("Realizing NTF..."); flush(stdout); flush(stderr))
#-------------------------------------------------------------------------------
local NTF, ABCD
if !isquadrature(dsm)
NTF = synthesizeNTF(dsm)
a,g,b,c = realizeNTF(NTF, dsm.form)
z0 = exp(2π*j*dsm.f0)
b = [abs(b[1]+b[2]*(1-z0)) zeros(1,length(b)-1)] #Use a single feed-in for the input
ABCD = stuffABCD(a,g,b,c,dsm.form)
else
NTF = synthesizeQNTF(dsm)
ABCD = realizeQNTF(NTF, dsm.form, 1)
end
LiveDemo && println("\tdone.")
plot = documentNTF(dsm, ABCD, sizes=sizes, frespOnly=false)
saveimage(:png, "dsexample1_NTF.png", plot, AR=2/1, width=900)
displaygui(plot)
LiveDemo && (@info("Performing time-domain simulations..."); flush(stdout); flush(stderr))
#-------------------------------------------------------------------------------
N = 100
t = 0:N-1
#Do not use genTestTone() (N too small; ftest gets rounded to 0Hz).
#(u, iftest) = genTestTone(dsm, ampdB, ftest, N=100)
local simresult
if !isquadrature(dsm)
u = undbv(ampdB)*dsm.M*sin.( 2π*ftest*t )
simresult = simulateDSM(u, ABCD, nlev=nlev)
else
u = undbv(ampdB)*dsm.M*exp.( 2π*j*ftest*t );
simresult = simulateQDSM(u, ABCD, nlev=nlev)
end
LiveDemo && println("\tdone.")
plot = plotModTransient(u, simresult.v, legend=false)
ylim = (dsm.M+0.25)
set(plot, xyaxes=set(ymin=-ylim, ymax=ylim))
displaygui(plot)
#Example spectrum
#-------------------------------------------------------------------------------
plot = plotExampleSpectrum(dsm, NTF, ampdB=ampdB, ftest=ftest, sizes=sizes)
displaygui(plot)
#SQNR plot
#-------------------------------------------------------------------------------
snrinfo = calcSNRInfo(dsm, NTF=NTF)
plot = plotSNR(snrinfo, dsm)
#TODO: Apply sizes
displaygui(plot)
if isquadrature(dsm) #Example I/Q mismatch
error("Quadrature section not ported over.")
end
umax = nothing
coeffs = nothing
if !isquadrature(dsm) #Dynamic range scaling
LiveDemo && (@info("Performing dynamic range scaling..."); flush(stdout); flush(stderr))
#-------------------------------------------------------------------------------
ABCD0 = ABCD
ABCD, umax = scaleABCD(ABCD0, nlev=nlev, f=dsm.f0)
a,g,b,c = mapABCD(ABCD, dsm.form)
coeffs = (a=a, g=g, b=b, c=c)
LiveDemo && println("\tdone.")
LiveDemo && (@info("Verifying dynamic range scaling..."); flush(stdout); flush(stderr))
#-------------------------------------------------------------------------------
N = 10^4; N0 = 50
u = range(0, stop=0.95*umax, length=30)
test_tone = cos.(2π*dsm.f0 * (0:N-1))
test_tone[1:N0] = test_tone[1:N0] .* (0.5 .- 0.5*cos.(2π/N0 * (0:N0-1)))
plot = plotStateMaxima(u, ABCD, test_tone, nlev=nlev)
set(plot, RSDeltaSigmaPort.linlin, xyaxes=set(ymin=0, ymax=1))
LiveDemo && println("\tdone.")
displaygui(plot)
end
results = Dict{Symbol, Any}(
:nlev => nlev,
:NTF => NTF,
:ABCD => ABCD,
:SNR_vs_amp_sim => snrinfo.vs_amp_sim,
:peak_snr => snrinfo.peak[2],
:umax => umax,
:coeffs => coeffs
)
return results
end
#Last line
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 5139 | #Design example for a continuous-time lowpass ΔΣ ADC
#dsexample2: design example for a continuous-time lowpass ΔΣ ADC
using RSDeltaSigmaPort
using RSDeltaSigmaPort.EasyPlot #set, cons
import Printf: @sprintf
j=im #MATLAB-style shorthand for the imaginary unit

#==Baseband modulator (continuous-time implementation)
===============================================================================#
nlev=2 #Binary quantizer
#dsm = RealDSM(order=3, OSR=100, M=nlev-1, f0=0, opt=0, Hinf=1.3, form=:FB)
#dsm = RealDSM(order=3, OSR=32, M=nlev-1, f0=0, opt=0, Hinf=1.5, form=:FB)
dsm = RealDSM(order=3, OSR=32, M=nlev-1, f0=0, opt=2, Hinf=1.5, form=:FB)
umax = 0.83 #Target maximum stable input amplitude

#Parameters for the continuous-time implementation:
tdac = [0 1] #DAC timing. [0 1] means zero-delay non-return-to-zero

println("\n*** $(dsm.order)th-Order Continuous-Time Lowpass Example")

@info("Performing NTF synthesis..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
NTF0 = synthesizeNTF(dsm) #Optimized zero placement
#println("p:"); display(NTF0.p)
#println("z:"); display(NTF0.z)
plotcoll_NTF = documentNTF(dsm, NTF0)
println("\tdone.")
saveimage(:png, "dsexample2_NTF.png", plotcoll_NTF, AR=2/1, width=900)
displaygui(plotcoll_NTF)
displaygui(plotNTF(NTF0, dsm.OSR)) #ADDED MALaforge

@info("Performing time-domain simulations..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
#Example spectrum
#ftest = 0.3 * 0.5/dsm.OSR/3 #~1/3 of the way across the passband
ftest = (0.5/dsm.OSR)/3 #~1/3 of the way across the passband
plot = plotExampleSpectrum(dsm, NTF0, ftest=ftest,N=2^12) #16
#title('Example Spectrum');
saveimage(:png, "dsexample2_PSD.png", plot, AR=2/1, width=900)
displaygui(plot)

#SQNR plot
snrinfo = calcSNRInfo(dsm, NTF=NTF0)
plot = plotSNR(snrinfo, dsm)
set(plot, xyaxes=set(xmin=-100, ymin=0, ymax=100))
println("\tdone.")
saveimage(:png, "dsexample2_SQNR.png", plot, AR=2/1, width=900)
displaygui(plot)
@info("Mapping to continuous-time..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
(ABCDc,tdac2) = realizeNTF_ct(NTF0, dsm.form, tdac) #CT loop-filter state space
(Ac, Bc, Cc, Dc) = partitionABCD(ABCDc)
sys_c = _ss(Ac,Bc,Cc,Dc)
println("\tdone.")

#Verify that the sampled pulse response of the CT loop filter
#matches the impulse response of the DT prototype
n_imp = 10
y = -impL1(NTF0, n_imp) #Negate impL1 to make the plot prettier
y_max = maximum(real.(y))
plot = plotLollipop(0:n_imp, y, color=:blue)
plot.title = "Loop filter pulse/impulse responses (negated)"
#Short marker line + "discrete-time" label:
yl = floor(0.9*y_max); color=:blue
ylw = waveform([0.5, 2], [yl, yl])
simglyph = cons(:a, glyph=set(shape=:o, size=1.5, color=color, fillcolor=color))
push!(plot,
    cons(:wfrm, ylw, simglyph, line=set(style=:solid, color=color, width=2), label=""),
    cons(:atext, " discrete-time", x=2, y=yl, align=:bl),
)

#Overlay the CT pulse response (sampled at dt):
dt = 1/16
t = collect(0:dt:n_imp)
yy = -pulse(sys_c, [0 0;tdac], dt, n_imp*1.0)
yy = squeeze(yy)
yyw = waveform(t, yy)
yl = floor(0.7*y_max); color=:green
ylw = waveform([0.5, 2], [yl, yl])
push!(plot,
    #Add yy when it works
    cons(:wfrm, yyw, line=set(style=:solid, color=color, width=2), label=""),
    cons(:wfrm, ylw, line=set(style=:solid, color=color, width=2), label=""),
    cons(:atext, " continuous-time", x=2, y=yl, align=:bl),
)
displaygui(plot)

#Map the cts system to its discrete time equivalent and check the NTF
(sys_d, Gp) = mapCtoD(sys_c, t=tdac)
ABCD = [sys_d.A sys_d.B; sys_d.C sys_d.D]
(NTF, G) = calculateTF(ABCD)
NTF = cancelPZ(NTF) #Remove coincident pole/zero pairs
(plot_NTF, plot_fresp) = plotcoll_NTF.plotlist
plotPZ!(plot_NTF, NTF, color=:cyan) #Overlay d-t equivalent poles/zeros

#Also plot the STF
L0 = _zpk(sys_c[1,1])
@warn("FIXME: Again, numerical error in ss->zpk gives 0 gain")
f = range(0, stop=0.5, length=10)
G = evalTFP(L0, NTF, f)
Gw = waveform(f, dbv.(G))
push!(plot_fresp,
    cons(:wfrm, Gw, line=set(style=:solid, color=:magenta, width=2), label=""),
)
plotcoll_NTF.title = "NTF and STF"
displaygui(plotcoll_NTF)
@info("Performing dynamic range scaling..."); flush(stdout); flush(stderr)
#-------------------------------------------------------------------------------
#!!! This code assumes that the scale factors for the DT equivalent apply
#!!! to the CT system. A system with an RZ DAC will have inter-sample peaks
#!!! that exceed the values at the sampling instants.
(ABCDs, umax, S) = scaleABCD(ABCD, nlev=nlev, f=dsm.f0, xlim=1, umax=umax, N0=10^4)
S = S[1:dsm.order,1:dsm.order] #Don't worry about the extra states used in the d-t model
println("\tdone.")

println("\nScaled ABCD matrix (ABCDs):")
@show umax
display(ABCDs)

#Compute ABCDcs: apply the similarity transform S to the CT state space
Sinv = inv(S)
Acs=S*Ac*Sinv; Bcs=S*Bc; Ccs=Cc*Sinv
ABCDcs = [Acs Bcs; Ccs Dc]
sys_cs = _ss(Acs, Bcs, Ccs, Dc)

#ABCDcs needs to be checked with CT simulations to
# 1. Verify pulse response
# 2. Verify signal swings

println("\nABCDcs matrix:")
display(ABCDcs)

#Collect the full design into a summary Dict:
adc = Dict{Symbol, Any}(
    :order => dsm.order,
    :OSR => dsm.OSR,
    :opt => dsm.opt,
    :M => dsm.M,
    :f0 => dsm.f0,
    :NTF => NTF,
    :ABCD => ABCD,
    :umax => umax,
    :peak_snr => snrinfo.peak[2],
    :form => dsm.form,
    :ABCDc => ABCDc,
    :L0 => L0,
    :sys_c => sys_c,
    :ABCDcs => ABCDcs,
    :sys_cs => sys_cs,
)

println()
display(adc)

:END_OF_EXAMPLE
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
|
[
"BSD-3-Clause",
"MIT"
] | 0.4.2 | 286a0cf04d4e951697988474d818613ec4632282 | code | 6094 | #RSDeltaSigmaPort: A port of Richard Schreier's Delta Sigma Toolbox
#-------------------------------------------------------------------------------
#__precompile__(false) #Bit faster to develop when using ControlSystems.jl

#=Conventions
 - Function name `_stairs__()`
   - Leading `_`: Private function (not meant to be called externally from this
     module).
   - Trailing `__`: Implementation for a specific function (do not call unless
     from within that parent function).
=#

"""`RSDeltaSigmaPort`: A port of Richard Schreier's Delta Sigma Toolbox

# Sample usage
```julia-repl
julia> using RSDeltaSigmaPort #Will take a while to load, compile, etc...
julia> import RSDeltaSigmaPort: @runsample

julia> @runsample("dsdemo1.jl")
julia> @runsample("dsdemo2.jl")
julia> @runsample("dsdemo3.jl")
julia> @runsample("dsdemo4_audio.jl")
julia> @runsample("dsdemo5.jl")
julia> @runsample("dsdemo6.jl")
julia> @runsample("dsexample1.jl")
julia> @runsample("dsexample2.jl")
julia> @runsample("demoLPandBP.jl")
```

# See also:
[`simulateDSM`](@ref), [`simulateMS`](@ref), [`simulateSNR`](@ref), [`simulateHBF`](@ref) |
[`synthesizeNTF`](@ref), [`realizeNTF`](@ref), [`realizeNTF_ct`](@ref) |
[`calculateSNR`](@ref), [`peakSNR`](@ref), [`predictSNR`](@ref) |
[`calculateTF`](@ref), [`evalTF`](@ref), [`evalTFP`](@ref) |
[`stuffABCD`](@ref), [`scaleABCD`](@ref), [`mapABCD`](@ref), [`partitionABCD`](@ref) |
[`mapCtoD`](@ref), [`mapQtoR`](@ref) |
[`exampleHBF`](@ref), [`designHBF`](@ref), [`simulateHBF`](@ref) |
[`pulse`](@ref), [`impL1`](@ref) |
[`lollipop`](@ref), [`logsmooth`](@ref) |
[`documentNTF`](@ref), [`plotExampleSpectrum`](@ref)
"""
module RSDeltaSigmaPort

#Package root directory (used by @runsample to locate sample scripts):
const rootpath = realpath(joinpath(@__DIR__, ".."))

#=Flags:
	-TODO
	-VERIFYME: is this the correct behaviour???
	-VERSIONSWITCH: Algorithm originally depends on software version (Needs to be checked)
	-NEEDSTOOLKIT
	-REQUESTHELP
=#

import Random
import Statistics: mean
import LinearAlgebra
import LinearAlgebra: norm, diagm, eigen, cond
import SpecialFunctions: erfinv
import Interpolations
import FFTW: fft, fftshift
import DSP: conv, filt, remez
import Optim
import Optim: optimize, GoldenSection, IPNewton, GradientDescent, LBFGS
import Optim: Fminbox, TwiceDifferentiableConstraints
import Polynomials
import Polynomials: Polynomial
import Printf: @sprintf

using CMDimData
using CMDimData.MDDatasets
using CMDimData.EasyPlot
using CMDimData.EasyPlot.Colors #Should be ok with whatever version it needs
import CMDimData.EasyPlot: BoundingBox #Use this one to avoid version conflicts with Graphics.
using InspectDR
CMDimData.@includepkg EasyPlotInspect

import ControlSystems
import ControlSystems: isdiscrete
#using ControlSystems: zpk, zpkdata

#Raise an error for code paths that should never execute:
function throw_unreachable()
	msg = "Code expected to be unreachable with exception handling"
	error(msg)
end

#Raise an error for features that have not been ported yet:
function throw_notimplemented()
	msg = "Feature not implemented"
	error(msg)
end

const j = im #MATLAB-style shorthand for the imaginary unit

include("base.jl")
include("arrays.jl")
include("mlfun.jl")
include("mlfun_control.jl")
include("text.jl")
include("datasets.jl")
include("signals_time.jl")
include("power.jl")
include("timedomain.jl")
include("windowing.jl")
include("snr.jl")
include("optimize.jl")
include("transferfunctions.jl")
include("statespace.jl")
include("mapCtoD.jl")
include("synthesizeNTF.jl")
include("quantizer.jl")
include("simulate_base.jl")
include("simulateDSM.jl")
include("simulateMS.jl")
include("realizeNTF.jl")
include("realizeNTF_ct.jl")
include("filter_base.jl")
include("exampleHBF.jl")
include("designHBF.jl")
include("designLCBP.jl")
include("simulateHBF.jl")
include("calc_spectrum.jl")
include("plot_base.jl")
include("plot_transient.jl")
include("plot_spectrum.jl")
include("plot_NTF.jl")
include("plot_zplane.jl")
include("plot_SNR.jl")
include("plot_state.jl")
include("display.jl")

#==Convenience macros
===============================================================================#
"""`@runsample(filename)`: include() one of the package's sample scripts
(from `<rootpath>/sample/`) in the calling module."""
macro runsample(filename::String)
	fnstr = "$filename"
	fpath = realpath(joinpath(rootpath, "sample", fnstr))
	@info("Running RSDeltaSigmaPort sample:\n$fpath...")
	m = quote
		include($fpath)
	end
	return esc(m) #esc: Evaluate in calling module
end

#==Exported interface
===============================================================================#
#Simple calculations:
export dbm, dbp, dbv, rms
export undbm, undbp, undbv

#Compatibility:
#WARN: Might eventually collide with names from other packages:
#NOTE: Some use "_" prefix to avoid collisions with similar functions in other packages
export cplxpair
export _ss, _zpk, _zpkdata
export _zp2ss, _zp2tf, _freqz, _tf, _minreal, _impulse
export interp1_lin, interp1_cubic #Specialized names
export interp
export squeeze
export eye, orth, eig

#Data/arrays:
export AbstractDSM, RealDSM, QuadratureDSM
export padl, padr, padt, padb
export array_round #Converts a floating-point value to an (Int) index usable by Julia arrays.
export mapQtoR

#Accessors
export isquadrature

#Signal generators:
export ds_f1f2, default_ftest, default_fband
export genTestTone, genTestTone_quad, genTestTone_sin
export pulse, impL1

#Windowing/time domain:
export ds_hann, applywnd
export ds_therm
export sinc_decimate

#Misc. calculations:
export calcSpecInfo
export calcSNRInfo, calculateSNR, peakSNR, predictSNR

#Transfer functions:
export synthesizeNTF, realizeNTF, realizeNTF_ct
export calculateTF, evalTF, evalTFP
export rmsGain, cancelPZ

#State space:
export stuffABCD, scaleABCD, mapABCD, partitionABCD
export mapCtoD

#Filters:
export exampleHBF, designHBF, designLCBP

#Simulations:
export simulateDSM, simulateMS
export simulateSNR, simulateHBF

#Plotting:
export waveform, wfrm_stairs, lollipop, logsmooth
export plotPZ, plotPZ!, plotNTF, plotNTF!
export documentNTF
export plotSNR, plotSNR!, plotStateMaxima, plotUsage
export plotModTransient, plotLollipop
export plotSpec, plotSpec!, plotModSpectrum, plotModSpectrum!
export plotSpectrum, plotSpectrum!, plotExampleSpectrum

#Displaying plots:
export inlinedisp, saveimage, displaygui

#Display/text:
export ds_orderString, str_modulatortype

end # module
| RSDeltaSigmaPort | https://github.com/ma-laforge/RSDeltaSigmaPort.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.