licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.0 | 06939499b8c623fd97998b85bb0263b88bea4b8f | code | 150 | using MonteCarloSummary
using Test
const tests = [
"mcsummary.jl",
]
for t in tests
@testset "Test $t" begin
include(t)
end
end
| MonteCarloSummary | https://github.com/andrewjradcliffe/MonteCarloSummary.jl.git |
|
[
"MIT"
] | 0.1.0 | 06939499b8c623fd97998b85bb0263b88bea4b8f | docs | 942 | # MonteCarloSummary
## Installation
```julia
using Pkg
Pkg.add("MonteCarloSummary")
```
## Description
Have Monte Carlo simulations and need a simple, efficiently computed summary? This package provides just that.
A single function, `mcsummary`, computes the bare minimum of statistical properties -- mean, Monte Carlo standard error, standard deviation, and quantiles (the granularity of which may be specified by the user).
The assumption is that, irrespective of how one's Monte Carlo simulations are generated, the result is a matrix of numeric values. The simulation index may be on either the first or the second dimension -- the user may specify this with the `dim` keyword argument. Given that Monte Carlo typically involves a large number of simulations (and/or high-dimensional spaces), `mcsummary` defaults to a threaded implementation, but the user may opt out of this with the `multithreaded` keyword argument.
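As a rough sketch of a typical call (the `draws` matrix below is made up, and the exact defaults of the keyword arguments may differ -- consult the package docstrings):
```julia
using MonteCarloSummary

# 10_000 simulations of a 3-dimensional quantity, with the simulation
# index on the first dimension (an assumption for this sketch).
draws = randn(10_000, 3)

# Mean, Monte Carlo standard error, standard deviation and quantiles
# for each of the 3 components; opt out of threading if preferred.
s = mcsummary(draws; dim = 1, multithreaded = false)
```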
That's it. Enjoy.
| MonteCarloSummary | https://github.com/andrewjradcliffe/MonteCarloSummary.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 554 | using Documenter
using GEEBRA
makedocs(
sitename = "GEEBRA",
authors = "Ioannis Kosmidis, Nicola Lunardon",
format = Documenter.HTML(),
modules = [GEEBRA],
pages = [
"Home" => "index.md",
"Examples" => "man/examples.md",
"Documentation" => Any[
"Public" => "lib/public.md",
"Internal" => "lib/internal.md",
]
],
doctest = false
)
deploydocs(
repo = "github.com/ikosmidis/GEEBRA.jl.git",
target = "build",
devbranch = "develop",
push_preview = true,
)
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 918 | module GEEBRA # general estimating equations with or without bias-reducting adjustments
@warn "As of version 0.1.1, GEEBRA will receive no further updates in the methods it provides. All methods in GEEBRA are available and will be maintained in the more extensive MEstimation (https://github.com/ikosmidis/MEstimation.jl) Julia package."
using NLsolve
using Optim
using FiniteDiff
using ForwardDiff
using LinearAlgebra
using Distributions
import Base: show, print
import StatsBase: fit, aic, vcov, coef, coeftable, stderror, CoefTable
export objective_function
export objective_function_template
export estimating_function
export get_estimating_function
export estimating_function_template
export aic
export tic
export vcov
export coef
export coeftable
export fit
export stderror
include("estimating_functions.jl")
include("objective_functions.jl")
include("fit.jl")
include("result_methods.jl")
end # module
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 3685 | """
estimating_function_template(nobs::Function, ef_contribution::Function)
Define an `estimating_function_template` by supplying:
+ `nobs`: a function of `data` that computes the number of observations of the particular data type,
+ `ef_contribution`: a function of the parameters `theta`, the `data` and the observation index `i` that returns a vector of length `length(theta)`.
"""
struct estimating_function_template
nobs::Function
ef_contribution::Function
end
"""
estimating_function(theta::Vector, data::Any, template::estimating_function_template, br::Bool = false)
Construct the estimating function by adding up all contributions in the `data` according to [`estimating_function_template`](@ref), and evaluate it at `theta`. If `br = true` then automatic differentiation is used to compute the empirical bias-reducing adjustments and add them to the estimating function.
"""
function estimating_function(theta::Vector,
data::Any,
template::estimating_function_template,
br::Bool = false)
p = length(theta)
n_obs = template.nobs(data)
contributions = Matrix(undef, p, n_obs)
for i in 1:n_obs
contributions[:, i] = template.ef_contribution(theta, data, i)
end
if (br)
quants = ef_quantities(theta, data, template, br)
sum(contributions, dims = 2) + quants[1]
else
sum(contributions, dims = 2)
end
end
"""
get_estimating_function(data::Any, template::estimating_function_template, br::Bool = false)
Construct the estimating function by adding up all contributions in the `data` according to [`estimating_function_template`](@ref). If `br = true` then automatic differentiation is used to compute the empirical bias-reducing adjustments and add them to the estimating function. The result is a function that stores the values of the estimating functions at its second argument in a pre-allocated vector passed as its first argument, ready to be used within `NLsolve.nlsolve`.
"""
function get_estimating_function(data::Any,
template::estimating_function_template,
br::Bool = false)
function g!(F, theta::Vector)
out = estimating_function(theta, data, template, br)
for i in 1:length(out)
F[i] = out[i]
end
end
end
function ef_quantities(theta::Vector,
data::Any,
template::estimating_function_template,
adjustment::Bool = false)
nj(eta::Vector, i::Int) = ForwardDiff.jacobian(beta -> template.ef_contribution(beta, data, i), eta)
p = length(theta)
n_obs = template.nobs(data)
psi = Matrix{Float64}(undef, n_obs, p)
njmats = Vector(undef, n_obs)
for i in 1:n_obs
psi[i, :] = template.ef_contribution(theta, data, i)
njmats[i] = nj(theta, i)
end
jmat_inv = inv(-sum(njmats))
emat = psi' * psi
vcov = jmat_inv * (emat * jmat_inv')
if (adjustment)
u(eta::Vector, i::Int) = ForwardDiff.jacobian(beta -> nj(beta, i), eta)
psi_tilde = Matrix(undef, n_obs, p)
umats = Vector(undef, n_obs)
for i in 1:n_obs
umats[i] = u(theta, i)
end
umat = sum(umats)
A = Vector(undef, p)
for j in 1:p
for i in 1:n_obs
psi_tilde[i, :] = njmats[i][j, :]
end
A[j] = -tr(jmat_inv * psi_tilde' * psi +
vcov * umat[j:p:(p * p - p + j), :] / 2)
end
[A, jmat_inv, emat]
else
[vcov, jmat_inv, emat]
end
end
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 6519 | """
fit(template::objective_function_template, data::Any, theta::Vector; estimation_method::String = "M", br_method::String = "implicit_trace", optim_method = LBFGS(), optim_options = Optim.Options())
Fit an [`objective_function_template`](@ref) on `data` using M-estimation ([keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `estimation_method = "M"`; default) or RBM-estimation (reduced-bias M estimation; [Kosmidis & Lunardon, 2020](http://arxiv.org/abs/2001.03786); [keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `estimation_method = "RBM"`). Bias reduction is either through the maximization of the bias-reducing penalized objective in Kosmidis & Lunardon (2020) ([keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `br_method = "implicit_trace"`; default) or by subtracting an estimate of the bias from the M-estimates ([keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `br_method = "explicit_trace"`). The bias-reducing penalty is constructed internally using automatic differentiation (using the [ForwardDiff](https://github.com/JuliaDiff/ForwardDiff.jl) package), and the bias estimate using a combination of automatic differentiation (using the [ForwardDiff](https://github.com/JuliaDiff/ForwardDiff.jl) package) and numerical differentiation (using the [FiniteDiff](https://github.com/JuliaDiff/FiniteDiff.jl) package).
The maximization of the objective or the penalized objective is done using the [**Optim**](https://github.com/JuliaNLSolvers/Optim.jl) package. Optimization methods and options can be supplied directly through the [keyword arguments](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `optim_method` and `optim_options`, respectively. `optim_options` expects an object of class `Optim.Options`. See the [Optim documentation](https://julianlsolvers.github.io/Optim.jl/stable/#user/config/#general-options) for more details on the available options.
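For example (a sketch only; `logistic_template`, `my_data` and `start` are assumed to be an `objective_function_template`, a matching data object and a vector of starting values, as in the package examples):
```julia
using Optim  # for NelderMead() and Optim.Options

fit(logistic_template, my_data, start)                             # M-estimation
fit(logistic_template, my_data, start, estimation_method = "RBM")  # maximum penalized objective
fit(logistic_template, my_data, start, estimation_method = "RBM",
    br_method = "explicit_trace",
    optim_method = NelderMead(),
    optim_options = Optim.Options(iterations = 1000))
```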
"""
function fit(template::objective_function_template,
data::Any,
theta::Vector;
estimation_method::String = "M",
br_method::String = "implicit_trace",
optim_method = LBFGS(),
optim_options = Optim.Options())
if (estimation_method == "M")
br = false
elseif (estimation_method == "RBM")
if (br_method == "implicit_trace")
br = true
elseif (br_method == "explicit_trace")
br = false
else
error(br_method, " is not a recognized bias-reduction method")
end
else
error(estimation_method, " is not a recognized estimation method")
end
obj = beta -> -objective_function(beta, data, template, br)
out = optimize(obj, theta, optim_method, optim_options)
if (estimation_method == "M")
theta = out.minimizer
elseif (estimation_method == "RBM")
if (br_method == "implicit_trace")
theta = out.minimizer
elseif (br_method == "explicit_trace")
quants = obj_quantities(out.minimizer, data, template, true)
jmat_inv = quants[2]
adjustment = FiniteDiff.finite_difference_gradient(beta -> obj_quantities(beta, data, template, true)[1], out.minimizer)
theta = out.minimizer + jmat_inv * adjustment
br = true
end
end
GEEBRA_results(out, theta, data, template, br, true, br_method)
end
"""
fit(template::estimating_function_template, data::Any, theta::Vector; estimation_method::String = "M", br_method::String = "implicit_trace", nlsolve_arguments...)
Fit an [`estimating_function_template`](@ref) on `data` using M-estimation ([keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `estimation_method = "M"`; default) or RBM-estimation (reduced-bias M estimation; [Kosmidis & Lunardon, 2020](http://arxiv.org/abs/2001.03786); [keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `estimation_method = "RBM"`). Bias reduction is either through the solution of the empirically adjusted estimating functions in Kosmidis & Lunardon (2020) ([keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `br_method = "implicit_trace"`; default) or by subtracting an estimate of the bias from the M-estimates ([keyword argument](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `br_method = "explicit_trace"`). The bias-reducing adjustments and the bias estimate are constructed internally using automatic differentiation (using the [ForwardDiff](https://github.com/JuliaDiff/ForwardDiff.jl) package).
The solution of the estimating equations or the adjusted estimating equations is done using the [**NLsolve**](https://github.com/JuliaNLSolvers/NLsolve.jl) package. Arguments can be passed directly to `NLsolve.nlsolve` through [keyword arguments](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1). See the [NLsolve README](https://github.com/JuliaNLSolvers/NLsolve.jl) for more information on available options.
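For example (a sketch only; `ratio_template`, `my_data` and `start` are assumed to be an `estimating_function_template`, a matching data object and a vector of starting values, as in the package examples):
```julia
fit(ratio_template, my_data, start)                             # M-estimation
fit(ratio_template, my_data, start, estimation_method = "RBM")  # adjusted estimating functions
fit(ratio_template, my_data, start, estimation_method = "RBM",
    br_method = "explicit_trace",
    show_trace = true)  # extra keyword arguments are passed to NLsolve.nlsolve
```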
"""
function fit(template::estimating_function_template,
data::Any,
theta::Vector;
estimation_method::String = "M",
br_method::String = "implicit_trace",
nlsolve_arguments...)
if (estimation_method == "M")
br = false
elseif (estimation_method == "RBM")
if (br_method == "implicit_trace")
br = true
elseif (br_method == "explicit_trace")
br = false
else
error(br_method, " is not a recognized bias-reduction method")
end
else
error(estimation_method, " is not a recognized estimation method")
end
ef = get_estimating_function(data, template, br)
out = nlsolve(ef, theta; nlsolve_arguments...)
if (estimation_method == "M")
theta = out.zero
elseif (estimation_method == "RBM")
if (br_method == "implicit_trace")
theta = out.zero
elseif (br_method == "explicit_trace")
quants = ef_quantities(out.zero, data, template, true)
adjustment = quants[1]
jmat_inv = quants[2]
theta = out.zero + jmat_inv * adjustment
br = true
end
end
GEEBRA_results(out, theta, data, template, br, false, br_method)
end
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 2294 | """
objective_function_template(nobs::Function, obj_contribution::Function)
Define an `objective_function_template` by supplying:
+ `nobs`: a function of `data` that computes the number of observations of the particular data type,
+ `obj_contribution`: a function of the parameters `theta`, the `data` and the observation index `i` that returns a real.
"""
struct objective_function_template
nobs::Function
obj_contribution::Function
end
"""
objective_function(theta::Vector, data::Any, template::objective_function_template, br::Bool = false)
Construct the objective function by adding up all contributions in the
`data` according to [`objective_function_template`](@ref), and
evaluate it at `theta`. If `br = true` then automatic differentiation
is used to compute the empirical bias-reducing penalty and add it to
the objective function.
"""
function objective_function(theta::Vector,
data::Any,
template::objective_function_template,
br::Bool = false)
p = length(theta)
n_obs = template.nobs(data)
contributions = Vector(undef, n_obs)
for i in 1:n_obs
contributions[i] = template.obj_contribution(theta, data, i)
end
if (br)
quants = obj_quantities(theta, data, template, br)
sum(contributions) + quants[1]
else
sum(contributions)
end
end
function obj_quantities(theta::Vector,
data::Any,
template::objective_function_template,
penalty::Bool = false)
npsi(eta::Vector, i::Int) = ForwardDiff.gradient(beta -> template.obj_contribution(beta, data, i), eta)
nj(eta::Vector, i::Int) = ForwardDiff.hessian(beta -> template.obj_contribution(beta, data, i), eta)
p = length(theta)
n_obs = template.nobs(data)
psi = Matrix{Float64}(undef, n_obs, p)
njmats = Vector(undef, n_obs)
for i in 1:n_obs
psi[i, :] = npsi(theta, i)
njmats[i] = nj(theta, i)
end
jmat_inv = inv(-sum(njmats))
emat = psi' * psi
vcov = jmat_inv * (emat * jmat_inv)
if (penalty)
penalty = - tr(jmat_inv * emat) / 2
[penalty, jmat_inv, emat]
else
[vcov, jmat_inv, emat]
end
end
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 5715 | """
GEEBRA_results(results::Union{NLsolve.SolverResults, Optim.MultivariateOptimizationResults, Optim.UnivariateOptimizationResults}, theta::Vector, data::Any, template::Union{objective_function_template, estimating_function_template}, br::Bool, has_objective::Bool, br_method::String)
Composite type for the output of [`fit`](@ref) for an [`objective_function_template`](@ref) or an [`estimating_function_template`](@ref).
"""
struct GEEBRA_results
results::Union{NLsolve.SolverResults, Optim.MultivariateOptimizationResults, Optim.UnivariateOptimizationResults}
theta::Vector
data::Any
template::Union{objective_function_template, estimating_function_template}
br::Bool
has_objective::Bool
br_method::String
end
"""
vcov(results::GEEBRA_results)
Compute an estimate of the variance-covariance matrix of the `M`-estimator or its reduced-bias version from the output of [`fit`](@ref) for an [`objective_function_template`](@ref) or an [`estimating_function_template`](@ref).
"""
function vcov(results::GEEBRA_results)
if (results.has_objective)
obj_quantities(results.theta, results.data, results.template, false)[1]
else
ef_quantities(results.theta, results.data, results.template, false)[1]
end
end
"""
tic(results::GEEBRA_results)
Compute the Takeuchi Information Criterion at the `M`-estimator or its reduced-bias version from the output of [`fit`](@ref) for an [`objective_function_template`](@ref). `nothing` is returned if `results` is the output of [`fit`](@ref) for an [`estimating_function_template`](@ref).
"""
function tic(results::GEEBRA_results)
if (results.has_objective)
obj = objective_function(results.theta, results.data, results.template, false)
quants = obj_quantities(results.theta, results.data, results.template, true)
-2 * (obj + 2 * quants[1])
end
end
"""
aic(results::GEEBRA_results)
Compute the Akaike Information Criterion at the `M`-estimator or its reduced-bias version from the output of [`fit`](@ref) for an [`objective_function_template`](@ref). `nothing` is returned if `results` is the output of [`fit`](@ref) for an [`estimating_function_template`](@ref).
"""
function aic(results::GEEBRA_results)
if (results.has_objective)
obj = objective_function(results.theta, results.data, results.template, false)
p = length(results.theta)
-2 * (obj - p)
end
end
"""
coef(results::GEEBRA_results)
Extract the `M`-estimates or their reduced-bias versions from the output of [`fit`](@ref) for an [`objective_function_template`](@ref) or an [`estimating_function_template`](@ref).
"""
function coef(results::GEEBRA_results)
results.theta
end
"""
show(io::IO, results::GEEBRA_results; digits::Real = 4)
`show` method for `GEEBRA_results` objects. If `GEEBRA_results.has_objective == true`, then the results of `aic(results)` and `tic(results)` are also printed.
"""
function Base.show(io::IO, results::GEEBRA_results;
digits::Real = 4)
theta = results.theta
p = length(theta)
v = vcov(results)
if results.has_objective
println(io,
(results.br ? "RBM" : "M") * "-estimation with objective contributions ",
results.template.obj_contribution)
else
println(io,
(results.br ? "RBM" : "M") * "-estimation with estimating function contributions ",
results.template.ef_contribution)
end
if (results.br)
println(io, "Bias reduction method: ", results.br_method)
end
println(io)
show(io, coeftable(results))
if results.has_objective
objfun = objective_function(results.theta, results.data, results.template, results.br)
if results.br
print(io, "\nMaximum penalized objetive:\t", round(objfun, digits = digits))
else
print(io, "\nMaximum objetive:\t\t", round(objfun, digits = digits))
end
print(io, "\nTakeuchi information criterion:\t", round(tic(results), digits = digits))
print(io, "\nAkaike information criterion:\t", round(aic(results), digits = digits))
else
estfun = estimating_function(results.theta, results.data, results.template, results.br)
if results.br
print(io, "\nAdjusted estimating functions:\t", estfun)
else
print(io, "\nEstimating functions:\t", estfun)
end
end
end
"""
stderror(results::GEEBRA_results)
Compute estimated standard errors for the `M`-estimator or its reduced-bias version from the output of [`fit`](@ref) for an [`objective_function_template`](@ref) or an [`estimating_function_template`](@ref).
"""
function stderror(results::GEEBRA_results)
sqrt.(diag(vcov(results)))
end
"""
coeftable(results::GEEBRA_results; level::Real=0.95)
Return a `StatsBase.CoefTable` for the `M`-estimator or its reduced-bias version from the output of [`fit`](@ref) for an [`objective_function_template`](@ref) or an [`estimating_function_template`](@ref). `level` can be used to set the level of the reported Wald-type confidence intervals (using quantiles of the standard normal distribution).
"""
function coeftable(results::GEEBRA_results; level::Real=0.95)
cc = coef(results)
se = stderror(results)
zz = cc ./ se
p = 2 * ccdf.(Ref(Normal()), abs.(zz))
ci = se * quantile(Normal(), (1-level)/2)
levstr = isinteger(level*100) ? string(Integer(level*100)) : string(level*100)
CoefTable(hcat(cc, se, zz, p, cc + ci, cc - ci),
["Estimate","Std. Error","z value","Pr(>|z|)","Lower $levstr%","Upper $levstr%"],
["theta[$i]" for i = 1:length(cc)], 4)
end
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | code | 12600 | using Test
## Ratios
@testset "ef implementation for a single parameter" begin
using GEEBRA
using Random
## Ratio data
struct ratio_data
y::Vector
x::Vector
end
## Ratio nobs
function ratio_nobs(data::ratio_data)
nx = length(data.x)
ny = length(data.y)
if (nx != ny)
error("length of x is not equal to the length of y")
end
nx
end
## Ratio contribution to estimating function
function ratio_ef(theta::Vector,
data::ratio_data,
i::Int64)
p = length(theta)
(data.y[i] .- theta * data.x[i])[1:p]
end
## Set the ratio template
ratio_template = estimating_function_template(ratio_nobs, ratio_ef)
@inferred estimating_function_template(ratio_nobs, ratio_ef)
## Generate some data
Random.seed!(123);
my_data = ratio_data(randn(10), rand(10));
## Get M-estimator for the ratio
result_m = fit(ratio_template, my_data, [0.1], estimation_method = "M")
@inferred fit(ratio_template, my_data, [0.1], estimation_method = "M")
## Get reduced-bias estimator for the ratio
result_br = fit(ratio_template, my_data, [0.1], estimation_method = "RBM")
@inferred fit(ratio_template, my_data, [0.1], estimation_method = "RBM")
## Get reduced-bias estimator for the ratio using explicit RBM-estimation
result_br1 = fit(ratio_template, my_data, [0.1], estimation_method = "RBM", br_method = "explicit_trace")
## Quantities for estimators
sx = sum(my_data.x)
sxx = sum(my_data.x .* my_data.x)
sy = sum(my_data.y)
sxy = sum(my_data.x .* my_data.y)
@test isapprox(sy/sx, coef(result_m)[1])
@test isapprox((sy + sxy/sx)/(sx + sxx/sx), coef(result_br)[1])
@test isapprox(sy/sx * (1 - sxx / sx^2) + sxy/sx^2, coef(result_br1)[1])
@test_throws ErrorException result_br1 = fit(ratio_template, my_data, [0.1], estimation_method = "RBM", br_method = "magic_br_method")
end
## Instrumental variables
@testset "ef implementation for multiple parameters" begin
using GEEBRA
using Random
using Distributions
using NLsolve
## IV data
struct iv_data
y::Vector
t::Vector
w::Vector
end
## IV nobs
function iv_nobs(data::iv_data)
nw = length(data.w)
ny = length(data.y)
nt = length(data.t)
if (nw != ny)
error("length of w is not equal to the length of y")
elseif (nw != nt)
error("length of w is not equal to the length of t")
elseif (nw != nt)
error("length of w is not equal to the length of t")
end
nw
end
## Contributions to the estimating functions
function iv_ef(theta::Vector,
data::iv_data,
i::Int64)
[theta[1] - data.t[i], (data.y[i] - theta[2] * data.w[i]) * (theta[1] - data.t[i])]
end
## Simulating IV data
function simulate_iv(nobs::Int,
theta::Vector)
alpha = theta[1]
beta = theta[2]
gamma = theta[3]
delta = theta[4]
mux = theta[5]
sigmax = theta[6]
sigmae = theta[7]
sigmau = theta[8]
sigmat = theta[9]
e1 = rand(Normal(0, sigmae), nobs)
e2 = rand(Normal(0, sigmau), nobs)
e3 = rand(Normal(0, sigmat), nobs)
x = rand(Normal(mux, sigmax), nobs)
w = x + e2
y = alpha .+ beta * x + e1
t = gamma .+ delta * x + e3
iv_data(y, t, w)
end
## Set up IV GEEBRA template
iv_template = estimating_function_template(iv_nobs, iv_ef)
@inferred estimating_function_template(iv_nobs, iv_ef)
## Simulate data
true_theta = [2.0, 2.0, 1.0, 3.0, 0.0, 1.0, 2.0, 1.0, 1.0]
true_parameter = true_theta[[3, 2]]
Random.seed!(123)
my_data = simulate_iv(100, true_theta)
o1_ml = fit(iv_template, my_data, true_parameter, estimation_method = "M")
@inferred fit(iv_template, my_data, true_parameter, estimation_method = "M")
o1_br = fit(iv_template, my_data, true_parameter, estimation_method = "RBM")
@inferred fit(iv_template, my_data, true_parameter, estimation_method = "RBM")
o1_br1 = fit(iv_template, my_data, true_parameter, estimation_method = "RBM", br_method = "explicit_trace")
@inferred fit(iv_template, my_data, true_parameter, estimation_method = "RBM", br_method = "explicit_trace")
ef_br = get_estimating_function(my_data, iv_template, true)
@inferred get_estimating_function(my_data, iv_template, true)
ef_ml = get_estimating_function(my_data, iv_template, false)
@inferred get_estimating_function(my_data, iv_template, false)
o2_ml = nlsolve(ef_ml, [0.1, 0.2])
o2_br = nlsolve(ef_br, [0.1, 0.2])
qs = GEEBRA.ef_quantities(coef(o1_ml), my_data, iv_template, true)
@test isapprox(coef(o1_ml) + qs[2] * qs[1], coef(o1_br1))
@test isapprox(coef(o1_ml), o2_ml.zero)
@test isapprox(coef(o1_br), o2_br.zero)
## Estimating function at the estimates
@test isapprox(estimating_function(o2_br.zero, my_data, iv_template, true),
zeros(Float64, 2, 1),
atol = 1e-10)
end
@testset "obj implementation for multiple parameters" begin
using GEEBRA
using Random
using Distributions
using Optim
using LinearAlgebra
## Logistic regression data
struct logistic_data
y::Vector
x::Array{Float64}
m::Vector
end
## Logistic regression nobs
function logistic_nobs(data::logistic_data)
nx = size(data.x)[1]
ny = length(data.y)
nm = length(data.m)
if (nx != ny)
error("number of rows in of x is not equal to the length of y")
elseif (nx != nm)
error("number of rows in of x is not equal to the length of m")
elseif (ny != nm)
error("length of y is not equal to the length of m")
end
nx
end
function logistic_loglik(theta::Vector,
data::logistic_data,
i::Int64)
eta = sum(data.x[i, :] .* theta)
mu = exp.(eta)./(1 .+ exp.(eta))
ll = data.y[i] .* log.(mu) + (data.m[i] - data.y[i]) .* log.(1 .- mu)
ll
end
Random.seed!(123);
n = 100;
m = 1;
x = Array{Float64}(undef, n, 2);
x[:, 1] .= 1.0;
x[:, 2] .= rand(n);
true_betas = [0.5, -1];
y = rand.(Binomial.(m, cdf.(Logistic(), x * true_betas)));
my_data = logistic_data(y, x, fill(m, n));
logistic_template = objective_function_template(logistic_nobs, logistic_loglik)
@inferred objective_function_template(logistic_nobs, logistic_loglik)
o1_ml = optimize(b -> -objective_function(b, my_data, logistic_template, false),
true_betas, LBFGS())
o1_br = optimize(b -> -objective_function(b, my_data, logistic_template, true),
true_betas, LBFGS())
o2_ml = fit(logistic_template, my_data, true_betas, estimation_method = "M")
o2_br = fit(logistic_template, my_data, true_betas, estimation_method = "RBM")
o3_ml = optimize(b -> -objective_function(b, my_data, logistic_template, false),
true_betas, Optim.Options(iterations = 2))
o3_br = optimize(b -> -objective_function(b, my_data, logistic_template, true),
true_betas, Optim.Options(iterations = 2))
o4_ml = fit(logistic_template, my_data, true_betas, estimation_method = "M",
optim_method = NelderMead(),
optim_options = Optim.Options(iterations = 2))
o4_br = fit(logistic_template, my_data, true_betas, estimation_method = "RBM",
optim_method = NelderMead(),
optim_options = Optim.Options(iterations = 2))
@test isapprox(Optim.minimizer(o1_ml), Optim.minimizer(o2_ml.results))
@test isapprox(Optim.minimizer(o1_br), Optim.minimizer(o2_br.results))
@test isapprox(Optim.minimizer(o3_ml), Optim.minimizer(o4_ml.results))
@test isapprox(Optim.minimizer(o3_br), Optim.minimizer(o4_br.results))
@test isapprox(sqrt.(diag(vcov(o2_br))), stderror(o2_br))
end
@testset "agreement between obj and ef implementations" begin
using GEEBRA
using Random
using Distributions
using Optim
using NLsolve
## Logistic regression data
struct logistic_data
y::Vector
x::Array{Float64}
m::Vector
end
## Logistic regression nobs
function logistic_nobs(data::logistic_data)
nx = size(data.x)[1]
ny = length(data.y)
nm = length(data.m)
if (nx != ny)
error("number of rows in of x is not equal to the length of y")
elseif (nx != nm)
error("number of rows in of x is not equal to the length of m")
elseif (ny != nm)
error("length of y is not equal to the length of m")
end
nx
end
function logistic_loglik(theta::Vector,
data::logistic_data,
i::Int64)
eta = sum(data.x[i, :] .* theta)
mu = exp.(eta)./(1 .+ exp.(eta))
ll = data.y[i] .* log.(mu) + (data.m[i] - data.y[i]) .* log.(1 .- mu)
ll
end
function logistic_ef(theta::Vector,
data::logistic_data,
i::Int64)
eta = sum(data.x[i, :] .* theta)
mu = exp.(eta)./(1 .+ exp.(eta))
data.x[i, :] * (data.y[i] - data.m[i] * mu)
end
Random.seed!(123);
n = 100;
m = 1;
p = 5;
x = Array{Float64}(undef, n, p);
x[:, 1] .= 1.0;
for j in 2:p
x[:, j] .= rand(n);
end
true_betas = randn(p) * sqrt(p);
y = rand.(Binomial.(m, cdf.(Logistic(), x * true_betas)));
my_data = logistic_data(y, x, fill(m, n));
logistic_obj_template = objective_function_template(logistic_nobs, logistic_loglik)
logistic_ef_template = estimating_function_template(logistic_nobs, logistic_ef)
o1_ml = fit(logistic_obj_template, my_data, true_betas, estimation_method = "M")
e1_ml = fit(logistic_ef_template, my_data, true_betas, estimation_method = "M")
o1_br = fit(logistic_obj_template, my_data, true_betas, estimation_method = "RBM")
e1_br = fit(logistic_ef_template, my_data, true_betas, estimation_method = "RBM")
o1_br1 = fit(logistic_obj_template, my_data, true_betas, estimation_method = "RBM", br_method = "explicit_trace")
o1_br2 = fit(logistic_obj_template, my_data, coef(o1_ml), estimation_method = "RBM", br_method = "explicit_trace")
e1_br1 = fit(logistic_ef_template, my_data, true_betas, estimation_method = "RBM", br_method = "explicit_trace")
e1_br2 = fit(logistic_ef_template, my_data, coef(o1_ml), estimation_method = "RBM", br_method = "explicit_trace")
@test isapprox(coef(o1_ml), coef(e1_ml), atol = 1e-05)
@test isapprox(coef(o1_br), coef(e1_br), atol = 1e-05)
@test isapprox(coef(o1_br1), coef(e1_br1), atol = 1e-05)
@test isapprox(coef(o1_br2), coef(e1_br2), atol = 1e-05)
@test isapprox(coef(o1_br1), coef(e1_br2), atol = 1e-05)
@test isapprox(aic(o1_ml),
-2 * (objective_function(coef(o1_ml), my_data, logistic_obj_template, false) - p))
@test isapprox(aic(o1_br),
-2 * (objective_function(coef(o1_br), my_data, logistic_obj_template) - p))
quants_ml = GEEBRA.obj_quantities(coef(o1_ml), my_data, logistic_obj_template, true)
quants_br = GEEBRA.obj_quantities(coef(o1_br), my_data, logistic_obj_template, true)
@test isapprox(tic(o1_ml),
-2 * (objective_function(coef(o1_ml), my_data, logistic_obj_template) + 2 * quants_ml[1]))
@test isapprox(tic(o1_br),
-2 * (objective_function(coef(o1_br), my_data, logistic_obj_template) + 2 * quants_br[1]))
@test isapprox(vcov(o1_ml), vcov(e1_ml))
@test isapprox(vcov(o1_br), vcov(e1_br))
@test isapprox(vcov(o1_br1), vcov(e1_br1))
@test isapprox(vcov(o1_br2), vcov(e1_br2))
@test isapprox(coeftable(o1_ml).cols, coeftable(e1_ml).cols)
@test isapprox(coeftable(o1_br).cols, coeftable(e1_br).cols)
@test isapprox(coeftable(o1_br1).cols, coeftable(e1_br1).cols)
@test isapprox(coeftable(o1_br2).cols, coeftable(e1_br2).cols)
end
# using Revise
# using Pkg
# Pkg.activate("/Users/yiannis/Repositories/GEEBRA.jl")
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | docs | 327 | # GEEBRA 0.1.1
* As of version 0.1.1, GEEBRA will receive no further updates in the methods it provides. All methods in GEEBRA are available and will be maintained in the more extensive [MEstimation](https://github.com/ikosmidis/MEstimation.jl) Julia package.
* Added deprecation warning
# GEEBRA 0.1.0
First public release
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | docs | 988 | # GEEBRA.jl
**G**eneral **E**stimating **E**quations with or without **B**ias-**R**educing **A**djustments (pronounced `zee· bruh`)
[](https://travis-ci.org/ikosmidis/GEEBRA.jl)
[](https://codecov.io/github/ikosmidis/GEEBRA.jl?branch=master)
[](https://ikosmidis.github.io/GEEBRA.jl/dev/)
[](https://ikosmidis.github.io/GEEBRA.jl/stable/)
[](https://github.com/ikosmidis/GEEBRA.jl/blob/master/LICENSE.md)
**As of version 0.1.1, GEEBRA will receive no further updates in the methods it provides. All methods in GEEBRA are available and will be maintained in the more extensive [MEstimation](https://github.com/ikosmidis/MEstimation.jl) Julia package.**
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | docs | 3212 | # [GEEBRA.jl](https://github.com/ikosmidis/GEEBRA.jl)
## Authors
| [**Ioannis Kosmidis**](http://www.ikosmidis.com) | **(author, maintainer)** |
--- | ---
| [**Nicola Lunardon**](https://www.unimib.it/nicola-lunardon) | **(author)** |
## Licence
[MIT License](https://github.com/ikosmidis/GEEBRA.jl/blob/master/LICENSE.md)
## Package description
**GEEBRA** is a Julia package that implements ``M``-estimation for
statistical models, either by solving estimating equations or by
maximizing inference objectives, like
[likelihoods](https://en.wikipedia.org/wiki/Likelihood_function) and
composite likelihoods (see [Varin et al,
2011](http://www3.stat.sinica.edu.tw/statistica/oldpdf/A21n11.pdf)
for a review), using user-specified templates of the estimating
function or objective function contributions.
A key feature is the use of only those templates and forward-mode
automatic differentiation (as implemented in
[**ForwardDiff**](https://github.com/JuliaDiff/ForwardDiff.jl)) to
provide methods for **reduced-bias ``M``-estimation**
(**RB``M``-estimation**; see [Kosmidis & Lunardon, 2020](http://arxiv.org/abs/2001.03786)). RB``M``-estimation takes place either through the
adjustment of the estimating equations, the penalization of the
objectives, or the subtraction of an estimate of the bias of the
``M``-estimator from the ``M``-estimates.
See the
[examples](https://ikosmidis.github.io/GEEBRA.jl/dev/man/examples/)
for a showcase of the functionality **GEEBRA** provides.
See
[NEWS.md](https://github.com/ikosmidis/GEEBRA.jl/blob/master/NEWS.md)
for changes, bug fixes and enhancements.
## **GEEBRA** templates
**GEEBRA** has been designed so that the only requirements from the user are to:
1. implement a [Julia composite type](https://docs.julialang.org/en/v1/manual/types/index.html) for
the data;
2. implement a function for computing the number of observations from
the data object;
3. implement a function for calculating the contribution to the
estimating function or to the objective function from a single
observation; this function should take as arguments the parameter
vector, the data object, and the observation index;
4. specify a GEEBRA template (using
[`estimating_function_template`](@ref) for estimating functions and
[`objective_function_template`](@ref) for objective functions) whose
fields are the function that computes the number of observations and
the function that computes the contributions to the estimating
functions or to the objective.
**GEEBRA**, then, can estimate the unknown parameters by either
``M``-estimation or RB``M``-estimation.
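The following is a condensed sketch of those four steps for the ratio-of-means setting that is worked out in the [examples](https://ikosmidis.github.io/GEEBRA.jl/dev/man/examples/) (the data here are simulated, the starting value `0.1` is arbitrary, and length checks in `ratio_nobs` are omitted for brevity):
```julia
using GEEBRA

# 1. composite type for the data
struct ratio_data
    y::Vector
    x::Vector
end

# 2. number of observations
ratio_nobs(data::ratio_data) = length(data.y)

# 3. contribution of observation i to the estimating function
ratio_ef(theta::Vector, data::ratio_data, i::Int) = data.y[i] .- theta * data.x[i]

# 4. GEEBRA template
ratio_template = estimating_function_template(ratio_nobs, ratio_ef)

my_data = ratio_data(randn(10), rand(10))
fit(ratio_template, my_data, [0.1])                             # M-estimation
fit(ratio_template, my_data, [0.1], estimation_method = "RBM")  # RBM-estimation
```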
## Examples
```@contents
Pages = [
"man/examples.md",
]
```
## Documentation
```@contents
Pages = [
"lib/public.md",
"lib/internal.md",
]
```
## [Index](@id main-index)
```@index
Pages = [
"lib/public.md",
"lib/internal.md",
]
```
## References
+ Varin, C., N. Reid, and D. Firth (2011). An overview of composite likelihood methods. Statistica Sinica 21(1), 5–42. [Link](http://www3.stat.sinica.edu.tw/statistica/oldpdf/A21n11.pdf)
+ Kosmidis, I., N. Lunardon (2020). Empirical bias-reducing adjustments to estimating functions. ArXiv:2001.03786. [Link](http://arxiv.org/abs/2001.03786)
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | docs | 177 | # Internals
## Contents
```@contents
Pages = ["internal.md"]
```
## Index
```@index
Pages = ["internal.md"]
```
## Internals
```@docs
GEEBRA.GEEBRA_results
GEEBRA.show
```
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | docs | 675 | # Public documentation
## Contents
```@contents
Pages = ["public.md"]
```
## Index
```@index
Pages = ["public.md"]
```
## Public interface
```@docs
estimating_function_template
get_estimating_function
estimating_function
objective_function_template
objective_function
fit(template::objective_function_template, data::Any, theta::Vector; estimation_method::String = "M", br_method::String = "implicit_trace", optim_method = LBFGS(), optim_options = Optim.Options())
fit(template::estimating_function_template, data::Any, theta::Vector; estimation_method::String = "M", br_method::String = "implicit_trace", nlsolve_arguments...)
coef
vcov
stderror
coeftable
tic
aic
```
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.1 | 60edca49e56b2d8aa4f712369fc49ba57cbfe976 | docs | 9028 | # Examples
## Contents
```@contents
Pages = ["examples.md"]
Depth=3
```
## Ratio of two means
Consider a setting where independent pairs of random variables ``(X_1, Y_1), \ldots, (X_n, Y_n)`` are observed, and suppose that interest is in the ratio of the mean of ``Y_i`` to the mean of ``X_i``, that is ``\theta = \mu_Y / \mu_X``, with
``\mu_X = E(X_i) \ne 0`` and ``\mu_Y = E(Y_i)`` ``(i = 1, \ldots, n)``.
Assuming that sampling is from an infinite population, one way of estimating ``\theta`` without any further assumptions about the joint distribution of ``(X_i, Y_i)`` is to solve the unbiased estimating equation ``\sum_{i = 1}^n (Y_i - \theta X_i) = 0``. The resulting ``M``-estimator is then ``\hat\theta = s_Y/s_X``, where ``s_X = \sum_{i = 1}^n X_i`` and ``s_Y = \sum_{i = 1}^n Y_i``.
The estimator ``\hat\theta`` is generally biased, as can be shown, for example, by an application of the Jensen inequality assuming that ``X_i`` is independent of ``Y_i``, and its bias can be reduced using the empirically adjusted estimating functions approach in Kosmidis & Lunardon (2020).
This example illustrates how GEEBRA can be used to calculate the ``M``-estimator and its reduced-bias version.
```@repl 1
using GEEBRA, Random
```
Define a data type for ratio estimation problems
```@repl 1
struct ratio_data
y::Vector
x::Vector
end;
```
Write a function to compute the number of observations for objects of type `ratio_data`.
```@repl 1
function ratio_nobs(data::ratio_data)
nx = length(data.x)
ny = length(data.y)
if (nx != ny)
error("length of x is not equal to the length of y")
end
nx
end;
```
Generate some data to test things out
```@repl 1
Random.seed!(123);
my_data = ratio_data(randn(10), rand(10));
ratio_nobs(my_data)
```
The estimating function for the ratio ``\theta`` is
``\sum_{i = 1}^n (Y_i - \theta X_i)``
So, the contribution to the estimating function can be implemented as
```@repl 1
function ratio_ef(theta::Vector,
data::ratio_data,
i::Int64)
data.y[i] .- theta * data.x[i]
end;
```
The `estimating_function_template` for the ratio estimation problem can now be set up using `ratio_nobs` and `ratio_ef`.
```@repl 1
ratio_template = estimating_function_template(ratio_nobs, ratio_ef);
```
We are now ready to use `ratio_template` and `my_data` to compute the ``M``-estimator of ``\theta`` by solving the estimating equation ``\sum_{i = 1}^n (Y_i - \theta X_i) = 0``. The starting value for the nonlinear solver is set to `0.1`.
```@repl 1
result_m = fit(ratio_template, my_data, [0.1])
```
`fit` uses methods from the [**NLsolve**](https://github.com/JuliaNLSolvers/NLsolve.jl) package for solving the estimating equations. Arguments can be passed directly to `NLsolve.nlsolve` through [keyword arguments](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) to the `fit` method. For example,
```@repl 1
result_m = fit(ratio_template, my_data, [0.1], show_trace = true)
```
Bias reduction in general ``M``-estimation can be achieved by solving the adjusted estimating equation ``\sum_{i = 1}^n (Y_i - \theta X_i) + A(\theta, Y, X) = 0``, where ``A(\theta, Y, X)`` is an empirical bias-reducing adjustment that depends on the first and second derivatives of the estimating function contributions. **GEEBRA** can use `ratio_template` and automatic differentiation (see [ForwardDiff](https://github.com/JuliaDiff/ForwardDiff.jl)) to construct ``A(\theta, Y, X)`` and, then, solve the bias-reducing adjusted estimating equations. All this is simply done by
```@repl 1
result_br = fit(ratio_template, my_data, [0.1], estimation_method = "RBM")
```
where `RBM` stands for reduced-bias `M`-estimation.
Kosmidis & Lunardon (2020) show that the reduced-bias estimator of ``\theta`` is ``\tilde\theta = (s_Y + s_{XY}/s_{X})/(s_X + s_{XX}/s_{X})``. The code chunk below tests that this is indeed the result **GEEBRA** returns.
```@repl 1
sx = sum(my_data.x);
sxx = sum(my_data.x .* my_data.x);
sy = sum(my_data.y);
sxy = sum(my_data.x .* my_data.y);
isapprox(sy/sx, result_m.theta[1])
isapprox((sy + sxy/sx)/(sx + sxx/sx), result_br.theta[1])
```
## Logistic regression
### Using [`objective_function_template`](@ref)
Here, we use **GEEBRA**'s [`objective_function_template`](@ref) to estimate a logistic regression model using maximum likelihood and maximum penalized likelihood, with the empirical bias-reducing penalty in Kosmidis & Lunardon (2020).
```@repl 2
using GEEBRA
using Random
using Distributions
using Optim
```
A data type for logistic regression models (consisting of a response vector `y`, a model matrix `x`, and a vector of weights `m`) is
```@repl 2
struct logistic_data
y::Vector
x::Array{Float64}
m::Vector
end
```
A function to compute the number of observations from `logistic_data` objects is
```@repl 2
function logistic_nobs(data::logistic_data)
nx = size(data.x)[1]
ny = length(data.y)
nm = length(data.m)
if (nx != ny)
error("number of rows in of x is not equal to the length of y")
elseif (nx != nm)
error("number of rows in of x is not equal to the length of m")
elseif (ny != nm)
error("length of y is not equal to the length of m")
end
nx
end
```
The logistic regression log-likelihood contribution at a parameter `theta` for the ``i``th observation of the data `data` is
```@repl 2
function logistic_loglik(theta::Vector,
data::logistic_data,
i::Int64)
eta = sum(data.x[i, :] .* theta)
mu = exp.(eta)./(1 .+ exp.(eta))
data.y[i] .* log.(mu) + (data.m[i] - data.y[i]) .* log.(1 .- mu)
end
```
Let's simulate some logistic regression data with $10$ covariates
```@repl 2
Random.seed!(123);
n = 100;
m = 1;
p = 10
x = Array{Float64}(undef, n, p);
x[:, 1] .= 1.0;
for j in 2:p
x[:, j] .= rand(n);
end
true_betas = randn(p) * sqrt(p);
y = rand.(Binomial.(m, cdf.(Logistic(), x * true_betas)));
my_data = logistic_data(y, x, fill(m, n));
```
and set up an `objective_function_template` for logistic regression
```@repl 2
logistic_template = objective_function_template(logistic_nobs, logistic_loglik)
```
The maximum likelihood estimates starting at `true_betas` are
```@repl 2
o1_ml = fit(logistic_template, my_data, true_betas, optim_method = NelderMead())
```
`fit` uses methods from the [**Optim**](https://github.com/JuliaNLSolvers/Optim.jl) package internally. Here, we used the `Optim.NelderMead` method. Alternative optimization methods and options can be supplied directly through the [keyword arguments](https://docs.julialang.org/en/v1/manual/functions/#Keyword-Arguments-1) `optim_method` and `optim_options`, respectively. For example,
```@repl 2
o2_ml = fit(logistic_template, my_data, true_betas, optim_method = LBFGS(), optim_options = Optim.Options(g_abstol = 1e-05))
```
The reduced-bias estimates starting at the maximum likelihood ones are
```@repl 2
o1_br = fit(logistic_template, my_data, coef(o1_ml), estimation_method = "RBM")
```
### Using [`estimating_function_template`](@ref)
The same results as above can be returned using an [`estimating_function_template`](@ref) for logistic regression.
The contribution to the derivatives of the log-likelihood for logistic regression is
```@repl 2
function logistic_ef(theta::Vector,
data::logistic_data,
i::Int64)
eta = sum(data.x[i, :] .* theta)
mu = exp.(eta)./(1 .+ exp.(eta))
data.x[i, :] * (data.y[i] - data.m[i] * mu)
end
```
Then, solving the bias-reducing adjusted estimating equations
```@repl 2
logistic_template_ef = estimating_function_template(logistic_nobs, logistic_ef);
e1_br = fit(logistic_template_ef, my_data, true_betas, estimation_method = "RBM")
```
returns the reduced-bias estimates from maximum penalized likelihood:
```@repl 2
isapprox(coef(o1_br), coef(e1_br))
```
### Bias-reduction methods
**GEEBRA** currently implements two alternative bias-reduction methods, called `implicit_trace` and `explicit_trace`. `implicit_trace` adjusts the estimating functions or penalizes the objectives, as we have seen earlier. `explicit_trace`, on the other hand, forms an estimate of the bias of the ``M``-estimator and subtracts that from the ``M``-estimates. The default method is `implicit_trace`.
For example, for logistic regression via estimating functions
```@repl 2
e2_br = fit(logistic_template_ef, my_data, true_betas, estimation_method = "RBM", br_method = "explicit_trace")
```
which gives slightly different estimates than those from the `implicit_trace` fit in `e1_br`.
The same can be done using objective functions, but numerical differentiation (using the [FiniteDiff](https://github.com/JuliaDiff/FiniteDiff.jl) package) is used to approximate the gradient of the bias-reducing penalty (i.e. ``A(\theta)``).
```@repl 2
o2_br = fit(logistic_template, my_data, true_betas, estimation_method = "RBM", br_method = "explicit_trace")
isapprox(coef(e2_br), coef(o2_br))
```
| GEEBRA | https://github.com/ikosmidis/GEEBRA.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 616 | using DataTools
using Documenter
makedocs(;
modules=[DataTools],
authors="Takafumi Arakaki <[email protected]> and contributors",
repo="https://github.com/JuliaFolds/DataTools.jl/blob/{commit}{path}#L{line}",
sitename="DataTools.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://JuliaFolds.github.io/DataTools.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
strict = get(ENV, "CI", "false") == "true",
)
deploydocs(;
repo="github.com/JuliaFolds/DataTools.jl",
push_preview = true,
)
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 996 | module DataTools
export averaging,
firstitem,
firstitems,
inc1,
lastitem,
lastitems,
meanvar,
modifying,
nitems,
oncol,
rightif
using Base: HasLength, HasShape, IteratorSize
using InitialValues: InitialValues
using Accessors: @optic, PropertyLens, modify, set
using StaticNumbers: static
using Statistics: Statistics, mean, std, var
using Tables: Tables
using Transducers:
Composition,
Count,
IdentityTransducer,
Map,
MapSplat,
Scan,
Take,
TakeLast,
Transducers,
combine,
complete,
extract_transducer,
next,
opcompose,
reducingfunction,
right,
start
include("utils.jl")
include("oncol.jl")
include("modifying.jl")
include("reductions.jl")
include("reducers.jl")
# Use README as the docstring of the module:
@doc let path = joinpath(dirname(@__DIR__), "README.md")
include_dependency(path)
replace(read(path, String), r"^```julia"m => "```jldoctest README")
end DataTools
end
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 2019 | """
modifying(; \$property₁ = f₁, ..., \$propertyₙ = fₙ) -> g::Function
modifying(lens₁ => f₁, ..., lensₙ => fₙ) -> g::Function
Create a function that runs function `fᵢ` on the locations specified
by `propertyᵢ` or `lensᵢ`.
The keyword-only method `modifying(; a = f₁, b = f₂)` is equivalent to
`modifying(@optic(_.a) => f₁, @optic(_.b) => f₂)`.
The unary method `g(x)` is equivalent to
```julia
x = modify(f₁, x, lens₁)
x = modify(f₂, x, lens₂)
...
x = modify(fₙ, x, lensₙ)
```
The binary method `g(x, y)` is equivalent to
```julia
x = set(x, lens₁, f₁(lens₁(x), lens₁(y)))
x = set(x, lens₂, f₂(lens₂(x), lens₂(y)))
...
x = set(x, lensₙ, fₙ(lensₙ(x), lensₙ(y)))
```
Note that the locations that are not specified by the lenses keep the
values as in `x`. This is similar to how `mergewith` behaves.
# Examples
```jldoctest
julia> using DataTools
julia> map(modifying(a = string), [(a = 1, b = 2), (a = 3, b = 4)])
2-element Array{NamedTuple{(:a, :b),Tuple{String,Int64}},1}:
(a = "1", b = 2)
(a = "3", b = 4)
julia> reduce(modifying(a = +), [(a = 1, b = 2), (a = 3, b = 4)])
(a = 4, b = 2)
julia> using Accessors
julia> map(modifying(@optic(_.a[1].b) => x -> 10x),
[(a = ((b = 1,), 2),), (a = ((b = 3,), 4),)])
2-element Array{NamedTuple{(:a,),Tuple{Tuple{NamedTuple{(:b,),Tuple{Int64}},Int64}}},1}:
(a = ((b = 10,), 2),)
(a = ((b = 30,), 4),)
```
"""
modifying
modifying(; specs...) =
ModifyingFunction(map(((k, v),) -> PropertyLens{k}() => v, Tuple(specs)))
modifying(specs::Pair...) = ModifyingFunction(specs)
modifying(f, lens) = ModifyingFunction((lens => f,))
struct ModifyingFunction{FS} <: Function
functions::FS
end
@inline (f::ModifyingFunction)(x) =
foldl(f.functions; init = x) do x, (lens, g)
@_inline_meta
modify(g, x, lens)
end
@inline (f::ModifyingFunction)(x, y) =
foldl(f.functions; init = x) do z, (lens, g)
@_inline_meta
modify(z, lens) do v
@_inline_meta
g(v, lens(y))
end
end
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 5429 | # Something like `DataFrames.combine`
# https://juliadata.github.io/DataFrames.jl/stable/man/split_apply_combine/
# https://juliadata.github.io/DataFrames.jl/stable/lib/functions/#DataFrames.select
"""
oncol(iname₁ => spec₁, ..., inameₙ => specₙ) -> f::Function
oncol(; \$iname₁ = spec₁, ..., \$inameₙ = specₙ) -> f::Function
Combine functions that work on a column and create a function that
work on an entire row.
It constructs a reducing step function acting on a table row where
`specᵢ` is either a reducing step function or a `Pair` of a reducing
step function and an output column name.
It also defines a unary function when `specᵢ` is either a unary
function or a `Pair` of a unary function and an output column name.
This function is inspired by the "`Pair` notation" in DataFrames.jl
(see also [Split-apply-combine ·
DataFrames.jl](https://juliadata.github.io/DataFrames.jl/stable/man/split_apply_combine/)
and
[`DataFrames.select`](https://juliadata.github.io/DataFrames.jl/stable/lib/functions/#DataFrames.select)).
# Examples
```jldoctest oncol
julia> using DataTools
using Transducers
julia> rf = oncol(a = +, b = *);
julia> foldl(rf, Map(identity), [(a = 1, b = 2), (a = 3, b = 4)])
(a = 4, b = 8)
julia> rf((a = 1, b = 2), (a = 3, b = 4))
(a = 4, b = 8)
julia> rf = oncol(:a => (+) => :sum, :a => max => :max);
julia> foldl(rf, Map(identity), [(a = 1,), (a = 2,)])
(sum = 3, max = 2)
julia> rf((sum = 1, max = 1), (a = 2,))
(sum = 3, max = 2)
julia> rf = oncol(:a => min, :a => max);
julia> foldl(rf, Map(identity), [(a = 2,), (a = 1,)])
(a_min = 1, a_max = 2)
julia> rf((a_min = 2, a_max = 2), (a = 1,))
(a_min = 1, a_max = 2)
julia> foldl(rf, Map(x -> (a = x,)), [5, 2, 6, 8, 3])
(a_min = 2, a_max = 8)
```
`oncol` also defines a unary function
```jldoctest oncol
julia> f = oncol(a = string);
julia> f((a = 1, b = 2))
(a = "1",)
```
Note that `oncol` does not verify the arity of input functions. If
the input functions have unary and binary methods, `oncol` is callable
with both arities:
```jldoctest oncol
julia> f((a = 1, b = 2), (a = 3, b = 4))
(a = "13",)
```
"""
oncol
struct Property{name} end
Property(p::Property) = p
Property(name::Symbol) = Property{name}()
Property(::Val{name}) where {name} = Property{name}()
@inline getprop(x, ::Property{name}) where {name} = getproperty(x, name)
@inline Base.Symbol(::Property{name}) where {name} = name::Symbol
const PropertyLike = Union{Symbol,Val,Property}
struct OnRowFunction{FS} <: Function
functions::FS
end
# :x => f (x_f = f(a.x, b.x),)
# :x => f => :y (y = f(a.x, b.x),)
# (:x, :y) => f => :z (z = f((a.x, a.y), (b.x, b.y)),) ???
@inline (f::OnRowFunction)(x) =
mapfoldl(merge, f.functions; init = NamedTuple()) do (iname, g, oname)
@_inline_meta
(; Symbol(oname) => g(getprop(x, iname)))
end
@inline (rf::OnRowFunction)(acc, x) = next(rf, acc, x)
@inline Transducers.next(rf::OnRowFunction, acc, x) =
mapfoldl(merge, rf.functions; init = NamedTuple()) do (iname, op, oname)
@_inline_meta
(; Symbol(oname) => next(op, getprop(acc, oname), getprop(x, iname)))
end
Transducers.start(rf::OnRowFunction, init) =
mapfoldl(merge, rf.functions; init = NamedTuple()) do (_, op, oname)
(; Symbol(oname) => start(op, init))
end
# TODO: dispatch on "Initializer" type instead
Transducers.start(rf::OnRowFunction, init::RowLike) =
mapfoldl(merge, rf.functions; init = NamedTuple()) do (_, op, oname)
(; Symbol(oname) => start(op, getprop(init, oname)))
end
Transducers.complete(rf::OnRowFunction, acc) =
mapfoldl(merge, rf.functions; init = NamedTuple()) do (_, op, oname)
(; Symbol(oname) => complete(op, getprop(acc, oname)))
end
Transducers.combine(rf::OnRowFunction, a, b) =
mapfoldl(merge, rf.functions; init = NamedTuple()) do (_, op, oname)
(; Symbol(oname) => combine(op, getprop(a, oname), getprop(b, oname)))
end
# TODO: define better API:
Transducers._asmonoid(rf::OnRowFunction) =
OnRowFunction(map(modifying(@optic(_[2]) => Transducers._asmonoid), rf.functions))
Transducers.Completing(rf::OnRowFunction) =
OnRowFunction(map(modifying(@optic(_[2]) => Transducers.Completing), rf.functions))
@inline process_spec_kwargs((iname, spec)::Pair) =
process_spec(Property(iname), spec, Property(iname))
@inline process_spec(name::PropertyLike) = (Property(name), identity, Property(name))
@inline process_spec((iname, spec)::Pair) = process_spec(iname, spec)
@inline process_spec(iname, (op, oname)::Pair) = process_spec(iname, op, oname)
@inline process_spec(iname, op) = process_spec(iname, op, outname(iname, op))
@inline process_spec(iname, op, oname) = (Property(iname), op, Property(oname))
outname(iname, op) = Symbol(Symbol(iname), '_', funname(op))
funname(op::ComposedFunction) = Symbol(funname(op.f), :_, funname(op.g))
function funname(op)
name = string(op)
return startswith(name, "#") ? :function : Symbol(op)
end
oncol(; specs...) =
if any(((_, spec),) -> spec isa Pair, specs)
fs = map(process_spec, Tuple(specs))
@assert allunique(map(last, fs)) # TODO: uniquify function names
OnRowFunction(fs)
else
OnRowFunction(map(process_spec_kwargs, Tuple(specs)))
end
oncol(specs::Union{Pair{<:PropertyLike},PropertyLike}...) =
OnRowFunction(map(process_spec, specs))
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 2420 | """
nitems(xs) -> n::Integer
Count number of items in `xs`. Consume `xs` if necessary.
# Examples
```jldoctest
julia> using DataTools, Transducers
julia> nitems(1:10)
10
julia> 1:10 |> Filter(isodd) |> Map(inv) |> nitems
5
```
"""
nitems
nitems(xs) =
if IteratorSize(xs) isa Union{HasLength, HasShape}
length(xs)
else
xf, coll = extract_transducer(xs)
_nitems(_pop_innermost_maplikes(xf), coll)
end
_pop_innermost_maplikes(xf) = xf
_pop_innermost_maplikes(::Union{Map,MapSplat,Scan}) = IdentityTransducer()
function _pop_innermost_maplikes(xf::Composition)
inner = _pop_innermost_maplikes(xf.inner)
if inner isa IdentityTransducer
return _pop_innermost_maplikes(xf.outer)
else
opcompose(xf.outer, inner)
end
end
_nitems(::IdentityTransducer, xs) = _nitems(xs)
_nitems(xf, xs) = xs |> xf |> _nitems
_nitems(xs) =
if IteratorSize(xs) isa Union{HasLength, HasShape}
length(xs)
else
foldl(inc1, IdentityTransducer(), xs)
end
# TODO: optimization for `Cat`.
"""
firstitem(xs)
Get the first item of `xs`. Consume `xs` if necessary.
# Examples
```jldoctest
julia> using DataTools, Transducers
julia> firstitem(3:7)
3
julia> 3:7 |> Map(x -> x + 1) |> Filter(isodd) |> firstitem
5
```
"""
firstitem
firstitem(xs::AbstractArray) = first(xs)
firstitem(xs) = foldl(right, Take(1), xs)
"""
lastitem(xs)
Get the last item of `xs`. Consume `xs` if necessary.
# Examples
```jldoctest
julia> using DataTools, Transducers
julia> lastitem(3:7)
7
julia> 3:7 |> Map(x -> x + 1) |> Filter(isodd) |> lastitem
7
```
"""
lastitem
lastitem(xs::AbstractArray) = last(xs)
lastitem(xs) = foldl(right, Map(identity), xs)
"""
firstitems(xs, n::Integer)
firstitems(n::Integer) -> xs -> firstitems(xs, n)
Get the first `n` items of `xs`. Consume `xs` if necessary.
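For example (a sketch of typical usage; for arrays and ranges the result is a `view` into the input):
```julia
firstitems(3:7, 2)   # the first two items, 3 and 4
firstitems(2)(3:7)   # same, via the curried form
```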
"""
firstitems
firstitems(n::Integer) = xs -> firstitems(xs, n)
firstitems(xs, n::Integer) = collect(Take(n), xs)
firstitems(xs::AbstractArray, n::Integer) = view(xs, firstindex(xs):firstindex(xs)+n-1)
"""
lastitems(xs, n::Integer)
lastitems(n::Integer) -> xs -> lastitems(xs, n)
Get the last `n` items of `xs`. Consume `xs` if necessary.
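For example (a sketch of typical usage; for arrays and ranges the result is a `view` into the input):
```julia
lastitems(3:7, 2)   # the last two items, 6 and 7
lastitems(2)(3:7)   # same, via the curried form
```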
"""
lastitems
lastitems(n::Integer) = xs -> lastitems(xs, n)
lastitems(xs, n::Integer) = collect(TakeLast(n), xs)
lastitems(xs::AbstractArray, n::Integer) = view(xs, lastindex(xs)-n+1:lastindex(xs))
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 7470 | """
inc1(n, _) -> n + 1
A reducing function for counting elements. It increments the first
argument by one.
# Examples
```jldoctest
julia> using DataTools
using Transducers
julia> inc1(10, :ignored)
11
julia> inc1(Init(inc1), :ignored)
1
julia> foldl(inc1, Map(identity), 'a':2:'e')
3
julia> foldl(TeeRF(+, inc1), Map(identity), 1:2:10) # sum and count
(25, 5)
julia> rf = oncol(:a => (+) => :sum, :a => inc1 => :count);
julia> foldl(rf, Map(identity), [(a = 1, b = 2), (a = 2, b = 3)])
(sum = 3, count = 2)
```
"""
inc1(n, _) = n + 1
Transducers.start(::typeof(inc1), ::InitializerFor{typeof(inc1)}) = 0
Transducers.combine(::typeof(inc1), a, b) = a + b
InitialValues.@def inc1 1
function merge_state end
Transducers.Completing(::typeof(merge_state)) = merge_state # TODO: remove this
@inline initialize_state(x) = x
@inline initialize_right(x) = initialize_state(x)
@inline initialize_left(x) = initialize_state(x)
const InitMergeState = InitialValues.GenericInitialValue{typeof(merge_state)}
merge_state(::InitMergeState, x) = initialize_right(x)
merge_state(x, ::InitMergeState) = initialize_left(x)
merge_state(x::InitMergeState, ::InitMergeState) = x
InitialValues.hasinitialvalue(::Type{typeof(merge_state)}) = true
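# Note on the pattern below: `merge_state` is the shared combine step for the reducing
# functions defined in this file (`averaging` and `meanvar`). Each element is first lifted
# into a singleton state (`singleton_average` / `singleton_meanvar`) via `Map`, and
# `merge_state` merges two such states, which is what makes the same reducing function
# usable for both sequential `foldl` and parallel `reduce`.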
"""
averaging
A reducing function for averaging elements.
# Examples
```jldoctest
julia> using DataTools
using Transducers
julia> foldl(averaging, Filter(isodd), 1:10)
5.0
julia> rf = oncol(a = averaging, b = averaging);
julia> foldl(rf, Map(identity), [(a = 1, b = 2), (a = 2, b = 3)])
(a = 1.5, b = 2.5)
```
"""
averaging
struct AverageState{Sum}
sum::Sum
count::Int
end
@inline singleton_average(x) = AverageState(x, 1)
@inline merge_state(a::AverageState, b::AverageState) =
AverageState(a.sum + b.sum, a.count + b.count)
@inline Transducers.complete(::typeof(merge_state), a::AverageState) = a.sum / a.count
const averaging = reducingfunction(Map(singleton_average), merge_state)
"""
meanvar
A reducing function for computing the mean and variance.
# Examples
```jldoctest
julia> using DataTools, Transducers, Statistics
julia> acc = foldl(meanvar, Filter(isodd), 1:96)
MeanVarState(mean=48.0, var=784.0, count=48)
julia> acc.mean, mean(acc)
(48.0, 48.0)
julia> acc.var, var(acc), var(acc, corrected = false)
(784.0, 784.0, 767.6666666666666)
julia> acc.std, std(acc)
(28.0, 28.0)
julia> acc.count
48
julia> m, v, c = acc; # destructuring works
julia> Tuple(acc) # (mean, var, count)
(48.0, 784.0, 48)
julia> NamedTuple(acc)
(mean = 48.0, var = 784.0, count = 48)
julia> rf = oncol(a = meanvar, b = meanvar);
julia> foldl(rf, Map(identity), [(a = 1, b = 2), (a = 2, b = 3)])
(a = MeanVarState(mean=1.5, var=0.5, count=2), b = MeanVarState(mean=2.5, var=0.5, count=2))
```
"""
meanvar
struct MeanVarState{Count,Mean,M2}
count::Count
mean::Mean
m2::M2
end
@inline singleton_meanvar(x) = MeanVarState(static(1), x, static(0))
# Optimization for avoiding type-changing accumulator
@inline function initialize_state(a::MeanVarState)
count, ione = promote(a.count, 1)
mean = float(a.mean)
m2, = promote(a.m2, (one(mean) * one(mean) + one(a.m2)) / (ione + ione))
return MeanVarState(count, mean, m2)
end
@inline function merge_state(a::MeanVarState, b::MeanVarState)
d = b.mean - a.mean
count = a.count + b.count
return MeanVarState(
a.count + b.count,
a.mean + d * b.count / count,
a.m2 + b.m2 + d^2 * a.count * b.count / count,
)
end
@inline Transducers.complete(::typeof(merge_state), a::MeanVarState) = a
Statistics.mean(a::MeanVarState) = a.mean
Statistics.var(a::MeanVarState; corrected::Bool = true) =
a.m2 / (corrected ? (a.count - 1) : a.count)
Statistics.std(a::MeanVarState; kw...) = sqrt(var(a; kw...))
const meanvar = reducingfunction(Map(singleton_meanvar), merge_state)
Base.propertynames(::MeanVarState) = (:mean, :var, :count)
Base.propertynames(::MeanVarState, private) =
private ? (:mean, :var, :count, :m2, :std) : (:mean, :var, :count)
@inline function Base.getproperty(a::MeanVarState, name::Symbol)
if name === :count
return getfield(a, :count)
elseif name === :mean
return getfield(a, :mean)
elseif name === :m2
return getfield(a, :m2)
elseif name === :var
return var(a)
elseif name === :std
return std(a)
else
throw(KeyError(name))
end
end
Base.IteratorEltype(::Type{<:MeanVarState}) = Base.EltypeUnknown()
Base.IteratorSize(::Type{<:MeanVarState}) = Base.HasLength()
Base.length(::MeanVarState) = 3
Base.iterate(a::MeanVarState) = (mean(a), Val(2))
Base.iterate(a::MeanVarState, ::Val{2}) = (var(a), Val(3))
Base.iterate(a::MeanVarState, ::Val{3}) = (a.count, Val(4))
Base.iterate(a::MeanVarState, ::Val{4}) = nothing
Base.NamedTuple(a::MeanVarState) = (mean = mean(a), var = var(a), count = a.count)
function Base.show(io::IO, a::MeanVarState)
if get(io, :limit, false) !== true
print(io, @__MODULE__, '.')
end
if a === MeanVarState(a.count, a.mean, a.m2)
print(io, "MeanVarState")
else
print(IOContext(io, :module => @__MODULE__), typeof(a))
end
print(io, '(')
print(io, "mean=", mean(a))
print(io, ", var=", var(a))
print(io, ", count=", a.count)
if get(io, :limit, false) !== true
print(io, ", m2=", a.m2)
end
print(io, ')')
end
# Constructor to be compatible with the `repr` (and for testing)
function (::Type{T})(; count, mean, m2 = nothing, var = nothing) where {T<:MeanVarState}
m2 === nothing && var === nothing && throw(ArgumentError("`m2` or `var` required"))
if m2 === nothing
m2 = var * (count - 1)
end
return T(count, mean, m2)
end
"""
rightif(predicate, [focus = identity]) -> op::Function
Return a binary function that keeps the first argument unless
`predicate` evaluates to `true`.
This is equivalent to
```julia
(l, r) -> predicate(focus(l), focus(r)) ? r : l
```
# Examples
```jldoctest
julia> using DataTools, Transducers
julia> table = 1:100 |> Map(x -> (k = gcd(x, 42), v = x));
julia> table |> Take(5) |> collect # preview
5-element Array{NamedTuple{(:k, :v),Tuple{Int64,Int64}},1}:
(k = 1, v = 1)
(k = 2, v = 2)
(k = 3, v = 3)
(k = 2, v = 4)
(k = 1, v = 5)
julia> foldl(rightif(<), Map(x -> x.k), table) # maximum
42
julia> foldl(rightif(>), Map(x -> x.k), table) # minimum
1
julia> foldl(rightif(<, x -> x.k), table) # first maximum
(k = 42, v = 42)
julia> foldl(rightif(<=, x -> x.k), table) # last maximum
(k = 42, v = 84)
julia> foldl(rightif(>, x -> x.k), table) # first minimum
(k = 1, v = 1)
julia> foldl(rightif(>=, x -> x.k), table) # last minimum
(k = 1, v = 97)
julia> table |> Scan(rightif(<, x -> x.k)) |> Take(5) |> collect
5-element Array{NamedTuple{(:k, :v),Tuple{Int64,Int64}},1}:
(k = 1, v = 1)
(k = 2, v = 2)
(k = 3, v = 3)
(k = 3, v = 3)
(k = 3, v = 3)
```
"""
rightif(predicate, focus = identity) = RightIf(predicate, focus)
struct RightIf{P,F} <: _Function
predicate::P
focus::F
end
RightIf(predicate::P, ::Type{F}) where {P,F} = RightIf{P,Type{F}}(predicate, F)
@inline (f::RightIf)(l, r) = f.predicate(f.focus(l), f.focus(r)) ? r : l
const InitRightIf{P,F} = InitialValues.GenericInitialValue{RightIf{P,F}}
(::RightIf)(::InitRightIf, x) = x
(::RightIf)(x, ::InitRightIf) = x
(::RightIf)(x::InitRightIf, ::InitRightIf) = x
InitialValues.hasinitialvalue(::Type{<:RightIf}) = true
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 981 | @static if VERSION < v"1.8.0-DEV.410"
using Base: @_inline_meta
else
const var"@_inline_meta" = Base.var"@inline"
end
const RowLike = Union{NamedTuple,Tables.Row,Tables.AbstractRow}
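# Compatibility shim: on Julia < 1.6 the named `Base.ComposedFunction` type is not
# available, so the fallback branch below recovers the concrete type that `∘` produces
# (via `identity ∘ convert`) and binds it as `ComposedFunction`.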
if isdefined(Base, :ComposedFunction) # Julia >= 1.6.0-DEV.85
using Base: ComposedFunction
else
const ComposedFunction = let h = identity ∘ convert
@assert h.f === identity
@assert h.g === convert
getfield(parentmodule(typeof(h)), nameof(typeof(h)))
end
@assert identity ∘ convert isa ComposedFunction
end
const GenericInitializer = Union{typeof(Transducers.Init),Transducers.InitOf}
const InitializerFor{OP} = Union{GenericInitializer,InitialValues.GenericInitialValue{OP}}
# Just like `Function` but for defining some common methods.
abstract type _Function <: Function end
# Avoid `Function` fallbacks:
@nospecialize
Base.show(io::IO, ::MIME"text/plain", f::_Function) = show(io, f)
Base.print(io::IO, f::_Function) = show(io, f)
@specialize
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 498 | module TestDataTools
using Test
@testset "$file" for file in sort([
file for file in readdir(@__DIR__) if match(r"^test_.*\.jl$", file) !== nothing
])
if file == "test_doctest.jl"
if lowercase(get(ENV, "JULIA_PKGEVAL", "false")) == "true"
@info "Skipping doctests on PkgEval."
continue
elseif VERSION >= v"1.6-"
@info "Skipping doctests on Julia $VERSION."
continue
end
end
include(file)
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 422 | module TestAveraging
using DataTools
using Test
using Transducers: Filter, Map
reduce_bs1(args...; kw...) = reduce(args...; basesize = 1, kw...)
@testset for fold in [foldl, reduce_bs1, reduce]
@test fold(averaging, Filter(isodd), 1:10) == 5
@test fold(
oncol(a = averaging, b = averaging),
Map(identity),
[(a = 1, b = 2), (a = 2, b = 3)],
) == (a = 1.5, b = 2.5)
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 158 | module TestDoctest
import DataTools
using Documenter: doctest
using Test
@testset "doctest" begin
doctest(DataTools; manual = false)
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 393 | module TestFirstitems
using DataTools
using Test
using Transducers
include("utils.jl")
@testset "firstitem" begin
@test firstitem(3:7) === 3
@test 3:7 |> Map(x -> x + 1) |> Filter(isodd) |> firstitem == 5
end
@testset "firstitems" begin
@test firstitems(3:7, 2) ==ₜ view(3:7, 1:2)
@test 3:7 |> Map(x -> x + 1) |> Filter(isodd) |> firstitems(2) == [5, 7]
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 386 | module TestLastitems
using DataTools
using Test
using Transducers
include("utils.jl")
@testset "lastitem" begin
@test lastitem(3:7) === 7
@test 3:7 |> Map(x -> x + 1) |> Filter(isodd) |> lastitem == 7
end
@testset "lastitems" begin
@test lastitems(3:7, 2) ==ₜ view(3:7, 4:5)
@test 3:7 |> Map(x -> x + 1) |> Filter(isodd) |> lastitems(2) == [5, 7]
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 2695 | module TestMeanVar
using DataTools
using DataTools: MeanVarState
using Statistics
using Test
using Transducers: Filter, Map, TeeRF
include("utils.jl")
reduce_bs1(args...; kw...) = reduce(args...; basesize = 1, kw...)
@testset for fold in [foldl, reduce_bs1, reduce]
foldl = reduce = nothing
@testset "accessors" begin
s = fold(meanvar, Filter(isodd), 1:16)
m, v, c = s
@test Tuple(s) == (m, v, c) == (8.0, 24.0, 8)
@test NamedTuple(s) == (mean = m, var = v, count = c)
s = fold(meanvar, Filter(isodd), 1:10)
@test mean(s) === mean(1:2:9) === 5.0
@test var(s) === var(1:2:9) === var(s; corrected = true) === 10.0
@test var(s; corrected = false) === var(1:2:9; corrected = false) === 8.0
s = fold(meanvar, Filter(isodd), 1:96)
@test mean(s) === mean(1:2:95) === 48.0
@test var(s) === var(1:2:95) === 784.0
@test std(s) === std(1:2:95) === 28.0
end
@testset "TeeRF" begin
s1, s2 = fold(
TeeRF(Filter(isodd)'(meanvar), Filter(iseven)'(meanvar)),
Map(identity),
1:96,
)
@test s1 isa MeanVarState
@test s2 isa MeanVarState
@test mean(s1) === 48.0
@test mean(s2) === 49.0
@test var(s1) === 784.0
@test var(s2) === 784.0
end
@testset "oncol" begin
snt = fold(
oncol(odd = meanvar, even = meanvar),
Map(x -> (odd = 2x - 1, even = 2x)),
1:48,
)
@test snt.odd isa MeanVarState
@test snt.even isa MeanVarState
@test mean(snt.odd) === 48.0
@test mean(snt.even) === 49.0
@test var(snt.odd) === 784.0
@test var(snt.even) === 784.0
end
end
@testset "show and constructor" begin
@testset "default type parameters" begin
s = MeanVarState(mean = 8.0, count = 8, m2 = 168.0)
@test s === MeanVarState(mean = 8.0, var = 24.0, count = 8)
str = sprint(show, s; context = :limit => true)
@test str == "MeanVarState(mean=8.0, var=24.0, count=8)"
str = sprint(show, s; context = :limit => false)
@test str == "DataTools.MeanVarState(mean=8.0, var=24.0, count=8, m2=168.0)"
end
@testset "non-default type parameters" begin
s = MeanVarState{Any,Any,Any}(mean = 8.0, var = 24.0, count = 8)
str = sprint(show, s; context = :limit => true)
@test str ==ᵣ "MeanVarState{Any,Any,Any}(mean=8.0, var=24.0, count=8)"
str = sprint(show, s; context = :limit => false)
@test str ==ᵣ
"DataTools.MeanVarState{Any,Any,Any}(mean=8.0, var=24.0, count=8, m2=168.0)"
end
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 658 | module TestModifying
using DataTools
using Accessors: @optic
using Test
using Transducers
reduce_bs1(args...; kw...) = reduce(args...; basesize = 1, kw...)
@testset for fold in [foldl, reduce_bs1, reduce]
@test fold(modifying(a = +), Map(identity), [(a = 1, b = 2), (a = 3, b = 4)]) ==
(a = 4, b = 2)
end
@testset "map" begin
@test map(modifying(a = string), [(a = 1, b = 2), (a = 3, b = 4)]) ==
[(a = "1", b = 2), (a = "3", b = 4)]
@test map(
modifying(@optic(_.a[1].b) => x -> 10x),
[(a = ((b = 1,), 2),), (a = ((b = 3,), 4),)],
) == [(a = ((b = 10,), 2),), (a = ((b = 30,), 4),)]
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 887 | module TestNItems
using DataTools
using Test
using Transducers
using Transducers: IdentityTransducer
@testset "_pop_innermost_maplikes" begin
pop(args...) = DataTools._pop_innermost_maplikes(opcompose(args...))
@test pop(Map(inv)) === IdentityTransducer()
@test pop(MapSplat(tuple), Map(inv)) === IdentityTransducer()
@test pop(Filter(isodd), MapSplat(tuple), Map(inv)) === Filter(isodd)
@test pop(Map(isodd), Filter(isodd), MapSplat(tuple), Map(inv)) ===
opcompose(Map(isodd), Filter(isodd))
end
@testset "nitems" begin
@test nitems(1:10) == 10
@test nitems(error(x) for x in 1:10) == 10
@test 1:10 |> Map(error) |> MapSplat(error) |> Scan(+) |> nitems == 10
@test 1:10 |> Filter(isodd) |> Map(error) |> MapSplat(error) |> nitems == 5
@test 1:10 |> Filter(isodd) |> Map(x -> x ÷ 3) |> Filter(isodd) |> nitems == 3
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 693 | module TestOncol
using DataTools
using Test
using Transducers
reduce_bs1(args...; kw...) = reduce(args...; basesize = 1, kw...)
@testset begin
@test oncol(a = +, b = *)((a = 1, b = 2), (a = 3, b = 4)) == (a = 4, b = 8)
@test oncol(:a => (+) => :sum, :a => max => :max)((sum = 1, max = 1), (a = 2,)) ==
(sum = 3, max = 2)
@test oncol(:a => min, :a => max)((a_min = 2, a_max = 2), (a = 1,)) ==
(a_min = 1, a_max = 2)
end
@testset for fold in [foldl, reduce_bs1, reduce]
@test fold(
oncol(a = +, b = averaging),
Filter(x -> isodd(x.a)),
[(a = 1, b = 7), (a = 2, b = 3), (a = 3, b = 4)],
) == (a = 4, b = 5.5)
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 461 | module TestRightIf
using DataTools
using Test
using Transducers: Map, Take
reduce_bs1(args...; kw...) = reduce(args...; basesize = 1, kw...)
@testset for fold in [foldl, reduce_bs1, reduce]
foldl = nothing
table = 43:100 |> Map(x -> (k = gcd(x, 42), v = x))
@test fold(rightif(<), Map(x -> x.k), table) == 42
@test fold(rightif(>), Map(x -> x.k), table) == 1
@test fold(rightif(<, x -> x.k), table) == (k = 42, v = 84)
end
end # module
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | code | 506 | """
==ₜ(x, y)
Check that _type_ and value of `x` and `y` are equal.
"""
==ₜ(_, _) = false
==ₜ(x::T, y::T) where T = x == y
"""
==ₛ(a::AbstractString, b::AbstractString)
Equality check ignoring white spaces
"""
==ₛ(a::AbstractString, b::AbstractString) =
replace(a, r"\s" => "") == replace(b, r"\s" => "")
"""
==ᵣ(a::AbstractString, b::AbstractString)
Equality check appropriate for comparing `repr` output.
"""
==ᵣ
if VERSION >= v"1.6-"
const ==ᵣ = ==ₛ
else
const ==ᵣ = ==
end
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | docs | 1142 | # DataTools: manipulating flat tables and nested data structures using Transducers.jl
[](https://juliafolds.github.io/DataTools.jl/dev)
[](https://github.com/JuliaFolds/DataTools.jl/actions?query=workflow%3A%22Run+tests%22)
```julia
julia> using DataTools: oncol, modifying, averaging
julia> using Transducers: Filter
julia> data = [(a = 1, b = 7), (a = 2, b = 3), (a = 3, b = 4)];
julia> rf = oncol(a = +, b = averaging);
julia> foldl(rf, Filter(x -> isodd(x.a)), data)
(a = 4, b = 5.5)
julia> map(modifying(a = string), data)
3-element Array{NamedTuple{(:a, :b),Tuple{String,Int64}},1}:
(a = "1", b = 7)
(a = "2", b = 3)
(a = "3", b = 4)
julia> reduce(modifying(a = +), data)
(a = 6, b = 7)
julia> using Accessors: @optic
julia> data = [(a = ((b = 1,), 2),), (a = ((b = 3,), 4),)];
julia> map(modifying(@optic(_.a[1].b) => x -> 10x), data)
2-element Array{NamedTuple{(:a,),Tuple{Tuple{NamedTuple{(:b,),Tuple{Int64}},Int64}}},1}:
(a = ((b = 10,), 2),)
(a = ((b = 30,), 4),)
```
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 0.1.5 | e7b4d9e05344a6d24a3d50549e3e1bbf3e309e4c | docs | 107 | ```@meta
CurrentModule = DataTools
```
# DataTools
```@index
```
```@autodocs
Modules = [DataTools]
```
| DataTools | https://github.com/JuliaFolds/DataTools.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 682 | using Continuables
using Documenter
makedocs(;
modules=[Continuables],
authors="Stephan Sahm <[email protected]> and contributors",
repo="https://github.com/jolin-io/Continuables.jl/blob/{commit}{path}#L{line}",
sitename="Continuables.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://jolin-io.github.io/Continuables.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Manual" => "manual.md",
"Benchmark" => "benchmark.md",
"Library" => "library.md",
],
)
deploydocs(;
repo="github.com/jolin-io/Continuables.jl",
devbranch="main",
)
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 22943 | """
A Continuable is a function which takes a single argument: a continuation function a -> b.
We could have made most of the core functionality work for general functions a... -> b,
however the semantics become too complicated very quickly.
Take product, for instance. What should the product look like? Say we have two continuables and
name the continuation's arguments a... and b... respectively.
How should the follow-up continuation be called then?
cont(a..., b...) or cont(a, b)?
Probably something like: if a and b are both singleton tuples, then cont(a..., b...),
else cont(a, b). However, this seems to make things unnecessarily complex.
Another point is, for example, the interaction with iterables, which always deliver tuples.
See also the github issue https://github.com/JuliaLang/julia/issues/6614 on tuple destructuring, which
would give a similar familiarity.
Many documentation strings are taken and adapted from
https://github.com/JuliaCollections/Iterators.jl/blob/master/src/Iterators.jl.
"""
# TODO copy documentation strings
module Continuables
export
cont, @cont, AbstractContinuable, Continuable, innerfunctype,
@Ref, stoppable, stop,
emptycontinuable, singleton, repeated, iterated,
aschannel, ascontinuable, i2c, @i2c,
reduce, reduce!, zip, product, chain, flatten, cycle, foreach, map, all, any, sum, prod,
take, takewhile, drop, dropwhile, partition, groupbyreduce, groupby,
nth
using ExprParsers
using DataTypesBasic
using OrderedCollections
import Base.Iterators: cycle, flatten, take, drop, partition, product
include("utils.jl")
include("itertools.jl")
## Continuable Core -------------------------------------------------------------------------
"""
`cont` is reserved function parameter name
"""
cont(args...; kwargs...) = error("`cont` is reserved to be used within `Continuables.@cont`")
"""
AbstractContinuable
Abstract type which all continuable helper functions use for dispatch.
The interface for a continuable just consists of
```
Base.foreach(cont, continuable::YourContinuable)
```
For julia 1.0 you further need to provide
```
(continuable::YourContinuable)(cont) = foreach(cont, continuable)
```
which is provided automatically for more recent julia versions.
"""
abstract type AbstractContinuable end
"""
foreach(func, continuable)
Runs the continuable with `func` as the continuation. This is the core interface of a continuable.
It is especially handy when using `do` syntax.
"""
function Base.foreach(cont, ::AbstractContinuable)
error("You need to specialize `Base.foreach` for your custom Continuable type.")
end
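# Illustrative sketch of the interface described above, using a hypothetical custom type
# (`CountUp` is not part of this package; it only shows what a specialization could look like):
#
#     struct CountUp <: AbstractContinuable
#         n::Int
#     end
#     Base.foreach(cont, c::CountUp) = for i in 1:c.n
#         cont(i)
#     end
#     (c::CountUp)(cont) = foreach(cont, c)  # only needed explicitly on julia 1.0
#
#     collect(CountUp(3)) == [1, 2, 3]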
"""
Continuable(func)
Assumes func to have a single argument, the continuation function (usually named `cont`).
Example
-------
```
Continuable(function (cont)
for i in 1:10
cont(i)
end
end)
```
"""
struct Continuable{Func} <: AbstractContinuable
f::Func
# making sure typevar `Func` has always the correct meaning so that we can dispatch on it
# otherwise one could construct `Continuable{Any}(func)` which would break dispatching on the typevariable.
Continuable(f) = new{typeof(f)}(f)
end
Base.foreach(cont, c::Continuable) = c.f(cont)
(c::Continuable)(cont) = foreach(cont, c)
"""
innerfunctype(continuable)
Returns the generic type of `continuable.f`.
This can be used to specialize functions on specific Continuables, like `Base.IteratorSize` and else.
# Examples
```julia
mycont(x) = @cont cont(x)
Base.length(::Continuable{<:innerfunctype(mycont(:examplesignature))}) = :justatest
length(mycont(1)) == :justatest
length(mycont("a")) == :justatest
# other continuables are not affected
anothercont(x) = @cont cont(x)
length(anothercont(42)) == 1
```
"""
function innerfunctype(continuable::Continuable)
typeof(continuable.f).name.wrapper
end
include("./syntax.jl") # parts of syntax.jl require the Continuable definition, hence here
## conversions ----------------------------------------------
"""
aschannel(continuable) -> Channel
Convert the continuable into a channel. Performance is identical compared to when you would have build a Channel
directly.
"""
function aschannel(continuable::AbstractContinuable, size=0; elemtype=Any, taskref=nothing, spawn=false)
Channel{elemtype}(size, taskref=taskref, spawn=spawn) do channel
continuable() do x
put!(channel, x)
end
end
end
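# Illustrative usage (a sketch; the values are examples only): once converted, the Channel
# can be consumed like any other iterator.
#
#     ch = aschannel(ascontinuable(1:3))
#     collect(ch) == [1, 2, 3]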
"""
ascontinuable(iterable)
Converts an iterable to a Continuable. There should not be any performance loss.
"""
ascontinuable(iterable) = @cont foreach(cont, iterable)
"""
i2c(iterable)
Alias for [`ascontinuable`](@ref). "i2c" is meant as abbreviation for "iterable to continuable".
Also comes in macro form as [`@i2c`](@ref).
"""
const i2c = ascontinuable
"""
@i2c iterable
Alias for [`ascontinuable`](@ref). "i2c" is meant as abbreviation for "iterable to continuable".
Also comes in function form as [`i2c`](@ref).
"""
macro i2c(expr)
esc(:(ascontinuable($expr)))
end
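# Illustrative usage (values are examples only): all three forms construct the same Continuable.
#
#     collect(ascontinuable(2:4:10)) == collect(2:4:10)
#     collect(i2c(2:4:10)) == collect(2:4:10)
#     collect(@i2c 2:4:10) == collect(2:4:10)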
## factories -----------------------------------------------
"""
emptycontinuable
The continuable with no elements.
"""
const emptycontinuable = @cont () # mind the space!!
"""
singleton(value)
Construct a Continuable containing only the one given value.
"""
singleton(value) = @cont cont(value)
"""
repeated(() -> 2[, n])
repeated([n]) do
# ...
"returnvalue"
end
Constructs a Continuable which repeatedly calls the given function and yields its return value each time.
It repeats indefinitely, or exactly `n` times if `n` is given.
"""
@cont function repeated(f)
while true
cont(f())
end
end
@cont function repeated(f, n::Integer)
for _ in 1:n
cont(f())
end
end
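# Illustrative usage (values are examples only):
#
#     collect(repeated(() -> 4, 3)) == [4, 4, 4]
#     collect(take(repeated(() -> 4), 3)) == [4, 4, 4]  # unbounded version, cut off via `take`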
"""
iterated((x) -> x*x, startvalue)
iterated(startvalue) do x
# ...
x*x
end
Constructs an infinite Continuable which yields `startvalue`, then `f(startvalue)`, then `f(f(startvalue))`, and so on.
"""
@cont @Ref function iterated(f, x)
a = Ref(x)
cont(a)
while true
a = f(a)
cont(a)
end
end
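# Illustrative usage (values are examples only):
#
#     collect(take(3, iterated(x -> x*x, 2))) == [2, 4, 16]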
## core helpers ----------------------------------------------------------
# Continuable cannot implement Base.iterate efficiently
try
# only in more recent julia, we can specialize call syntax for AbstractTypes
(c::AbstractContinuable)(cont) = foreach(cont, c)
catch
end
Base.IteratorSize(::AbstractContinuable) = Base.SizeUnknown()
@Ref function Base.length(continuable::AbstractContinuable)
i = Ref(0)
continuable() do _
i += 1
end
i
end
Base.IteratorEltype(::AbstractContinuable) = Base.EltypeUnknown()
Base.eltype(::AbstractContinuable) = Any
"""
collect(continuable[, n]) -> Vector
Constructs a Vector out of the given Continuable. If the length `n` is also given explicitly,
a Vector of the respective size is preallocated. IMPORTANTLY, `n` needs to be the true length of the continuable.
A smaller `n` will result in an error.
"""
function Base.collect(c::AbstractContinuable)
everything = Vector(undef, 0)
reduce!(push!, c, init = everything)
end
@Ref function Base.collect(c::AbstractContinuable, n)
a = Vector(undef, n)
# unfortunately the nested call of enumerate results in slower code, hence we have a manual index here
# this is so drastic that for small `n` a preallocated version with enumerate would be slower than the non-preallocated version
i = Ref(1)
c() do x
a[i] = x
i += 1
end
a
end
"""
enumerate(continuable)
Constructs new Continuable with elements `(i, x)` for each `x` in the continuable, where `i` starts at `1` and
increments by `1` for each element.
"""
@cont @Ref function Base.enumerate(continuable::AbstractContinuable)
i = Ref(1)
continuable() do x
cont((i, x))
i += 1
end
end
"""
map(func, continuable)
Constructs new Continuable where the given `func` was applied to each element.
"""
Base.map(func, continuable::AbstractContinuable) = @cont continuable(x -> cont(func(x)))
"""
filter(predicate, continuable)
Constructs new Continuable where only elements `x` with `predicate(x) == true` are kept.
"""
@cont function Base.filter(bool, continuable::AbstractContinuable)
continuable() do x
if bool(x)
cont(x)
end
end
end
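# Illustrative usage of `map` and `filter` on Continuables (values are examples only):
#
#     collect(map(x -> x^2, i2c(1:3))) == [1, 4, 9]
#     collect(filter(isodd, i2c(1:5))) == [1, 3, 5]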
"""
reduce(operator, continuable; [init])
Like Base.reduce this will apply `operator` iteratively to combine all elements into one accumulated result.
"""
Base.reduce(op, continuable::AbstractContinuable; init = nothing) = foldl_continuable(op, continuable, init)
"""
    foldl(operator, continuable; [init])
Like Base.foldl this will apply `operator` iteratively to combine all elements into one accumulated result.
The order is guaranteed to be left to right.
"""
Base.foldl(op, continuable::AbstractContinuable; init = nothing) = foldl_continuable(op, continuable, init)
struct EmptyStart end
@Ref function foldl_continuable(op, continuable, init::Nothing)
acc = Ref{Any}(EmptyStart())
lifted_op(acc::EmptyStart, x) = x
lifted_op(acc, x) = op(acc, x)
continuable() do x
acc = lifted_op(acc, x)
end
acc
end
@Ref function foldl_continuable(op, continuable, init)
acc = Ref(init)
continuable() do x
acc = op(acc, x)
end
acc
end
"""
reduce!(op!, continuable; [init])
Mutating version of Base.reduce
If no `init` is given
`op!` is assumed to mutate a hidden state (equivalent to mere continuation)
else
`init` is the explicit state and will be passed to `op!` as first argument (the accumulator)
"""
function reduce!(op!, continuable::AbstractContinuable; init = nothing)
if isnothing(init)
continuable(op!)
else
reduce!(op!, continuable, init)
end
end
function reduce!(op!, continuable::AbstractContinuable, acc)
continuable() do x
op!(acc, x)
end
acc
end
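# Illustrative usage (values are examples only): collecting into an explicit accumulator.
#
#     reduce!(push!, i2c(1:10), init = []) == collect(1:10)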
"""
sum(continuable)
sums up all elements
"""
Base.sum(c::AbstractContinuable) = reduce(+, c)
"""
    prod(continuable)
multiplies up all elements
"""
Base.prod(c::AbstractContinuable) = reduce(*, c)
"""
all([func, ]continuable; [lazy])
Checks whether all elements in the continuable are true.
If a function `func` is given, it is first applied to the elements before comparing for truth.
If `lazy=true` (default) the Continuable will only be evaluated until the first `false` value.
Otherwise, with `lazy=false`, all elements of the Continuable will be combined.
"""
@Ref function Base.all(continuable::AbstractContinuable; lazy=true)
if lazy
stoppable(continuable, true) do b
if !b
stop(false)
end
end
else # non-lazy
b = Ref(true)
continuable() do x
b &= x
end
b
end
end
Base.all(f, continuable::AbstractContinuable; kwargs...) = all(map(f, continuable); kwargs...)
"""
any([func, ]continuable; [lazy])
Checks whether at least one element in the continuable is true.
If a function `func` is given, it is first applied to the elements before comparing for truth.
If `lazy=true` (default) the Continuable will only be evaluated until the first `true` value.
Otherwise, with `lazy=false`, all elements of the Continuable will be combined.
"""
@Ref function Base.any(continuable::AbstractContinuable; lazy=true)
if lazy
stoppable(continuable, false) do b
if b
stop(true)
end
end
else # non-lazy
b = Ref(false)
continuable() do x
b |= x
end
b
end
end
Base.any(f, continuable::AbstractContinuable; kwargs...) = any(map(f, continuable); kwargs...)
## zip ----------------------------
# zip is the only method which seems to be unimplementable with continuations
# hence we have to go to tasks or arrays
"""
azip(continuables...)
Zipping continuables via intermediate array representation
CAUTION: loads everything into memory
"""
azip(cs::AbstractContinuable...) = @cont begin
# not possible with continuations... bring it to memory and apply normal zip
array_cs = collect.(cs)
for t in zip(array_cs...)
cont(t)
end
end
"""
chzip(continuables...)
Zipping continuables via Channel
"""
chzip(cs::AbstractContinuable...) = @cont begin
# or use aschannel and iterate
channel_cs = aschannel.(cs)
for t in zip(channel_cs...)
cont(t)
end
end
"""
zip(continuables...; [lazy])
Constructs new Continuable with elements from the given continuables zipped up.
I.e. will yield for each position in the original continuables a tuple `(x, y, ...)`
where `x`, `y`, ... are the elements from `continuables` at the same position respectively.
If `lazy=true` (default), it will use Channels to do the zipping.
Otherwise, with `lazy=false`, it will use Arrays instead.
!!! warning CAUTION
`zip` on Continuables is not performant, but will fall back to either Channels (`lazy=true`, default), which are
very slow, or Arrays (`lazy=false`), which will load everything into memory.
"""
function Base.zip(cs::AbstractContinuable...; lazy=true)
if lazy
chzip(cs...)
else
azip(cs...)
end
end
"""
cycle(continuable[, n])
Constructs new Continuable which loops through the given continuable.
If `n` is given, it will loop `n` times, otherwise endlessly.
"""
cycle(continuable::AbstractContinuable) = @cont while true
continuable(cont)
end
cycle(continuable::AbstractContinuable, n::Integer) = @cont for _ in 1:n
continuable(cont)
end
## combine continuables --------------------------------------------
# IMPORTANT we cannot overload the empty product as it would conflict with iterables
"""
product(continuables...)
Construct a new Continuable which yields all combinations of the given continuables, analogous
to how Iterators.product works for iterables.
Mind that `product()` will still return an empty iterator instead of an empty Continuable.
Use [`emptycontinuable`](@ref) instead if you need an empty Continuable.
"""
product(c1::AbstractContinuable) = c1
# note this function in fact returns a continuable, however it is written as highlevel as that no explicit "f(...) = cont -> begin ... end" is needed
product(c1::AbstractContinuable, c2::AbstractContinuable) = @cont begin
c1() do x
c2() do y
cont((x,y))
end
end
end
# this method is underscored because we assume the first continuation to deliver tuples and not values
_product(c1::AbstractContinuable, c2::AbstractContinuable) = @cont begin
c1() do t
c2() do x
cont(tuple(t..., x))
end
end
end
@Ref function product(c1::AbstractContinuable, c2::AbstractContinuable, cs::Vararg{<:AbstractContinuable})
acc = Ref{Any}(product(c1, c2)) # make first into singleton tuple to start recursion
for continuable in cs
acc = _product(acc, continuable)
end
acc
end
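# Illustrative usage (values are examples only):
#
#     collect(product(i2c(1:2), i2c(1:3))) == [(i, j) for i in 1:2 for j in 1:3]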
"""
flatten(continuable_of_continuables)
Constructs new Continuable by concatenating all continuables in the given `continuable_of_continuables`.
Analogous to Iterators.flatten.
For iterables of continuables use `Continuables.chain(iterable_of_continuables...)` instead.
"""
@cont function flatten(continuable::AbstractContinuable)
continuable() do subcontinuable
subcontinuable(cont)
end
end
"""
chain(continuables...)
chain(iterables...) = flatten(iterables)
When given Continuables it will construct a new continuable by concatenating all given continuables.
When given anything else it will default to `Iterators.flatten`.
"""
chain(iterables::Vararg) = flatten(iterables)
chain(cs::Vararg{<:AbstractContinuable}) = @cont begin
for continuable in cs
continuable(cont)
end
end
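# Illustrative usage (values are examples only):
#
#     collect(chain(i2c(1:3), i2c(5:6))) == [1, 2, 3, 5, 6]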
# --------------------------
"""
take(continuable, n)
take(n, continuable)
Construct a new Continuable which only yields the first `n` elements.
`n` can be larger than the total length, no problem.
"""
@cont @Ref function take(continuable::AbstractContinuable, n::Integer)
i = Ref(0)
stoppable(continuable) do x
i += 1
if i > n
stop()
end
cont(x)
end
end
take(n::Integer, continuable::AbstractContinuable) = take(continuable, n)
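# Illustrative usage (values are examples only):
#
#     collect(take(i2c(1:10), 3)) == [1, 2, 3]
#     collect(take(i2c(1:10), 20)) == collect(1:10)  # `n` may exceed the total length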
"""
takewhile(predicate, continuable)
takewhile(predicate, iterable)
If given a Continuable, it constructs a new Continuable yielding elements until `predicate(element)` returns `false`.
Also implements a respective functionality for iterables for convenience.
"""
takewhile(bool, iterable) = TakeWhile(bool, iterable)
@cont function takewhile(bool, continuable::AbstractContinuable)
stoppable(continuable) do x
if !bool(x)
stop()
end
cont(x)
end
end
"""
drop(continuable, n)
drop(n, continuable)
Construct a new Continuable which yields all elements but the first `n`.
`n` can be larger than the total length, no problem.
"""
@cont @Ref function drop(continuable::AbstractContinuable, n::Integer)
i = Ref(0)
continuable() do x
i += 1
if i > n
cont(x)
end
end
end
drop(n::Integer, continuable::AbstractContinuable) = drop(continuable, n)
"""
dropwhile(predicate, continuable)
dropwhile(predicate, iterable)
If given a Continuable, it constructs a new Continuable which drops elements as long as `predicate(element)` returns `true`, and yields all elements from the first `false` onwards.
Also implements a respective functionality for iterables for convenience.
"""
dropwhile(bool, iterable) = DropWhile(bool, iterable)
@cont @Ref function dropwhile(bool, continuable::AbstractContinuable)
dropping = Ref(true)
continuable() do x
if dropping
dropping &= bool(x)
# without nested "if" statement we would have to use two separate if statements at the top (instead of using if else)
!dropping && cont(x)
else
cont(x)
end
end
end
"""
partition(continuable, n[, step])
Constructs new Continuable which yields whole subsections of the given continuable, gathered as Vectors.
`n` is the length of a subsection. The very last subsection might be of length `n` or smaller respectively, collecting
the remaining elements.
If `step` is given, the second subsection is exactly `step`-number of elements apart from the previous subsection,
and hence overlapping if `n > step`.
Further, importantly, if `step` is given, there is no rest, but each subsection will be guaranteed to have the same
length. This semantics is copied from [IterTools.jl](https://juliacollections.github.io/IterTools.jl/latest/#partition(xs,-n,-[step])-1)
# Examples
```jldoctest
julia> using Continuables
julia> partition(i2c(1:10), 3) |> collect
4-element Array{Any,1}:
Any[1, 2, 3]
Any[4, 5, 6]
Any[7, 8, 9]
Any[10]
julia> partition(i2c(1:10), 5, 2) |> collect
3-element Array{Any,1}:
Any[1, 2, 3, 4, 5]
Any[3, 4, 5, 6, 7]
Any[5, 6, 7, 8, 9]
julia> partition(i2c(1:10), 3, 3) |> collect
3-element Array{Any,1}:
Any[1, 2, 3]
Any[4, 5, 6]
Any[7, 8, 9]
```
"""
@cont @Ref function partition(continuable::AbstractContinuable, n::Integer)
i = Ref(1)
part = Ref(Vector(undef, n))
continuable() do x
part[i] = x
i += 1
if i > n
cont(part)
part = Vector(undef, n)
i = 1
end
end
# final bit # TODO is this wanted? with additional step parameter I think this is mostly unwanted
if i > 1
# following the implementation for iterable, we cut the length to the defined part
cont(_takewhile_isassigned(part))
end
end
@cont @Ref function partition(continuable::AbstractContinuable, n::Integer, step::Integer)
i = Ref(0)
n_overlap = n - step
part = Ref(Vector(undef, n))
continuable() do x
i += 1
if i > 0 # if i is negative we simply skip these
part[i] = x
end
if i == n
cont(part)
if n_overlap > 0
overlap = part[1+step:n]
part = Vector(undef, n)
part[1:n_overlap] = overlap
else
# we need to recreate new part because of references
part = Vector(undef, n)
end
i = n_overlap
end
end
end
function _takewhile_isassigned(vec::Vector)
n = length(vec)
for i in 1:n
if !isassigned(vec, i)
return vec[1:(i-1)]
end
end
return vec
end
"""
groupbyreduce(by, continuable, op2[, op1])
groupbyreduce(by, iterable, op2[, op1])
Group elements and returns OrderedDict of keys (constructed by `by`) and values (aggregated with `op2`/`op1`)
If given anything else then a continuable, we interpret it as an iterable and provide the same functionality.
# Parameters
by: function of element to return the key for the grouping/dict
continuable: will get grouped
op2: f(accumulator, element) = new_accumulator
op1: f(element) = initial_accumulator
# Examples
```jldoctest
julia> using Continuables
julia> groupbyreduce(x -> x % 4, @i2c(1:10), (x, y) -> x + y)
OrderedCollections.OrderedDict{Any,Any} with 4 entries:
1 => 15
2 => 18
3 => 10
0 => 12
julia> groupbyreduce(x -> x % 4, @i2c(1:10), (x, y) -> x + y, x -> x+5)
OrderedCollections.OrderedDict{Any,Any} with 4 entries:
1 => 20
2 => 23
3 => 15
0 => 17
```
"""
function groupbyreduce(by, continuable::AbstractContinuable, op2, op1=identity)
d = OrderedDict()
continuable() do x
key = by(x)
if key in keys(d)
d[key] = op2(d[key], x)
else
d[key] = op1(x)
end
end
d
end
# adding iterable versions for the general case (tests showed that these are actually compiling to the iterable version in terms of code and speed, awesome!)
groupbyreduce(by, iterable, op2, op1=identity) = groupbyreduce(by, ascontinuable(iterable), op2, op1)
"""
groupby(f, continuable)
groupby(f, iterable)
Wrapper around the more general [`groupbyreduce`](@ref) which combines elements to a Vector.
If you happen to aggregate your resulting grouped Vectors, think about using `groupbyreduce` directly, as
this can massively speed up aggregations.
Note that the interface is different from `IterTools.groupby`, as we directly return an OrderedDict
(instead of a iterable of values).
# Examples
```jldoctest
julia> using Continuables
julia> groupby(x -> x % 4, @i2c 1:10)
OrderedCollections.OrderedDict{Any,Any} with 4 entries:
1 => [1, 5, 9]
2 => [2, 6, 10]
3 => [3, 7]
0 => [4, 8]
```
"""
groupby(f, continuable::AbstractContinuable) = groupbyreduce(f, continuable, push!, x -> [x])
groupby(f, iterable) = groupby(f, ascontinuable(iterable))
## subsets & peekiter -------------------------------------------------------
# subsets seem to be implemented for arrays in the first place (and not iterables in general)
# hence better use IterTools.subsets directly
# peekiter is the only method of Iterators.jl missing. However it in fact makes no sense for continuables
# as they are functions and don't get consumed
## extract values from continuables ----------------------------------------
"""
nth(continuable, n)
nth(n, continuable)
Extracts the `n`th element from the given continuable.
# Examples
```jldoctest
julia> using Continuables
julia> nth(i2c(4:10), 3)
6
julia> nth(1, i2c(4:10))
4
```
"""
@Ref function nth(continuable::AbstractContinuable, n::Integer)
i = Ref(0)
ret = stoppable(continuable) do x
i += 1
if i==n
# CAUTION: we cannot use return here as usual because this is a subfunction. Return works here more like continue
stop(x)
end
end
if ret === nothing
error("given continuable has length $i < $n")
end
ret
end
nth(n::Integer, continuable::AbstractContinuable) = nth(continuable, n)
end # module
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 1664 | # to be a drop in replacement we need to support iterables
struct TakeWhile{Func, Iter}
f::Func
iter::Iter
end
function Base.iterate(tw::TakeWhile)
(nextval, nextstate) = @ifsomething Base.iterate(tw.iter)
tw.f(nextval) ? (nextval, nextstate) : nothing
end
function Base.iterate(tw::TakeWhile, state)
(nextval, nextstate) = @ifsomething Base.iterate(tw.iter, state)
tw.f(nextval) ? (nextval, nextstate) : nothing
end
# to be a drop in replacement we need to support iterables
struct DropWhile{Func, Iter}
f::Func
iter::Iter
end
function Base.iterate(dw::DropWhile)
(nextval, nextstate) = @ifsomething Base.iterate(dw.iter)
while dw.f(nextval)
(nextval, nextstate) = @ifsomething Base.iterate(dw.iter, nextstate)
end
(nextval, nextstate)
end
Base.iterate(dw::DropWhile, state) = Base.iterate(dw.iter, state)
# IteratorEltype and IteratorSize can be defined for both simultanuously
const DropWhile_or_TakeWhile{F, Iter} = Union{DropWhile{F, Iter}, TakeWhile{F, Iter}}
const TypeDropWhile_or_TypeTakeWhile{F, Iter} = Union{Type{DropWhile{F, Iter}}, Type{TakeWhile{F, Iter}}}
Base.IteratorEltype(::TypeDropWhile_or_TypeTakeWhile{F, Iter}) where {F, Iter} = Base.IteratorEltype(Iter)
Base.eltype(::TypeDropWhile_or_TypeTakeWhile{F, Iter}) where {F, Iter} = Base.eltype(Iter)
# defaulting to Base.SizeUnknown
function Base.IteratorSize(::TypeDropWhile_or_TypeTakeWhile)
Base.SizeUnknown()
end
function Base.length(tw::DropWhile_or_TakeWhile)
s::Int = 0
for x in tw
s += 1
end
s
end
function Base.collect(tw::DropWhile_or_TakeWhile)
everything = []
for x in tw
push!(everything, x)
end
everything
end
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 6145 | using ExprParsers
using DataTypesBasic
using SimpleMatch
# @Ref
# ====
# TODO also replace `::Ref` annotations. Currently only `r = Ref(2)` assignments are replaced.
const _parser = EP.AnyOf(EP.NestedDot(), EP.Function())
# entrypoint - start with empty list of substitutions
refify!(expr::Expr) = refify!(expr, Vector{Symbol}())
# map Expr to Parsers
refify!(any, ::Vector{Symbol}) = () # if there is no expression (or Vector, see below), we cannot refify anything
refify!(expr::Expr, Refs::Vector{Symbol}) = refify!(expr, Refs, @TryCatch EP.ParseError parse_expr(_parser, expr))
# if specific parser was detected, then dispatch directly on Parsed result
refify!(expr::Expr, Refs::Vector{Symbol}, parsed::Identity{P}) where P = refify!(expr, Refs, parsed.value)
# Specific Parsers
function refify!(expr::Expr, Refs::Vector{Symbol}, nesteddot_parsed::EP.NestedDot_Parsed)
# refify only most left dot expression `nesteddot_parsed.base`, i.e. the object which is originally accessed
@match(nesteddot_parsed.base) do f
# if `nesteddot_parsed.base` is a Symbol, we cannot do in-place replacement with refify! but can only changed the parsed result
f(_) = nothing
f(s::Symbol) = nesteddot_parsed.base = refify_symbol(s, Refs)
f(e::Expr) = refify!(e, Refs)
end
# as not everything could be replaced inplace, we still have to inplace-replace the whole parsed expression
newexpr = to_expr(nesteddot_parsed)
expr.head = newexpr.head
expr.args = newexpr.args
end
function refify!(expr::Expr, Refs::Vector{Symbol}, function_parsed::EP.Function_Parsed)
# when going into a function, we need to ignore the function parameter names from Refs as they are new variables, not related to the Refs
args = [parse_expr(EP.Arg(), arg) for arg in function_parsed.args]
kwargs = [parse_expr(EP.Arg(), kwarg) for kwarg in function_parsed.kwargs]
# recurse into any default arguments
for arg in [args; kwargs]
@match(arg.default) do f
f(_) = nothing
# in case of Symbol we can only change the parsed result in place, but not the original expression
f(s::Symbol) = arg.default = refify_symbol(s, Refs)
f(e::Expr) = refify!(e, Refs)
end
end
# recurse into body with function arguments not being refified
args_names = [arg.name for arg in args if arg.name != nothing]
kwargs_names = [kwarg.name for kwarg in kwargs if kwarg.name != nothing]
# CAUTION: we need to use Base.filter so that we can still overwrite filter in the module
Refs::Vector{Symbol} = Base.filter(ref -> ref ∉ args_names && ref ∉ kwargs_names, Refs)
refify!(function_parsed.body, Refs)
# some parts might not have been replaced-inplace in the original expression
# hence we have to replace the whole expression
function_parsed.args = args
function_parsed.kwargs = kwargs
newexpr = to_expr(function_parsed)
expr.head = newexpr.head
expr.args = newexpr.args
end
# if no parser was successful, recurse into expr.args
# core logic, capture each new Ref, substitute each old one
# this has to be done on expr.args level, as Refs on the same level need to be available for replacement
# Additionally, this has to be done on expr.args level because Symbols can only be replaced inplace on the surrounding Vector
function refify!(expr::Expr, Refs::Vector{Symbol}, ::Const{<:Any})
# important to use Base.enumerate as plain enumerate would bring Base.enumerate into namespace, however we want to create an own const link
for (i, a) in Base.enumerate(expr.args)
Ref_assignment_parser = EP.Assignment(
left = EP.anysymbol,
right = EP.Call(
name = :Ref,
),
)
parsed = @TryCatch EP.ParseError parse_expr(Ref_assignment_parser, a)
if issuccess(parsed)
# create new Refs to properly handle subexpressions with Refs (so that no sideeffects occur)
Refs = Symbol[Refs; parsed.value.left]
else
substituted = false
for r in Refs
if a == r
expr.args[i] = :($r.x)
substituted = true
break
end
end
if !substituted
refify!(a, Refs)
end
end
end
end
function refify_symbol(sym::Symbol, Refs::Vector{Symbol})
for r in Refs
if sym == r
return :($sym.x)
end
end
# default to identity
sym
end
"""
@Ref begin
a = Ref(2)
a + 3
end
is translated to
begin
a = Ref(2)
a.x + 3
end
So that you do not need to write `a.x` or `a[]` everywhere.
The macro works correctly with subfunctions shadowing the variables.
"""
macro Ref(expr)
refify!(expr)
esc(expr)
end
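# Illustrative usage (a sketch mirroring the pattern used in the package's benchmarks):
# accumulate into a `Ref` without writing `a.x` by hand.
#
#     @Ref function sum_continuable(continuable)
#         a = Ref(0)
#         continuable() do i
#             a += i
#         end
#         a
#     end
#
#     sum_continuable(ascontinuable(1:10)) == 55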
# @cont
# =====
macro assert_noerror(expr, msg)
esc(quote
try
$expr
catch
error(msg)
end
end)
end
"""
@cont begin
cont(1)
cont(2)
end
translates to
Continuable(function(cont)
cont(1)
cont(2)
end)
Furthermore, function syntax is also supported
@cont function(a, b)
  cont(a)
  cont(b)
end
is translated to
function(a, b)
  Continuable(function(cont)
    cont(a)
    cont(b)
  end)
end
In summary, `@cont` wraps the return value into a Continuable.
"""
macro cont(expr)
expr = macroexpand(__module__, expr) # get rid of maybe confusing macros
esc(cont_expr(expr))
end
function cont_expr(expr::Expr)
if issuccess(@TryCatch EP.ParseError parse_expr(EP.Function(), expr))
cont_funcexpr(expr)
else
quote
Continuables.Continuable(cont -> $expr)
end
end
end
function _extract_symbol(a)
if isa(a, Symbol)
a
elseif isa(a.args[1], Symbol) # something like Type annotation a::Any or Defaultvalue b = 3
a.args[1]
else # Type annotation with Defaultvalue
a.args[1].args[1]
end
end
function cont_funcexpr(expr::Expr)
func_parsed = parse_expr(EP.Function(), expr)
@assert Base.all(func_parsed.args) do s
_extract_symbol(s) != :cont
end "No function parameter can be called `cont` for @cont to apply."
# return Continuable instead
func_parsed.body = :(Continuables.Continuable(cont -> $(func_parsed.body)))
# make Expr
to_expr(func_parsed)
end
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 1275 | # We realise early stopping via exceptions
struct StopException{T} <: Exception
ret::T
end
Stop = StopException(nothing)
stop() = throw(Stop)
stop(ret) = throw(StopException(ret))
"""
contextmanager handling custom breakpoints with `stop()`
This is usually only used when creating a new continuable from a previous one
# Examples
```julia
@cont stop_at4(continuable) = stoppable(continuable) do x
x == 4 && stop()
cont(x)
end
```
"""
function stoppable(func, continuable, default_return = nothing)
try
continuable(func)
default_return # default returnvalue to be able to handle `stop(returnvalue)` savely
catch exc
if !isa(exc, StopException)
rethrow(exc)
end
exc.ret
end
end
"""
Continuables.@ifsomething expr
If `expr` evaluates to `nothing`, equivalent to `return nothing`, otherwise the macro
evaluates to the value of `expr`. Not exported, useful for implementing iterators.
# Example
```jldoctest
julia> using Continuables
julia> Continuables.@ifsomething iterate(1:2)
(1, 1)
julia> let elt, state = Continuables.@ifsomething iterate(1:2, 2); println("not reached"); end
```
"""
macro ifsomething(ex)
quote
result = $(esc(ex))
result === nothing && return nothing
result
end
end
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 4410 | using Continuables
import BenchmarkTools.@benchmark
@cont function crange(n::Int)
for i in 1:n
cont(i)
end
end
function trange(n::Int)
c = Channel{Int}(1)
task = @async for i ∈ 1:n
put!(c, i)
end
bind(c, task)
end
@Ref function sum_continuable(continuable)
a = Ref(0)
continuable() do i
a += i
end
a
end
function sum_continuable_withoutref(continuable)
a = 0
continuable() do i
a += i
end
a
end
function sum_iterable(it)
a = 0
for i in it
a += i
end
a
end
function collect_continuable(continuable)
a = []
continuable() do i
push!(a, i)
end
a
end
function collect_iterable(it)
a = []
for i in it
push!(a, i)
end
a
end
@benchmark sum_continuable(crange(1000))
#=
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 1.185 ns (0.00% GC)
median time: 1.580 ns (0.00% GC)
mean time: 1.756 ns (0.00% GC)
maximum time: 57.679 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 1000
=#
@benchmark sum_continuable(@i2c 1:1000)
#=
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 1.185 ns (0.00% GC)
median time: 1.580 ns (0.00% GC)
mean time: 1.877 ns (0.00% GC)
maximum time: 31.210 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 1000
=#
@benchmark sum_continuable_withoutref(crange(1000))
#=
BenchmarkTools.Trial:
memory estimate: 22.81 KiB
allocs estimate: 1460
--------------
minimum time: 26.074 μs (0.00% GC)
median time: 27.654 μs (0.00% GC)
mean time: 39.603 μs (15.68% GC)
maximum time: 42.758 ms (99.85% GC)
--------------
samples: 10000
evals/sample: 1
=#
@benchmark sum_continuable_withoutref(@i2c 1:1000)
#=
BenchmarkTools.Trial:
memory estimate: 22.81 KiB
allocs estimate: 1460
--------------
minimum time: 26.074 μs (0.00% GC)
median time: 27.654 μs (0.00% GC)
mean time: 39.910 μs (16.12% GC)
maximum time: 46.847 ms (99.92% GC)
--------------
samples: 10000
evals/sample: 1
=#
@benchmark sum_iterable(trange(1000))
#=
BenchmarkTools.Trial:
memory estimate: 33.05 KiB
allocs estimate: 2019
--------------
minimum time: 4.968 ms (0.00% GC)
median time: 5.839 ms (0.00% GC)
mean time: 6.089 ms (0.34% GC)
maximum time: 23.466 ms (73.27% GC)
--------------
samples: 821
evals/sample: 1
=#
@benchmark sum_iterable(1:1000)
#=
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 1.185 ns (0.00% GC)
median time: 1.580 ns (0.00% GC)
mean time: 1.742 ns (0.00% GC)
maximum time: 56.494 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 1000
=#
@benchmark collect_continuable(crange(1000))
#=
BenchmarkTools.Trial:
memory estimate: 24.03 KiB
allocs estimate: 499
--------------
minimum time: 11.456 μs (0.00% GC)
median time: 12.247 μs (0.00% GC)
mean time: 20.871 μs (30.69% GC)
maximum time: 43.985 ms (99.97% GC)
--------------
samples: 10000
evals/sample: 1
=#
@benchmark collect_continuable(@i2c 1:1000)
#=
BenchmarkTools.Trial:
memory estimate: 24.03 KiB
allocs estimate: 499
--------------
minimum time: 11.456 μs (0.00% GC)
median time: 12.247 μs (0.00% GC)
mean time: 25.120 μs (26.38% GC)
maximum time: 44.245 ms (99.96% GC)
--------------
samples: 10000
evals/sample: 1
=#
@benchmark collect_iterable(trange(1000))
#=
BenchmarkTools.Trial:
memory estimate: 57.08 KiB
allocs estimate: 2518
--------------
minimum time: 4.968 ms (0.00% GC)
median time: 5.715 ms (0.00% GC)
mean time: 6.082 ms (1.31% GC)
maximum time: 57.567 ms (89.97% GC)
--------------
samples: 822
evals/sample: 1
=#
@benchmark collect_iterable(1:1000)
#=
BenchmarkTools.Trial:
memory estimate: 24.03 KiB
allocs estimate: 499
--------------
minimum time: 11.456 μs (0.00% GC)
median time: 12.642 μs (0.00% GC)
mean time: 23.706 μs (27.20% GC)
maximum time: 45.308 ms (99.97% GC)
--------------
samples: 10000
evals/sample: 1
=# | Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 5685 | using Continuables
using Test
@testset "utils" begin
include("utils.jl")
end
# check that @Ref is working correctly
# ====================================
@testset "Ref" begin
expr1_simple = @macroexpand @Ref begin
a = Ref(1)
f(a) = 2 * a
a + f(a)
end
expr2_simple = quote
a = Ref(1)
f(a) = 2 * a
a.x + f(a.x)
end
@test same_expr(expr1_simple, expr2_simple)
# difficult case to parse:
expr1_complex = @macroexpand @Ref begin
a = Ref(Some(1))
b = Ref("hi")
f(a::Int = b; c = b, d = 4) = 2 * a
a.value + f(a.value)
end
expr2_complex = quote
a = Ref(Some(1))
b = Ref("hi")
f(a::Int = b.x; c = b.x, d = 4) = 2 * a
a.x.value + f(a.x.value)
end
@test same_expr(expr1_complex, expr2_complex)
end
@testset "specializing functions for Continuable" begin
mycont(x) = @cont cont(x)
Base.length(::Continuable{<:innerfunctype(mycont(:examplesignature))}) = :justatest
@test length(mycont(1)) == :justatest
@test length(mycont("a")) == :justatest
# other continuables are not affected
anothercont(x) = @cont cont(x)
@test length(anothercont(42)) == 1
end
@testset "constructing Continuables" begin
cont1 = @cont cont(1)
@test collect(cont1) == [1]
@test collect(singleton(3)) == [3]
@test collect(take(repeated(() -> 4), 3)) == [4, 4, 4]
@test collect(repeated(() -> 4, 3)) == [4, 4, 4]
@test collect(take(3, iterated(x -> x*x, 2))) == [2, 4, 16]
end
@testset "DropWhile TakeWhile iterables" begin
@test length(dropwhile(x -> x<4, 1:10)) == 7
@test Base.IteratorSize(typeof(dropwhile(x -> x<4, 1:10))) == Base.SizeUnknown()
@test eltype(dropwhile(x -> x<4, 1:10)) == Int
@test Base.IteratorEltype(typeof(dropwhile(x -> x<4, 1:10))) == Base.HasEltype()
@test length(takewhile(x -> x<4, 1:10)) == 3
@test Base.IteratorSize(typeof(takewhile(x -> x<4, 1:10))) == Base.SizeUnknown()
@test eltype(takewhile(x -> x<4, 1:10)) == Int
@test Base.IteratorEltype(typeof(takewhile(x -> x<4, 1:10))) == Base.HasEltype()
end
# Check Continuables standard Interface
# =====================================
@testset "standard interface" begin
@test_throws ErrorException cont(1)
@test collect(@i2c 2:4:10) == collect(2:4:10)
@test collect(i2c(2:4:10), length(2:4:10)) == collect(2:4:10)
@test collect(enumerate(@i2c 2:4:10)) == collect(enumerate(2:4:10))
@test collect(filter(x -> x < 7, @i2c 2:4:10)) == collect(filter(x -> x < 7, 2:4:10))
@test collect(map(x->x^2, @i2c 2:4:10)) == collect(map(x->x^2, 2:4:10))
@test collect(take(cycle(@i2c 1:3), 11)) == collect(take(cycle(1:3), 11))
@test collect(take(cycle(@i2c 1:3), 11)) == collect(take(cycle(1:3), 11))
@test collect(cycle(i2c(1:3), 3)) == [1,2,3, 1,2,3, 1,2,3]
@test collect(drop(i2c(1:10), 3)) == collect(drop(1:10, 3))
@test collect(drop(3, i2c(1:10))) == collect(drop(1:10, 3))
@test nth(i2c(0:10), 8) == (0:10)[8]
@test_throws ErrorException nth(12, i2c(0:10))
@test Base.IteratorSize(@i2c 1:4) == Base.SizeUnknown()
@test length(@i2c 3:20) == 18
@test Base.IteratorEltype(emptycontinuable) == Base.EltypeUnknown()
@test Base.eltype(@i2c 1:4) == Any
test_continuable_product = @i2c 1:10
@test product(test_continuable_product) === test_continuable_product # should be a no-op
@test collect(product(i2c(1:10), i2c(1:3))) == [(i,j) for i in 1:10 for j in 1:3]
@test collect(product(i2c(1:10), i2c(1:3), i2c(2:4))) == [(i,j,k) for i in 1:10 for j in 1:3 for k in 2:4]
# however they both are not easily comparable... because transpose does not work on arrays of tuples right now...
@test collect(partition(i2c(1:15), 4)) == collect(partition(1:15, 4))
@test collect(partition(i2c(1:20), 3, 5)) == [
Any[1, 2, 3],
Any[6, 7, 8],
Any[11, 12, 13],
Any[16, 17, 18],
]
@test collect(partition(i2c(1:10), 5, 2)) == Any[
Any[1, 2, 3, 4, 5],
Any[3, 4, 5, 6, 7],
Any[5, 6, 7, 8, 9],
]
@test collect(zip(i2c(1:10), i2c(4:13))) == collect(zip(1:10, 4:13))
@test collect(zip(i2c(1:10), i2c(4:13), lazy = false)) == collect(zip(1:10, 4:13))
@test any(x -> x == 4, @i2c 1:10) == any(x -> x == 4, 1:10)
@test any(x -> x == 4, i2c(1:10), lazy=false) == any(x -> x == 4, 1:10)
@test any(x -> x == 4, i2c(1:3)) == any(x -> x == 4, 1:3)
@test any(x -> x == 4, i2c(1:3), lazy=false) == any(x -> x == 4, 1:3)
@test all(x -> x <= 4, i2c(1:10)) == all(x -> x <= 4, 1:10)
@test all(x -> x <= 4, i2c(1:10), lazy=false) == all(x -> x <= 4, 1:10)
@test all(x -> x <= 4, i2c(1:3)) == all(x -> x <= 4, 1:3)
@test all(x -> x <= 4, i2c(1:3), lazy=false) == all(x -> x <= 4, 1:3)
@test reduce((acc, x) -> acc + x*x, @i2c 1:10) == reduce((acc, x) -> acc + x*x, 1:10)
@test foldl((acc, x) -> acc + x*x, @i2c 1:10) == foldl((acc, x) -> acc + x*x, 1:10)
@test reduce((acc, x) -> acc + x*x, i2c(1:10), init = 100) == reduce((acc, x) -> acc + x*x, 1:10, init = 100)
@test reduce!(push!, i2c(1:10), init = []) == collect(1:10)
@test sum(@i2c 1:10) == sum(1:10)
@test prod(@i2c 1:10) == prod(1:10)
@test collect(chain(@i2c(1:10), @i2c(3:5))) == collect(chain(1:10, 3:5))
contcont = @cont begin
for i in 1:10
cont(@i2c i:10)
end
end
@test collect(flatten(contcont)) == collect(flatten(i:10 for i in 1:10))
@test collect(takewhile(x -> x <= 4, @i2c 1:10)) == collect(takewhile(x -> x <= 4, 1:10))
@test collect(dropwhile(x -> x <= 4, @i2c 1:10)) == collect(dropwhile(x -> x <= 4, 1:10))
@test groupby(x -> x % 4, @i2c 1:10) == groupby(x -> x % 4, 1:10)
@test groupbyreduce(x -> x % 4, @i2c(1:10), (x, y) -> x + y, x -> x+5) == groupbyreduce(x -> x % 4, 1:10, (x, y) -> x + y, x -> x+5)
end
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | code | 1277 |
# zip of uneven iterators is still not supported in julia... super weird
# see https://discourse.julialang.org/t/collecting-zip/20739
# and https://github.com/JuliaLang/julia/issues/17928
function Base.collect(itr::Base.Iterators.Zip)
itrsize = Base.IteratorSize(itr)
itrsize isa Base.HasShape && (itrsize = Base.HasLength())
Base._collect(1:1, itr, Base.IteratorEltype(itr), itrsize)
end
"""
only continue if true, else return false immediately
"""
macro iftrue(expr)
quote
$(esc(expr)) || return false
end
end
same_expr(e1, e2) = e1 == e2
function same_expr(e1::Expr, e2::Expr)
# don't differentiate between `f(a) = a` and `function f(a); a; end`
e1.head == :function && (e1.head = :(=))
e2.head == :function && (e2.head = :(=))
@iftrue e1.head == e2.head
args1 = filter(x -> !isa(x, LineNumberNode), e1.args)
args2 = filter(x -> !isa(x, LineNumberNode), e2.args)
@iftrue length(args1) == length(args2)
# recurse
all(zip(args1, args2)) do (a, b)
same_expr(a, b)
end
end
@test same_expr(:a, :a)
@test same_expr(:(a = 4), :(a = 4))
@test same_expr(quote
f(a) = a
end, quote
function f(a)
a
end
end)
@test !same_expr(:a, :b)
@test !same_expr(:(a = 4), :(a = 5))
@test !same_expr(quote
f(a) = a
end, quote
f(b) = b
end)
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | docs | 1299 | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.0.3] - 2023-07-01
### Changed
- generalized dependencies compat entries
- removed Compat dependency
## [1.0.2] - 2022-07-19
### Changed
- Compat compat now includes version 4
- BenchmarkTools are now in Test dependencies
## [1.0.1] - 2021-07-15
### Changed
- updated Compat section
## [1.0.0] - 2020-07-29
### Added
- GithubActions for CICD
- Documentation using Documenter.jl
- License
- Codecoverage
- extensive doc strings
- introduced abstract type `AbstractContinuable` to enable other Continuable types with more specialized information (e.g. when `Iterators.HasEltype` or `Iterators.IteratorSize` are known)
### Changed
- License is now MIT
## [0.3.1] - 2020-02-04
### Changed
- switched deprecated dependency AstParsers.jl to renamed ExprParsers.jl
## [0.2.1] - 2020-01-11
### Changed
- more stable macros by switching to use AstParsers.jl
- Continuables is now a wrapper type
- we now reuse functions from Base and Iterators instead of defining our own
## [0.1.0] - 2018-10-07
initial sketch
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | docs | 2566 | # Continuables
[](https://jolin-io.github.io/Continuables.jl/stable)
[](https://jolin-io.github.io/Continuables.jl/dev)
[](https://github.com/jolin-io/Continuables.jl/actions)
[](https://codecov.io/gh/jolin-io/Continuables.jl)
TLDR: Python / C# `yield` with performance matching plain Julia iterators (i.e. unbelievably fast)
Continuables are generator-like higher-order functions which take a continuation as an extra argument. The key macro provided by the package is `@cont`, which gives access to the special function `cont` within its scope and wraps the computation in the special type `Continuables.Continuable`.
It is best to think of `cont` in the sense of `yield` from Python's Generators. It generates values and takes feedback from the outer process as return value.
If you come from Python, use Continuables wherever you would use generators. If you are Julia-native, Continuables can be used instead of Julia's Channels in many places with drastic performance improvements (really drastic: in the little benchmark example below it is 20 million times faster!).
This package implements all standard functions such as `collect`, `reduce`, and `any`, as well as functionality known from `Base.Iterators` and [`IterTools.jl`](https://github.com/JuliaCollections/IterTools.jl) like `take`, `dropwhile`, `groupby`, `partition`, and `nth`.
For convenience, all methods also work for plain iterables.
## Installation
Install like
```julia
using Pkg
pkg"add Continuables"
```
Use it like
```julia
using Continuables
```
For further information take a look at the [documentation](https://jolin-io.github.io/Continuables.jl/dev).
## Example: flexible alternative to `walkdir`
Sometimes you want to read files recursively, skipping certain directories and making other individual adaptations. Using `Continuables` you get full flexibility with very readable code and good performance:
```julia
list_all_juliafiles(path=abspath(".")) = @cont begin
if isfile(path)
endswith(path, ".jl") && cont(path)
elseif isdir(path)
basename(path) in (".git",) && return
for file in readdir(path)
foreach(cont, list_all_juliafiles(joinpath(path, file)))
end
end
end
collect(list_all_juliafiles())
```
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | docs | 5315 | # Benchmark
We compare Continuables with standard Julia Channels and iterators, measuring the performance of a simple implementation of `sum`.
The Channel equivalent of the `corange` function from the manual is:
```julia
# standard Channel -----------------------------------------------------
function chrange(r)
Channel{Int}(1) do ch
for i ∈ 1:r
put!(ch, i)
end
end
end
```
The sum benchmark functions are defined as follows
```julia
using Continuables
# Summing continuable --------------------------------------
# we use a convenient macro which replaces all uses of r where r was defined as r = Ref(value) with r.x, i.e. the pointer to its referenced value.
# The effect is that the variable assignment becomes a mutation of the Reference's field.
# This macro leads to very clean code while being intuitively transparent.
@Ref function sum_continuable(continuable)
a = Ref(0)
continuable() do i
a += i
end
a
end
function sum_continuable_withoutref(continuable)
# interestingly, this works too, however with a lot of magic happening in the background
# which is also decreasing performance
a = 0
continuable() do i
a += i
end
a
end
# Summing Task ----------------------------------------------
function sum_iterable(it)
a = 0
for i in it
a += i
end
a
end
```
You may need to add BenchmarkTools to your Julia project by running `] add BenchmarkTools`. All results below were obtained on the same machine; results may vary for your architecture.
We start with the base-line, i.e. summing up the pure range iterator:
```julia
julia> import BenchmarkTools.@benchmark
julia> @benchmark sum_iterable(1:1000)
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 1.420 ns (0.00% GC)
median time: 1.706 ns (0.00% GC)
mean time: 1.663 ns (0.00% GC)
maximum time: 16.456 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 1000
```
We reach the same performance with our self-written continuable version of range. However, as you can see below, if you do not use References everywhere (like Ref or arrays or dictionaries) then performance decreases.
```julia
julia> @benchmark sum_continuable(corange(1000))
BenchmarkTools.Trial:
memory estimate: 0 bytes
allocs estimate: 0
--------------
minimum time: 1.420 ns (0.00% GC)
median time: 1.708 ns (0.00% GC)
mean time: 1.671 ns (0.00% GC)
maximum time: 16.778 ns (0.00% GC)
--------------
samples: 10000
evals/sample: 1000
julia> @benchmark sum_continuable_withoutref(corange(1000))
BenchmarkTools.Trial:
memory estimate: 22.81 KiB
allocs estimate: 1460
--------------
minimum time: 22.658 μs (0.00% GC)
median time: 24.315 μs (0.00% GC)
mean time: 28.105 μs (2.64% GC)
maximum time: 1.925 ms (97.74% GC)
--------------
samples: 10000
evals/sample: 1
```
Last but not least the Channel version of range.
```julia
julia> @benchmark sum_iterable(chrange(1000))
BenchmarkTools.Trial:
memory estimate: 32.95 KiB
allocs estimate: 2026
--------------
minimum time: 28.208 ms (0.00% GC)
median time: 34.169 ms (0.00% GC)
mean time: 33.836 ms (0.00% GC)
maximum time: 38.737 ms (0.00% GC)
--------------
samples: 148
evals/sample: 1
```
Mind that 1μs = 1000ns and 1ms = 1000μs. So on median we have
| range | median | x-times of range |
|----------------------------------|------------|------------------|
| 1:1000 | 1.706ns | 1 |
| corange(1000) summed with Ref | 1.708ns | 1 |
| corange(1000) summed without Ref | 24315ns | 1.4e4 |
| chrange(1000) | 34169000ns | 2e7 |
Also note that the continuable version with Ref has 0 bytes memory footprint!
## Related packages
There is a package called [ResumableFunctions.jl](https://github.com/BenLauwens/ResumableFunctions.jl) with the same motivation but a completely different implementation.
```julia
using ResumableFunctions
@resumable function rfrange(n::Int)
for i in 1:n
@yield i
end
end
# apparently the @resumable macro relies on having Base.iterate directly available in the namespace, but Continuables also exports one, so we have to explicitly declare which one we want to use to work around this little @resumable bug
const iterate = Base.iterate
@benchmark sum_iterable(rfrange(1000))
```
The resulting time are as follows on my machine:
```julia
BenchmarkTools.Trial:
memory estimate: 93.84 KiB
allocs estimate: 3001
--------------
minimum time: 453.640 μs (0.00% GC)
median time: 475.210 μs (0.00% GC)
mean time: 505.774 μs (1.18% GC)
maximum time: 4.360 ms (85.91% GC)
--------------
samples: 9869
evals/sample: 1
```
I.e. you see it is an impressive factor of `2.8e5` slower on median compared to the plain range or the Continuables version. It is still a factor 100 faster than the current Channels version, but the Channel one is exceptionally slow (probably because of thread-safety). And in terms of memory allocation, `@resumable` is even the worst of all for this very simple computation.
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | docs | 1480 | # Continuables.jl
TLDR: Python / C# `yield` with performance matching plain Julia iterators (i.e. unbelievably fast)
Continuables are generator-like higher-order functions which take a continuation as an extra argument. The key macro provided by the package is `@cont`, which gives access to the special function `cont` within its scope and wraps the computation in the special type `Continuables.Continuable`.
It is best to think of `cont` in the sense of `yield` from Python's Generators. It generates values and takes feedback from the outer process as return value.
If you come from Python, use Continuables wherever you would use generators. If you are Julia-native, Continuables can be used instead of Julia's Channels in many places with drastic performance improvements (really drastic: in the little benchmark example below it is 20 million times faster!).
This package implements all standard functions such as `collect`, `reduce`, and `any`, as well as functionality known from `Base.Iterators` and [`IterTools.jl`](https://github.com/JuliaCollections/IterTools.jl) like `take`, `dropwhile`, `groupby`, `partition`, and `nth`.
For convenience, all methods also work for plain iterables.
## Installation
Install like
```julia
using Pkg
pkg"add Continuables"
```
Use it like
```julia
using Continuables
```
## Manual Outline
```@contents
Pages = ["manual.md"]
```
## [Library Index](@id main-index)
```@contents
Pages = ["library.md"]
```
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | docs | 470 | ```@meta
CurrentModule = Continuables
```
# Public API
```@index
```
## Core
```@docs
Continuable
@cont
@Ref
innerfunctype
AbstractContinuable
```
## Conversions
```@docs
aschannel
ascontinuable
i2c
@i2c
```
## Factories
```@docs
emptycontinuable
singleton
repeated
iterated
```
## Common Helpers
```@docs
collect
reduce
reduce!
zip
product
chain
flatten
cycle
foreach
map
all
any
sum
prod
take
takewhile
drop
dropwhile
partition
groupbyreduce
groupby
nth
```
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"MIT"
] | 1.0.3 | 96107b5ecb77d0397395cec4a95a28873e124204 | docs | 5679 | # Manual
TLDR: Python / C# `yield` with performance matching plain Julia iterators (i.e. unbelievably fast)
Continuables are generator-like higher-order functions which take a continuation as an extra argument. The key macro provided by the package is `@cont`, which gives access to the special function `cont` within its scope and wraps the computation in the special type `Continuables.Continuable`.
It is best to think of `cont` in the sense of `yield` from Python's Generators. It generates values and takes feedback from the outer process as return value.
If you come from Python, use Continuables wherever you would use generators. If you are Julia-native, Continuables can be used instead of Julia's Channels in many places with drastic performance improvements (really drastic: in the little benchmark example below it is 20 million times faster!).
This package implements all standard functions such as `collect`, `reduce`, and `any`, as well as functionality known from `Base.Iterators` and [`IterTools.jl`](https://github.com/JuliaCollections/IterTools.jl) like `take`, `dropwhile`, `groupby`, `partition`, and `nth`.
For convenience, all methods also work for plain iterables.
## Example of a Continuable
Let's define our first continuable by wrapping a simple range iterator `1:n`.
```julia
using Continuables
# new Continuable ---------------------------------------------
corange(n::Integer) = @cont begin
for i in 1:n
cont(i)
end
end
```
That's it. Very straightforward and intuitive.
Many standard functions work seamlessly for Continuables.
```julia
using Continuables
collect(corange(10)) == collect(1:10)
co2 = map(corange(5)) do x
2x
end
collect(co2) == [2,4,6,8,10]
foreach(println, corange(3)) # 1, 2, 3
foreach(chain(corange(2), corange(4))) do x
print("$x, ")
end # 1, 2, 1, 2, 3, 4,
reduce(*, corange(4)) == 24
all(x -> x < 5, corange(3))
any(x -> x == 2, corange(3))
map(corange(10)) do x
corange(x)
end |> flatten |> co -> take(co, 5) |> collect == Any[1,1,2,1,2]
collect(product(corange(2), corange(3))) == Any[
(1, 1),
(1, 2),
(1, 3),
(2, 1),
(2, 2),
(2, 3),
]
collect(partition(corange(11), 4)) == [
Any[1,2,3,4],
Any[5,6,7,8],
Any[9,10,11],
]
using OrderedCollections
groupbyreduce(isodd, corange(5), +) == OrderedDict{Any, Any}(
true => 9,
false => 6,
)
nth(3, ascontinuable(4:10)) == 6
nth(4, i2c(4:10)) == 7
nth(5, @i2c 4:10) == 8
# further defined are `takewhile`, `drop`, `dropwhile`, `repeated` and `iterate`, as well as `groupby`.
```
Importantly, Continuables do not support `Base.iterate`, i.e. you cannot directly for-loop over a Continuable. There is just no direct way to implement `iterate` on top of Continuables. Give it a try. Instead, you have to convert it into an Array first using `collect`, or to a Channel using `aschannel`.
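For instance, a small sketch of both conversions (assuming `aschannel` takes the continuable as its only argument, as listed in the API reference):

```julia
c = i2c(1:3)
collect(c) == [1, 2, 3]      # materialise as an Array
for x in aschannel(c)        # or convert to a Channel to iterate with a for-loop
    println(x)
end
```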
The same holds true for `zip`; however, we provide a convenience implementation where you can choose which interpretation you want:
```julia
# uses Channels and hence offers lazy execution, however might be slower
zip(i2c(1:4), i2c(3:6), lazy=true) # Default
# uses Array, might be faster, but loads everything into memory
zip(i2c(1:4), i2c(3:6), lazy=false)
```
Last but not least, you can call a Continuable directly. It is just a higher order function expecting a `cont` function to run its computation.
```julia
continuable = corange(3)
foreach(print, continuable) # 123
# is the very same as
continuable(print) # 123
```
## The `@Ref` macro
As you already saw, for continuables we cannot use for-loops. Instead we use higher-order functions like `map`, `foreach`, `reduce` or `groupbyreduce` to work with Continuables.
Fortunately, julia supports beautiful `do` syntax for higher-order functions. In fact, `do` becomes the equivalent of `for` for continuables.
However, importantly, a `do`-block constructs an anonymous function, and consequently what happens within the do-block has its own variable namespace! This is essential to keep in mind if you want to define your own Continuables. You cannot easily change an outer variable from within a do-block as you may have done within a for-loop. The solution is simply to use Julia's `Ref` object to get mutations instead of plain variable assignments. For example, instead of `var_changing_every_loop = 0` with an update `var_changing_every_loop += 1`, you use `var_changing_every_loop = Ref(yourvalue)` and `var_changing_every_loop.x += 1`.
(If you use something mutable, like a Vector instead of the immutable Int here, you can of course work in place directly: with `a = []`, `push!(a, i)` will do the right thing inside a do-block as well.)
For convenience, Continuables comes with a second macro `@Ref` which checks your code for `variable = Ref(value)` parts and replaces all plain assignments `var = newvalue` with `var.x = newvalue`. This makes for beautiful code. Let's implement reduce with it:
```julia
using Continuables
@Ref function myreduce(continuable, merge, init)
accumulator = Ref(init)
continuable() do x
accumulator = merge(accumulator, x)
end
accumulator
end
myreduce(i2c(0:5), +, 0) == 15
```
Let's check that `@Ref` indeed only replaced `accumulator` with `accumulator.x`. Run `@macroexpand` on the whole definition, i.e. `@macroexpand @Ref function myreduce(....`, which returns
```julia
:(function myreduce(continuable, merge, init)
accumulator = Ref(init)
continuable() do x
accumulator.x = merge(accumulator.x, x)
end
accumulator.x
end)
```
When combining `@cont` with `@Ref` do `@cont @Ref ...`, i.e. let `@cont` be the outer and `@Ref` be the inner macro.
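For example, here is a minimal sketch of such a combination (the `cumsum_cont` helper is made up purely for illustration):

```julia
# yields the running sums 1, 3, 6, ...; @Ref rewrites `acc = ...` to `acc.x = ...`
cumsum_cont(n) = @cont @Ref begin
    acc = Ref(0)
    for i in 1:n
        acc = acc + i
        cont(acc)
    end
end

collect(cumsum_cont(4)) == [1, 3, 6, 10]
```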
| Continuables | https://github.com/jolin-io/Continuables.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 1782 | """
Set of recipes to compute opacities.
"""
module Transparency
export σ_hminus_ff, σ_hminus_bf, α_hminus_ff, α_hminus_bf
export σ_hydrogenic_ff, σ_hydrogenic_bf, α_hydrogenic_ff, α_hydrogenic_bf
export σ_hydrogenic_bf_scaled
export σ_h2minus_ff, σ_h2plus_ff, σ_h2plus_bf, α_h2minus_ff, α_h2plus_ff, α_h2plus_bf
export σ_rayleigh_h2, σ_rayleigh_h, σ_thomson, α_rayleigh_h2, α_rayleigh_h, α_thomson
export humlicek, voigt_profile, dispersion_profile
export calc_Aul, calc_Bul, damping, doppler_width
export const_unsold, γ_unsold, γ_stark_linear
export const_barklem, γ_barklem
export const_deridder_rensbergen, γ_deridder_rensbergen
export const_stark_quadratic, γ_stark_quadratic, γ_stark_quadratic_gray
export blackbody_λ, blackbody_ν
export coll_CE, coll_CI, coll_Ω
export coll_deexc_hydrogen_PB04, coll_exc_hydrogen_johnson, coll_ion_hydrogen_johnson
export CE_RH_hydrogen, CI_RH_hydrogen
using Interpolations
using StaticArrays
using Unitful
import PhysicalConstants.CODATA2018: h, k_B, R_∞, c_0, m_e, m_u, e, ε_0, a_0
import SpecialFunctions: expint, gamma
@derived_dimension NumberDensity Unitful.𝐋^-3
@derived_dimension PerLength Unitful.𝐋^-1
@derived_dimension UnitsIntensity_λ Unitful.𝐋^-1 * Unitful.𝐌 * Unitful.𝐓^-3
const Ar_H = 1.007975 # Atomic weight of hydrogen
const Ry = R_∞ * c_0 * h # Rydberg energy
const Ryh = Ry / (1 + m_e / (Ar_H * m_u)) # Hydrogen ionisation energy
const αp = 4.5 * 4*π * ε_0 * a_0^3 # Polarisability of hydrogen [F m^2]
const inv_4πε0 = 1. / (4 * π * ε_0)
include("line.jl")
include("broadening.jl")
include("collisions.jl")
include("hydrogen.jl")
include("thomson.jl")
include("voigt.jl")
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 14259 | """
Functions to calculate different line broadenings.
"""
# For now
const mass_H = 1.008 * m_u
const mass_He = 4.003 * m_u
const abund_He = 10^10.99 / 10^12 # From RH
#=----------------------------------------------------------------------------
Linear Stark broadening: C_2 / r^2
----------------------------------------------------------------------------=#
"""
γ_stark_linear(
electron_density::NumberDensity{T},
n_upper::Integer,
n_lower::Integer
) where T <: AbstractFloat
Calculate linear Stark broadening according to the approximation of
[Sutton (1978)](https://ui.adsabs.harvard.edu/abs/1978JQSRT..20..333S/), eq (24),
so that it can be added into a Voigt profile and avoid the computation of a Holtsmark
profile. Valid up to electron densities of 1e19 m^-3 in the chromosphere.
# Arguments
- `electron_density`: electron density per volume
- `n_upper`: principal quantum number of upper level
- `n_lower`: principal quantum number of lower level
# Returns
- `γ::Unitful.Frequency`: broadening in units of rad / s.
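# Examples
A hedged usage sketch (the plasma parameters are illustrative only):
```julia
γ_stark_linear(1e18u"m^-3", 3, 2)   # e.g. for Hα (n = 3 -> 2)
```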
"""
function γ_stark_linear(
electron_density::NumberDensity{T},
n_upper::Integer,
n_lower::Integer
) where T <: AbstractFloat
@assert n_upper > n_lower
@assert n_upper > 1
if n_upper - n_lower == 1
a1 = convert(T, 0.642)
else
a1 = convert(T, 1)
end
γβ = convert(T, 0.425)
power = convert(T, 2/3)
zcoeff = convert(T, 6e-5)u"s^-1" # should be m^2/s, dropping m^2 to allow ne=ustrip(ne)
ne = ustrip(electron_density |> u"m^-3")
# Conversion factors: 2 from half half-width to half-width, 2π from 1 / s to rad / s
return 2 * 2 * π * u"rad" * γβ * a1 * zcoeff * (n_upper^2 - n_lower^2) * ne^power
end
#=----------------------------------------------------------------------------
Quadratic Stark broadening: C_4 / r^4
----------------------------------------------------------------------------=#
"""
c4_traving(χup, χlo, χ∞, Z)
Calculate the \$C_4\$ interaction constant (quadratic Stark effect) using the
recipe of Traving (1960), "Uber die Theorie der Druckverbreiterung von Spektrallinien",
p 93.
# Arguments
- χup, χlo, χ∞: energies of upper, lower, and ionisation
- Z: effective nuclear charge of the ionised level
"""
function c4_traving(χup, χlo, χ∞, Z)
n_eff_u = n_eff(χ∞, χup, Z)
n_eff_l = n_eff(χ∞, χlo, Z)
C4 = (e^2 * inv_4πε0 * a_0^3 * 2 * π / (h * 18 * Z^4) *
((n_eff_u * (5 * n_eff_u^2 + 1))^2 - (n_eff_l * (5 * n_eff_l^2 + 1))^2))
return C4 |> u"m^4 / s"
end
"""
const_stark_quadratic(atomic_mass::Unitful.Mass, χup::Unitful.Energy,
χlo::Unitful.Energy, χ∞::Unitful.Energy, Z::Real;
mean_atomic_weight::Unitful.Mass=28 * m_u,
scaling::Real=1)
Calculate height-independent constant to use in `γ_stark_quadratic`, using the recipe
from RH, which is based on the following estimate:
\$\$
\\gamma = 11.37 \\cdot v_{rel}^{1/3} \\cdot C_4^{2/3} \\cdot (n_e + n_{ion}),
\$\$
Using the estimate for \$C_4\$ from Traving (1960), "Uber die Theorie der
Druckverbreiterung von Spektrallinien", p 93., and \$n_{ion}\\approx n_e\$
(following Gray).
"""
function const_stark_quadratic(atomic_mass::Unitful.Mass, χup::Unitful.Energy,
χlo::Unitful.Energy, χ∞::Unitful.Energy, Z::Real;
mean_atomic_weight::Unitful.Mass=28 * m_u,
scaling::Real=1)
C = ustrip(8 * k_B / (π * atomic_mass) |> u"J/(K * kg)")
Cm = ((1 + atomic_mass / m_e)^(1/6) +
(1 + atomic_mass / mean_atomic_weight)^(1/6))
C4 = ustrip(c4_traving(χup, χlo, χ∞, Z) |> u"m^4 / s")
cStark23 = 11.37u"m^3 * rad / s" * (scaling * C4)^(2/3)
return C^(1/6) * cStark23 * Cm
end
"""
γ_stark_quadratic(
electron_density::NumberDensity,
temperature::Unitful.Temperature;
        stark_constant::Unitful.VolumeFlow=1.0u"m^3 * rad / s",
)
Compute quadratic Stark broadening for a given `electron_density`. If `temperature` is
nonzero, then it will apply the standard recipe of RH (using \$C_4\$ from Traving 1960).
The `stark_constant` can be obtained either from atomic data sources, or, if using the RH
recipe, using the function `const_stark_quadratic`.
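# Examples
A hedged sketch chaining the two functions (the energy levels are illustrative, roughly
those of the Ca II 854.2 nm line, and the output is not verified here):
```julia
cst = const_stark_quadratic(40.08 * m_u, 3.15u"eV", 1.70u"eV", 11.87u"eV", 2)
γ = γ_stark_quadratic(1e19u"m^-3", 6000u"K"; stark_constant=cst)
```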
"""
function γ_stark_quadratic(
electron_density::NumberDensity,
temperature::Unitful.Temperature;
stark_constant::Unitful.VolumeFlow=1.0u"m^3 * rad / s",
)
if temperature > 0u"K"
t_factor = ustrip(temperature |> u"K")^(1/6)
else
t_factor = 1.0
end
return stark_constant * t_factor * electron_density
end
"""
γ_stark_quadratic_gray(
electron_density::NumberDensity,
temperature::Unitful.Temperature,
c4::Quantity{<:AbstractFloat, Unitful.𝐋^4 / Unitful.𝐓},
)
Compute quadratic Stark broadening using the recipe of Gray (2005), page 244, eq 11.27.
The interaction constant `c4` should be provided, either from atomic data or from the
estimate of Traving (1960) using `c4_traving`.
"""
function γ_stark_quadratic_gray(
electron_density::NumberDensity,
temperature::Unitful.Temperature,
c4::Quantity{<:AbstractFloat, Unitful.𝐋^4 / Unitful.𝐓},
)
# Formula assumes CGS
ne_term = ustrip(electron_density * k_B |> u"erg / (K * cm^3)")
c4_term = ustrip(c4 |> u"cm^4 / s")
t_term = ustrip(temperature |> u"K")
log10γ = 19 + (2/3) * log10(c4_term) + log10(ne_term) + (1/6) * log10(t_term)
return (10^log10γ) * u"rad / s"
end
#=----------------------------------------------------------------------------
van der Waals broadening, C_6 / r^6
----------------------------------------------------------------------------=#
"""
    function const_unsold(atomic_mass::Unitful.Mass, χup::Unitful.Energy, χlo::Unitful.Energy,
χ∞::Unitful.Energy, Z::Real; H_scaling=1, He_scaling=1)
Compute atmosphere-independent constant for γ_unsold, to be used in function `γ_unsold`.
Based on expressions from RH broad.c, which uses formula in Mihalas (1978),
pp 282, 286-287, eq. (9-50) for v_rel, table 9-1 and eq. (9-76) for the interaction
coefficient C6. Arguments are line parameters, where Z is the nuclear charge of the
upper level plus one (e.g. 1 for neutral, 2 for singly ionised).
The van der Waals broadening can be scaled for both H and He perturbers
using `H_scaling` and `He_scaling`.
"""
function const_unsold(atomic_mass::Unitful.Mass, χup::Unitful.Energy, χlo::Unitful.Energy,
χ∞::Unitful.Energy, Z::Real; H_scaling=1, He_scaling=1)
Δr = (Ry^2 * (1 / (χ∞ - χup)^2 - 1 / (χ∞ - χlo)^2)) |> u"J/J"
C6 = ustrip((2.5 * e^2 * αp * inv_4πε0^2 * 2 * π *
(Z * a_0)^2 / h * Δr) |> u"C^2 * m^6 / (F * J * s)")
v_rel_const = ustrip(8 * k_B / (π * atomic_mass) |> u"J/(K * kg)")
v_rel_H = v_rel_const * (1 + atomic_mass / mass_H)
v_rel_He = v_rel_const * (1 + atomic_mass / mass_He)
return 8.08 * (H_scaling * v_rel_H^0.3 + He_scaling * abund_He * v_rel_He^0.3) * C6^0.4
end
"""
function γ_unsold(unsold_const::AbstractFloat, temperature::Unitful.Temperature,
h_neutral_density::NumberDensity)
Compute van der Waals broadening in Lindholm theory using Unsöld's approximation
for the interaction coefficient \$C_6\$. Based on Mihalas (1978), pp 282, 286-287.
Takes the atmosphere-independent `unsold_const` from `const_unsold`, temperature,
and populations of neutral hydrogen, and returns broadening in units of rad * s^-1.
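# Examples
A hedged usage sketch (the energies are illustrative, roughly Ca II-like):
```julia
uconst = const_unsold(40.08 * m_u, 3.15u"eV", 1.70u"eV", 11.87u"eV", 2)
γ = γ_unsold(uconst, 6000u"K", 1e23u"m^-3")
```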
"""
function γ_unsold(
unsold_const::AbstractFloat,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity
)
return (unsold_const * ustrip(temperature |> u"K")^0.3 *
ustrip(h_neutral_density |> u"m^-3") * u"rad / s")
end
"""
    function const_barklem(atomic_mass::Unitful.Mass, α::Real, σ::Real)
Compute the atmosphere-independent constant used to calculate broadening from collisions
with neutral hydrogen using the recipes of Barklem/O'Mara/Anstee, in the function
`γ_barklem`. The calculation performed here follows eq (3) of
[Anstee & O'Mara (1995)](https://ui.adsabs.harvard.edu/abs/1995MNRAS.276..859A).
# Arguments
- `atomic_mass::Unitful.Mass`: atomic mass of the element
- `α::Real`: velocity exponent from Barklem/O'Mara/Anstee tables
- `σ::Real`: line broadening cross section in atomic units (a_0^2) for a collision
velocity of 10 km/s, from Barklem/O'Mara/Anstee tables.
# Returns
- `Unitful.VolumeFlow`: line broadening width per neutral hydrogen atom. Needs
to be multiplied by temperature ^ ((1 - α)/2) to give proper temperature dependence.
"""
function const_barklem(atomic_mass::Unitful.Mass, α::Real, σ::Real)
α < 0 && error("α must be non-negative")
σ < 0 && error("σ must be non-negative")
μ = m_u / (1 / Ar_H + 1 / (atomic_mass / m_u))
# Using 1 K to keep units right for later multiplication by correct temperature
v_bar = sqrt(8 * k_B * u"K"/ (π * μ)) |> u"m/s"
v_ratio = (1e4u"m/s" / v_bar) |> u"m/m"
# Squared Bohr radius is to convert from atomic units to m^2, factor of 2 from HW to FW
return (a_0^2 * 2 * (4 / π)^(α / 2) * gamma((4 - α) / 2) * v_bar * σ *
v_ratio^α) |> u"m^3 * rad / s"
end
"""
function γ_barklem(
α::AbstractFloat,
barklem_const::Unitful.VolumeFlow,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
)
Compute van der Waals broadening from collisions with neutral hydrogen atoms following the
theory from Barklem/O'Mara/Anstee.
# Arguments
- `α::AbstractFloat`: velocity exponent from Barklem/O'Mara/Anstee tables
- `barklem_const::Unitful.VolumeFlow`: atmosphere-independent constant computed from
`const_barklem()`.
- `temperature::Unitful.Temperature`
- `h_neutral_density::NumberDensity`: number density of neutral hydrogen atoms
# Returns
- `γ::Unitful.Frequency`: broadening in units of rad / s.
# Notes
This computes broadening only from hydrogen atoms. For collisions with helium atoms,
it is recommended to add van der Waals broadening using Unsöld's approximation (see example).
# Examples
Example for Ca II 854.2 nm line:
```
julia> Ca8542 = AtomicLine(25414.400u"cm^-1", 13710.880u"cm^-1", 95785.470u"cm^-1",
4, 6, 7.242e-02, 40.08 * m_u, 20);
julia> temp = 6000u"K";
julia> h_density = 1e23u"m^-3";
julia> bconst = const_barklem(Ca8542.atom_weight, 0.275, 291)
7.495208174533257e-16 m³ rad s⁻¹
julia> γ = γ_barklem(0.275, bconst, temp, h_density)
1.3596876505340942e11 rad s⁻¹
```
Now adding van der Waals broadening for helium as well:
```
julia> uconst = const_unsold(Ca8542; H_scaling=0, He_scaling=1)
2.482484115415461e-16
julia> γ = γ_barklem(0.275, bconst, temp, h_density) + γ_unsold(uconst, temp, h_density)
1.3630631018876224e11 rad s⁻¹
```
# References
- [Anstee & O'Mara (1995)](https://ui.adsabs.harvard.edu/abs/1995MNRAS.276..859A)
- [Barklem & O'Mara (1997)](https://ui.adsabs.harvard.edu/abs/1997MNRAS.290..102B)
- [Barklem, O'Mara & Ross (1998)](https://ui.adsabs.harvard.edu/abs/1998MNRAS.296.1057B)
- [Barklem & O'Mara (1998)](https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..863B)
"""
function γ_barklem(
α::AbstractFloat,
barklem_const::Unitful.VolumeFlow,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
)
return barklem_const * ustrip(temperature |> u"K")^((1 - α)/2) * h_neutral_density
end
"""
function const_deridder_rensbergen(
atomic_mass_pert::Unitful.Mass,
        atomic_mass_rad::Unitful.Mass,
α::Real,
β::Real
)
Compute the atmosphere-independent constant used to calculate broadening using
the recipes of [Deridder & Rensbergen (1976)](https://ui.adsabs.harvard.edu/abs/1976A%26AS...23..147D),
in the function `γ_deridder_rensbergen`.
# Arguments
- `atomic_mass_pert::Unitful.Mass`: atomic mass of perturbing element (hydrogen or helium)
- `atomic_mass_rad::Unitful.Mass`: atomic mass of the line-producing species
- `α::Real`: α parameter as taken from the tables of Deridder & Rensbergen (1976),
in units of 10^-8 cm^3/s (ignoring the dimensions of the temperature exponent)
- `β::Real`: β parameter as taken from the tables of Deridder & Rensbergen (1976),
dimensionless.
# Returns
- `Unitful.VolumeFlow`: line broadening width per perturber atom. Needs
to be multiplied by temperature ^ β to give proper temperature dependence.
"""
function const_deridder_rensbergen(
atomic_mass_pert::Unitful.Mass,
atomic_mass_rad::Unitful.Mass,
α::Real,
β::Real,
)
α < 0 && error("α must be non-negative")
α = (α * 1e-8u"cm^3/s") |> u"m^3/s" # Convert from paper's 1e-8 units to SI
mass_corr = (1 + atomic_mass_pert / atomic_mass_rad) ^ β
return α * mass_corr
end
"""
function γ_deridder_rensbergen(
β::Real,
deridder_const::Unitful.VolumeFlow,
temperature::Unitful.Temperature,
        perturber_density::NumberDensity,
)
Compute van der Waals broadening from collisions with neutral hydrogen or helium
atoms following [Deridder & Rensbergen (1976)](https://ui.adsabs.harvard.edu/abs/1976A%26AS...23..147D).
# Arguments
- `β::Real`: β parameter as taken from the tables of Deridder & Rensbergen (1976),
dimensionless.
- `deridder_const::Unitful.VolumeFlow`: atmosphere-independent constant computed from
`const_deridder_rensbergen()`.
- `temperature::Unitful.Temperature`
- `perturber_density::NumberDensity`: number density of perturber atoms, either
neutral hydrogen or helium.
# Returns
- `γ::Unitful.Frequency`: broadening in units of s^-1.
"""
function γ_deridder_rensbergen(
β::Real,
deridder_const::Unitful.VolumeFlow,
temperature::Unitful.Temperature,
perturber_density::NumberDensity,
)
return deridder_const * ustrip(temperature |> u"K")^β * perturber_density
end
#=----------------------------------------------------------------------------
Utilities
----------------------------------------------------------------------------=#
"""
Compute the dimensionless damping parameter of the Voigt profile, a = γ λ^2 / (4 π c_0 ΔλD),
from the broadening γ, the line centre wavelength λ, and the Doppler width ΔλD.
"""
function damping(γ::Unitful.Frequency, λ::Unitful.Length, ΔλD::Unitful.Length)
return (γ * λ^2 / (4 * π * c_0 * ΔλD)) |> u"m/m"
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 14180 | """
Functions to calculate atomic collisional rates.
"""
# Dimension for CE and CI collisional tables, in SI units m^3 K^-1/2 s-1
@derived_dimension CEI_dimension Unitful.𝐋^3 / (Unitful.𝚯^(1/2) * Unitful.𝐓)
const Ω_c0 = Ry / sqrt(m_e) * π * a_0^2 * sqrt(8 / (π * k_B))
"""
coll_CE(
rate_interpolant::Interpolations.AbstractInterpolation{<:CEI_dimension, 1},
g_ratio::Real,
electron_density::NumberDensity,
temperature::Unitful.Temperature
)
Calculate collisional de-excitation by electrons of a bound-bound transition, using
the CE expression from RH / MULTI.
# Arguments
- `rate_interpolant`: a generic interpolant that takes as argument a value of
temperature, and returns in units similar to m^3 K^-1/2 s^-1
- `g_ratio`: ratio g_l / g_u, statistical weights of lower and upper level
- `electron_density`: electron density
- `temperature`: gas temperature
# Returns
- `coll_deexc`: collisional de-excitations per second (from upper level to lower level)
"""
function coll_CE(
rate_interpolant::Interpolations.AbstractInterpolation{<:CEI_dimension, 1},
g_ratio::Real,
electron_density::NumberDensity,
temperature::Unitful.Temperature
)
return (rate_interpolant(temperature) * g_ratio *
electron_density * sqrt(temperature)) |> u"s^-1"
end
"""
coll_CI(
rate_interpolant::Interpolations.AbstractInterpolation{<:CEI_dimension, 1},
dE::Unitful.Energy,
electron_density::NumberDensity,
temperature::Unitful.Temperature
)
Calculate collisional ionisation by electrons of a given level, using
the CI expression from RH / MULTI.
# Arguments
- `rate_interpolant`: a generic interpolant that takes as argument a value of
temperature, and returns in units similar to m^3 K^-1/2 s^-1
- `dE`: energy difference between continuum and level, dE = E_cont - E_level.
- `electron_density`: electron density
- `temperature`: gas temperature
# Returns
- `coll_ion`: collisional ionisations per second (from level to continuum)
"""
function coll_CI(
rate_interpolant::Interpolations.AbstractInterpolation{<:CEI_dimension, 1},
dE::Unitful.Energy,
electron_density::NumberDensity,
temperature::Unitful.Temperature
)
@assert dE > 0u"J" "dE should be positive"
return (rate_interpolant(temperature) * exp(-dE / (k_B * temperature)) *
electron_density * sqrt(temperature)) |> u"s^-1"
end
"""
coll_Ω(
        rate_interpolant::Interpolations.AbstractInterpolation{<:Real, 1},
g_u::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature
)
Calculate collisional de-excitation by electrons of a bound-bound transition, using
the OMEGA expression from RH (OHMEGA from MULTI), from a tabulated dimensionless Ω
(collision strength).
# Arguments
- `rate_interpolant`: a generic interpolant that takes as argument a value of
temperature, and returns a dimensionless Ω
- `g_u`: statistical weight of upper level
- `electron_density`: electron density
- `temperature`: gas temperature
# Returns
- `coll_deexc`: collisional de-excitations per second (from upper level to lower level)
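# Examples
A minimal, hedged sketch of building an Ω interpolant and calling this function
(temperatures and Ω values are illustrative only):
```julia
temps = [3000.0, 5000.0, 10000.0] * u"K"
Ω_values = [0.5, 1.0, 1.5]    # dimensionless collision strengths
itp = linear_interpolation(temps, Ω_values, extrapolation_bc=Line())
coll_Ω(itp, 8, 1e18u"m^-3", 6000.0u"K")
```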
"""
function coll_Ω(
rate_interpolant::Interpolations.AbstractInterpolation{<:Real, 1},
g_u::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature
)
return (rate_interpolant(temperature) * Ω_c0 * electron_density /
(g_u * sqrt(temperature))) |> u"s^-1"
end
#=----------------------------------------------------------------------------
Collisional rates for hydrogen from Johnson (1972), based on make_h.c from RH
----------------------------------------------------------------------------=#
const johnson_c0 = sqrt(8 * k_B / (π * m_e)) * 2 * π * a_0^2
const johnson_c1 = 32 / (3 * sqrt(3) * π)
"""
coll_exc_hydrogen_johnson(
n_l::Integer,
n_u::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature,
)
Calculates the rates for collisional excitation for a hydrogen line, resulting
from collisions with electrons, using the recipe of
[Johnson (1972)](https://ui.adsabs.harvard.edu/abs/1972ApJ...174..227J/abstract)
# Arguments
- `n_l`: lower level (principal quantum number) of transition
- `n_u`: upper level (principal quantum number) of transition
- `electron_density`: electron density
- `temperature`: gas temperature
# Returns
- `coll_exc`: collisional excitations per second from n_l -> n_u
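# Examples
A hedged usage sketch (illustrative chromospheric conditions):
```julia
coll_exc_hydrogen_johnson(1, 2, 1e18u"m^-3", 8000.0u"K")   # Lyα collisional excitation rate
```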
"""
function coll_exc_hydrogen_johnson(
n_l::Integer,
n_u::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature,
)
@assert n_l > 0 "n_l must be > 0"
@assert n_u > n_l "n_u must be > n_l"
rn = _rn(n_l)
bn = _bn(n_l)
x = 1 - (n_l / n_u)^2
rnn = rn * x
fnn = johnson_c1 * n_l / (n_u * x)^3 * (g0(n_l) + (g1(n_l) + g2(n_l) / x) / x)
Ann = 2 * n_l^2 / x * fnn
Bnn = 4 * n_l * (n_l / n_u)^3 * (1 + 4 / (3 * x) + bn / x^2) / x^2
y = x * Ryh / (n_l^2 * k_B * temperature)
z = rnn + y
coll_exc = (johnson_c0 * (n_l * y)^2 / x * sqrt(temperature) * electron_density *
(Ann * ((1 / y + 0.5) * expint(1, y) - (1 / z + 0.5) * expint(1, z)) +
(Bnn - Ann * log(2 * n_l^2 / x)) * (expint(2, y) / y - expint(2, z) / z)))
return coll_exc |> u"s^-1"
end
"""
coll_ion_hydrogen_johnson(
n::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature,
)
Calculates the rates for collisional ionisation for a hydrogen level, resulting
from collisions with electrons, using the recipe of
[Johnson (1972)](https://ui.adsabs.harvard.edu/abs/1972ApJ...174..227J/abstract).
# Arguments
- `n`: principal quantum number of hydrogen level
- `electron_density`: electron density
- `temperature`: gas temperature
# Returns
- `coll_ion`: collisional ionisations per second from level n
"""
function coll_ion_hydrogen_johnson(
n::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature,
)
@assert n > 0 "n must be > 0"
An = johnson_c1 * n * (g0(n) / 3 + g1(n) / 4 + g2(n) / 5)
Bn = 2 * n^2 / 3 * (5 + _bn(n))
yn = Ryh / (n^2 * k_B * temperature)
zn = _rn(n) + yn
coll_ion = (johnson_c0 * (n * yn)^2 * sqrt(temperature) * electron_density *
(An * (expint(1, yn) / yn - expint(1, zn) / zn) +
(Bn - An * log(2 * n^2)) * (ξ(yn) - ξ(zn))))
return coll_ion |> u"s^-1"
end
"""
CI_RH_hydrogen(n::Integer, temperature::Unitful.Temperature)
Calculate CI coefficients for collisional ionisation for a hydrogen level,
in the SI units of RH, using the recipe of
[Johnson (1972)](https://ui.adsabs.harvard.edu/abs/1972ApJ...174..227J/abstract).
# Arguments
- `n`: principal quantum number of hydrogen level
- `temperature`: gas temperature
"""
function CI_RH_hydrogen(n::Integer, temperature::Unitful.Temperature)
tmp = coll_ion_hydrogen_johnson(n, 1.0u"m^-3", temperature)
yn = Ryh / (n^2 * k_B * temperature)
return tmp * exp(yn) / sqrt(temperature) / 1.0u"m^-3"
end
"""
CE_RH_hydrogen(n_l::Integer, n_u::Integer, temperature::Unitful.Temperature)
Calculate CE coefficients for collisional deexcitation for a hydrogen transition,
in the SI units of RH, using the recipe of
[Johnson (1972)](https://ui.adsabs.harvard.edu/abs/1972ApJ...174..227J/abstract).
# Arguments
- `n_l`: lower level (principal quantum number) of transition
- `n_u`: upper level (principal quantum number) of transition
- `temperature`: gas temperature
"""
function CE_RH_hydrogen(n_l::Integer, n_u::Integer, temperature::Unitful.Temperature)
tmp = coll_exc_hydrogen_johnson(n_l, n_u, 1.0u"m^-3", temperature)
x = 1 - (n_l / n_u)^2
y = x * Ryh / (n_l^2 * k_B * temperature)
return tmp * exp(y) / sqrt(temperature) / 1.0u"m^-3"
end
#=----------------------------------------------------------------------------
Utility functions from Johnson (1972)
----------------------------------------------------------------------------=#
# bn and rn expressions, from eqs (24) and (32) of Johnson (1972)
_bn(n::Integer) = (n == 1) ? -0.603 : (4.0+ (-18.63 + (36.24 - 28.09 / n) / n) / n) / n
_rn(n::Integer) = (n == 1) ? 0.45 : 1.94 * n^-1.57
# ξ(t), eq (42)
ξ(t::AbstractFloat) = expint(0, t) - 2 * expint(1, t) + expint(2, t)
# Gaunt factor coefficients, Table 1 of Johnson (1972)
function g0(n::Integer)
if n == 1
return 1.1330f0
elseif n == 2
return 1.0785f0
else
return 0.9935f0 + (0.2328f0 - 0.1296f0 / n) / n
end
end
function g1(n::Integer)
if n == 1
return -0.4059f0
elseif n == 2
return -0.2319f0
else
return -(0.6282 - (0.5598 - 0.5299 / n) / n) / n
end
end
function g2(n::Integer)
if n == 1
return 0.07014f0
elseif n == 2
return 0.02947f0
else
return (0.3887f0 - (1.181f0 - 1.4700f0 / n) / n) / n^2
end
end
#=----------------------------------------------------------------------------
Collisional rates for hydrogen from Przybilla & Butler (2004)
----------------------------------------------------------------------------=#
const PB04_temp = [0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3,
4, 5, 6, 8, 10, 15, 20, 25] * 10000u"K"
const PB04_Ω =
[[6.40f-1 6.98f-1 7.57f-1 8.09f-1 8.97f-1 9.78f-1 1.06f+0 1.15f+0 #= 1->2
=# 1.32f+0 1.51f+0 1.68f+0 2.02f+0 2.33f+0 2.97f+0 3.50f+0 3.95f+0];
[2.20f-1 2.40f-1 2.50f-1 2.61f-1 2.88f-1 3.22f-1 3.59f-1 3.96f-1 #= 1->3
=# 4.64f-1 5.26f-1 5.79f-1 6.70f-1 7.43f-1 8.80f-1 9.79f-1 1.06f+0];
[9.93f-2 1.02f-1 1.10f-1 1.22f-1 1.51f-1 1.80f-1 2.06f-1 2.28f-1 #= 1->4
=# 2.66f-1 2.95f-1 3.18f-1 3.55f-1 3.83f-1 4.30f-1 4.63f-1 4.88f-1];
[4.92f-2 5.84f-2 7.17f-2 8.58f-2 1.12f-1 1.33f-1 1.50f-1 1.64f-1 #= 1->5
=# 1.85f-1 2.01f-1 2.12f-1 2.29f-1 2.39f-1 2.59f-1 2.71f-1 2.81f-1];
[2.97f-2 4.66f-2 6.28f-2 7.68f-2 9.82f-2 1.14f-1 1.25f-1 1.33f-1 #= 1->6
=# 1.45f-1 1.53f-1 1.58f-1 1.65f-1 1.70f-1 1.77f-1 1.82f-1 1.85f-1];
[5.03f-2 6.72f-2 7.86f-2 8.74f-2 1.00f-1 1.10f-1 1.16f-1 1.21f-1 #= 1->7
=# 1.27f-1 1.31f-1 1.34f-1 1.36f-1 1.37f-1 1.39f-1 1.39f-1 1.40f-1];
[2.35f+1 2.78f+1 3.09f+1 3.38f+1 4.01f+1 4.71f+1 5.45f+1 6.20f+1 #= 2->3
=# 7.71f+1 9.14f+1 1.05f+2 1.29f+2 1.51f+2 1.93f+2 2.26f+2 2.52f+2];
[1.07f+1 1.15f+1 1.23f+1 1.34f+1 1.62f+1 1.90f+1 2.18f+1 2.44f+1 #= 2->4
=# 2.89f+1 3.27f+1 3.60f+1 4.14f+1 4.56f+1 5.31f+1 5.83f+1 6.23f+1];
[5.22f+0 5.90f+0 6.96f+0 8.15f+0 1.04f+1 1.23f+1 1.39f+1 1.52f+1 #= 2->5
=# 1.74f+1 1.90f+1 2.03f+1 2.23f+1 2.37f+1 2.61f+1 2.78f+1 2.89f+1];
[2.91f+0 4.53f+0 6.06f+0 7.32f+0 9.17f+0 1.05f+1 1.14f+1 1.21f+1 #= 2->6
=# 1.31f+1 1.38f+1 1.44f+1 1.51f+1 1.56f+1 1.63f+1 1.68f+1 1.71f+1];
[5.25f+0 7.26f+0 8.47f+0 9.27f+0 1.03f+1 1.08f+1 1.12f+1 1.14f+1 #= 2->7
=# 1.17f+1 1.18f+1 1.19f+1 1.19f+1 1.20f+1 1.19f+1 1.19f+1 1.19f+1];
[1.50f+2 1.90f+2 2.28f+2 2.70f+2 3.64f+2 4.66f+2 5.70f+2 6.72f+2 #= 3->4
=# 8.66f+2 1.04f+3 1.19f+3 1.46f+3 1.67f+3 2.08f+3 2.39f+3 2.62f+3];
[7.89f+1 9.01f+1 1.07f+2 1.26f+2 1.66f+2 2.03f+2 2.37f+2 2.68f+2 #= 3->5
=# 3.19f+2 3.62f+2 3.98f+2 4.53f+2 4.95f+2 5.68f+2 6.16f+2 6.51f+2];
[4.13f+1 6.11f+1 8.21f+1 1.01f+2 1.31f+2 1.54f+2 1.72f+2 1.86f+2 #= 3->6
=# 2.08f+2 2.24f+2 2.36f+2 2.53f+2 2.65f+2 2.83f+2 2.94f+2 3.02f+2];
[7.60f+1 1.07f+2 1.25f+2 1.37f+2 1.52f+2 1.61f+2 1.68f+2 1.72f+2 #= 3->7
=# 1.78f+2 1.81f+2 1.83f+2 1.85f+2 1.86f+2 1.87f+2 1.86f+2 1.87f+2];
[5.90f+2 8.17f+2 1.07f+3 1.35f+3 1.93f+3 2.47f+3 2.96f+3 3.40f+3 #= 4->5
=# 4.14f+3 4.75f+3 5.25f+3 6.08f+3 6.76f+3 8.08f+3 9.13f+3 1.00f+4];
[2.94f+2 4.21f+2 5.78f+2 7.36f+2 1.02f+3 1.26f+3 1.46f+3 1.64f+3 #= 4->6
=# 1.92f+3 2.15f+3 2.33f+3 2.61f+3 2.81f+3 3.15f+3 3.36f+3 3.51f+3];
[4.79f+2 7.06f+2 8.56f+2 9.66f+2 1.11f+3 1.21f+3 1.29f+3 1.34f+3 #= 4->7
=# 1.41f+3 1.46f+3 1.50f+3 1.55f+3 1.57f+3 1.61f+3 1.62f+3 1.63f+3];
[1.93f+3 2.91f+3 4.00f+3 5.04f+3 6.81f+3 8.20f+3 9.29f+3 1.02f+4 #= 5->6
=# 1.15f+4 1.26f+4 1.34f+4 1.49f+4 1.63f+4 1.97f+4 2.27f+4 2.54f+4];
[1.95f+3 3.24f+3 4.20f+3 4.95f+3 6.02f+3 6.76f+3 7.29f+3 7.70f+3 #= 5->7
=# 8.26f+3 8.63f+3 8.88f+3 9.21f+3 9.43f+3 9.78f+3 1.00f+4 1.02f+4];
[6.81f+1 1.17f+4 1.50f+4 1.73f+4 2.03f+4 2.21f+4 2.33f+4 2.41f+4 #= 6->7
=# 2.52f+4 2.60f+4 2.69f+4 2.90f+4 3.17f+4 3.94f+4 4.73f+4 5.50f+4]]
# Index of transition data in matrix form: PB04_index[n_l, n_u] = i,
# where i is row index in PB04_Ω
const PB04_index = [0 1 2 3 4 5 6
0 0 7 8 9 10 11
0 0 0 12 13 14 15
0 0 0 0 16 17 18
0 0 0 0 0 19 20
0 0 0 0 0 0 21]
const PB04_interp = [linear_interpolation(PB04_temp, PB04_Ω[i, :],
extrapolation_bc=Line()) for i in 1:16]
"""
coll_deexc_hydrogen_PB04(
n_l::Integer,
n_u::Integer,
g_u::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature,
)
Calculate collisional de-excitation by electrons of a bound-bound hydrogen transition,
using the Ω data from Table 3 of
[Przybilla & Butler (2004)](https://ui.adsabs.harvard.edu/abs/2004ApJ...610L..61P/abstract).
Data available only up to n=7.
# Arguments
- `n_l`: lower level (principal quantum number) of transition
- `n_u`: upper level (principal quantum number) of transition
- `g_u`: statistical weight of upper level
- `electron_density`: electron density
- `temperature`: gas temperature
# Returns
- `coll_deexc`: collisional de-excitations per second (from upper level to lower level)
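# Examples
A hedged usage sketch (`g_u = 18` is the statistical weight of the hydrogen n=3 level;
plasma conditions are illustrative only):
```julia
coll_deexc_hydrogen_PB04(2, 3, 18, 1e18u"m^-3", 8000.0u"K")   # Hα de-excitation rate
```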
"""
function coll_deexc_hydrogen_PB04(
n_l::Integer,
n_u::Integer,
g_u::Integer,
electron_density::NumberDensity,
temperature::Unitful.Temperature,
)
@assert 7 > n_l > 0 "Must have 7 > n_l > 0"
@assert 8 > n_u > n_l "Must have 8 > n_u > n_l"
return coll_Ω(PB04_interp[PB04_index[n_l, n_u]], g_u, electron_density, temperature)
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 45178 | """
Computes the extinction for hydrogen-related bound-free and free-free transitions.
Functions are organised in two types:
1. `σ_*` functions compute the cross section (in m^2) or "cross section
   coefficient" (in m^5).
2. `α_*` functions compute the linear extinction coefficient \$\\alpha_\\nu\$ (in m^-1)
   by multiplying the cross sections by the relevant number densities.
Includes:
* Neutral Hydrogen bound-free and free-free.
* H\$^-\$ bound-free and free-free.
* H\$_2^-\$ free-free.
* H\$_2^+\$ free-free.
* Rayleigh scattering by molecular H\$_2\$
"""
#=----------------------------------------------------------------------------
Catch-all functions
----------------------------------------------------------------------------=#
"""
σ_hminus_ff(
λ::Unitful.Length,
        temperature::Unitful.Temperature;
recipe::String="stilley"
)
Compute free-free extinction from H minus ion. Recipe can be one of:
- `stilley` (default): Interpolates table from
[Stilley & Callaway (1970)](https://ui.adsabs.harvard.edu/abs/1970ApJ...160..245S/abstract),
which is valid for λ up to 9113 nm.
- `john`: Follows
[John (1988)](https://ui.adsabs.harvard.edu/abs/1988A%26A...193..189J/abstract),
which is valid beyond 9113 nm but may not be good below 364.5 nm.
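# Examples
A hedged sketch of selecting a recipe (wavelength and temperature are illustrative):
```julia
σ_hminus_ff(500.0u"nm", 6000.0u"K")                  # default recipe: "stilley"
σ_hminus_ff(500.0u"nm", 6000.0u"K"; recipe="john")
```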
"""
function σ_hminus_ff(λ::Unitful.Length, temperature::Unitful.Temperature;
recipe::String="stilley"
)
if recipe == "stilley"
σ = σ_hminus_ff_stilley(λ, temperature)
elseif recipe == "john"
σ = σ_hminus_ff_john(λ, temperature)
else
throw("NotImplemented recipe $recipe")
end
return σ
end
"""
α_hminus_ff(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity;
recipe::String="stilley"
)
Compute free-free extinction from H minus ion. Recipe can be one of:
- `stilley` (default): Interpolates table from
[Stilley & Callaway (1970)](https://ui.adsabs.harvard.edu/abs/1970ApJ...160..245S/abstract),
which is valid for λ up to 9113 nm.
- `john`: Follows
[John (1988)](https://ui.adsabs.harvard.edu/abs/1988A%26A...193..189J/abstract),
which is valid beyond 9113 nm but may not be good below 364.5 nm.
"""
function α_hminus_ff(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity;
recipe::String="stilley"
)
σ = σ_hminus_ff(λ, temperature; recipe=recipe)
return σ * h_neutral_density * electron_density
end
"""
σ_hminus_bf(
λ::Unitful.Length,
        temperature::Unitful.Temperature;
recipe::String="wbr"
)
Compute bound-free extinction from H minus ion. Recipe can be one of:
- `geltman`: Uses recipe from
[Geltman (1962)](https://ui.adsabs.harvard.edu/abs/1962ApJ...136..935G/abstract)
- `john`: Follows
[John (1988)](https://ui.adsabs.harvard.edu/abs/1988A%26A...193..189J/abstract),
which is valid beyond 9113 nm but may not be good below 364.5 nm.
- `wbr` (default): Follows
[Wishart (1979)](https://ui.adsabs.harvard.edu/abs/1979MNRAS.187P..59W) for λ > 175 nm,
and [Broad and Reinhardt (1976)](https://ui.adsabs.harvard.edu/abs/1976PhRvA..14.2159B)
for λ <= 164 nm.
"""
function σ_hminus_bf(λ::Unitful.Length, temperature::Unitful.Temperature;
recipe::String="wbr"
)
if recipe == "geltman"
σ = σ_hminus_bf_geltman(λ, temperature)
elseif recipe == "john"
σ = σ_hminus_bf_john(λ, temperature)
elseif recipe == "wbr"
σ = σ_hminus_bf_wbr(λ, temperature)
else
throw("NotImplemented recipe $recipe")
end
return σ
end
"""
α_hminus_bf(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity;
recipe::String="wbr"
)
Compute bound-free extinction from H minus ion. Recipe can be one of:
- `geltman`: Uses recipe from
[Geltman (1962)](https://ui.adsabs.harvard.edu/abs/1962ApJ...136..935G/abstract)
- `john`: Follows
[John (1988)](https://ui.adsabs.harvard.edu/abs/1988A%26A...193..189J/abstract),
which is valid beyond 9113 nm but may not be good below 364.5 nm.
- `wbr` (default): Follows
[Wishart (1979)](https://ui.adsabs.harvard.edu/abs/1979MNRAS.187P..59W) for λ > 175 nm,
and [Broad and Reinhardt (1976)](https://ui.adsabs.harvard.edu/abs/1976PhRvA..14.2159B)
for λ <= 164 nm.
"""
function α_hminus_bf(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity;
recipe::String="wbr"
)
σ = σ_hminus_bf(λ, temperature; recipe=recipe)
return σ * h_neutral_density * electron_density
end
#=----------------------------------------------------------------------------
Recipes from Karzas and Latter / Kurucz
----------------------------------------------------------------------------=#
#= Tabulated Gaunt factors from Kurucz (1970, SAO Special Report no. 309), page 77,
which is a fit to figures 3-5 of Karzas and Latter (1961, ApJ Suppl 6, 167)
- Note from Mats Carlsson in RH:
There is extrapolation outside the range of lg(gamma2) (T outside [1570,1.57e8K])
or outside range of lg(U) (for lambda=3.2mm for T>49 kK). This should be OK for
reasonable extrapolation distances (and better than setting to constant end value
or zero). Interpolation tested against table in Gustafsson (1973) with results
within 1%.
=#
const kurucz_ff_table = [5.53 5.49 5.46 5.43 5.40 5.25 5.00 4.69 4.48 4.16 3.85
4.91 4.87 4.84 4.80 4.77 4.63 4.40 4.13 3.87 3.52 3.27
4.29 4.25 4.22 4.18 4.15 4.02 3.80 3.57 3.27 2.98 2.70
3.64 3.61 3.59 3.56 3.54 3.41 3.22 2.97 2.70 2.45 2.20
3.00 2.98 2.97 2.95 2.94 2.81 2.65 2.44 2.21 2.01 1.81
2.41 2.41 2.41 2.41 2.41 2.32 2.19 2.02 1.84 1.67 1.50
1.87 1.89 1.91 1.93 1.95 1.90 1.80 1.68 1.52 1.41 1.30
1.33 1.39 1.44 1.49 1.55 1.56 1.51 1.42 1.33 1.25 1.17
0.90 0.95 1.00 1.08 1.17 1.30 1.32 1.30 1.20 1.15 1.11
0.55 0.58 0.62 0.70 0.85 1.01 1.15 1.18 1.15 1.11 1.08
0.33 0.36 0.39 0.46 0.59 0.76 0.97 1.09 1.13 1.10 1.08
0.19 0.21 0.24 0.28 0.38 0.53 0.76 0.96 1.08 1.09 1.09]
# table_y is log10(hν / kT)
const kurucz_ff_table_y = -4:0.5:1.5
# table_x is log10(3.28805e15 * (Z^2 h) / (kT))
const kurucz_ff_table_x = -3:0.5:2.0
const h_k = h / k_B
const hc_k = h * c_0 / k_B
const hminusχ = 0.754u"eV"
const saha_const = h^2 / (2 * π * m_e * k_B) # Constant for Saha equation
const αff_const = (4 / (3 * h * c_0) * (e^2 / (4 * π * ε_0))^3 *
sqrt(2 * π / (3 * m_e^3 * k_B))) |> u"K^(1/2) * m^5 / s^3"
const αbf_const = (4 * e^2 / (3 * π * sqrt(3) * ε_0 * m_e * c_0^2 * R_∞)) |> u"m^2"
const kurucz_ff_interp = linear_interpolation((kurucz_ff_table_y, kurucz_ff_table_x),
kurucz_ff_table, extrapolation_bc=Line())
"""
gaunt_ff(ν::Unitful.Frequency, temperature::Unitful.Temperature, charge::Int)
gaunt_ff(λ::Unitful.Length, temperature::Unitful.Temperature, charge::Int)
Compute Gaunt factor for free-free based on [Karzas and Latter (1961, ApJ Suppl 6, 167)]
(https://ui.adsabs.harvard.edu/abs/1961ApJS....6..167K/abstract)
fit in
[Kurucz (1970, SAO Special Report no. 309), page 77](https://ui.adsabs.harvard.edu/abs/1970SAOSR.309.....K/abstract)
"""
function gaunt_ff(ν::Unitful.Frequency, temperature::Unitful.Temperature, charge::Int)
lookup_y = log10(h_k * ν / temperature)
lookup_x = log10(3.28805e15u"Hz" * charge^2 * h_k / temperature)
return kurucz_ff_interp(lookup_y, lookup_x)::Float64
end
function gaunt_ff(λ::Unitful.Length, temperature::Unitful.Temperature, charge::Int)
return gaunt_ff(c_0 / λ, temperature, charge)
end
#=----------------------------------------------------------------------------
Recipes from Seaton
----------------------------------------------------------------------------=#
"""
    gaunt_bf(λ::Unitful.Length, Z::Real, n_eff::Real)::Float64
Compute bound-free Gaunt factor for a given nuclear charge Z, effective principal
quantum number and wavelength λ. Taken from RH. Formula from
[Seaton (1960), Rep. Prog. Phys. 23, 313](https://ui.adsabs.harvard.edu/abs/1960RPPh...23..313S/abstract),
page 316.
"""
function gaunt_bf(λ::Unitful.Length, Z::Real, n_eff::Real)::Float64
x = ustrip(1 / (λ * R_∞ * Z^2) |> u"m/m")
x3 = x^(1/3)
nsqx = 1 / (n_eff^2 * x)
g_bf = 1 + 0.1728 * x3 * (1 - 2 * nsqx) - 0.0496 * x3^2 * (1 - (1 - nsqx) * 0.66666667 * nsqx)
@assert g_bf >= 0 "gaunt_bf negative, calculation will not be reliable"
return g_bf
end
function gaunt_bf(ν::Unitful.Frequency, Z::Real, n_eff::Real)::Float64
return gaunt_bf(c_0 / ν, Z, n_eff)
end
"""
    n_eff(energy_upper::Unitful.Energy, energy_lower::Unitful.Energy, Z::Real)
Compute the effective principal quantum number for a given energy difference
and nuclear charge Z.
"""
function n_eff(energy_upper::Unitful.Energy, energy_lower::Unitful.Energy, Z::Real)
return Z * sqrt(Ryh / (energy_upper - energy_lower))
end
#=----------------------------------------------------------------------------
Recipes from Stilley
----------------------------------------------------------------------------=#
const stilley_ff_λ = [0.0, 303.8, 455.6, 506.3, 569.5, 650.9, 759.4, 911.3,
1013.0, 1139.0, 1302.0, 1519.0, 1823.0, 2278.0, 3038.0,
4556.0, 9113.0] # in nm
const stilley_ff_t = 5040.0 ./ [0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2,
1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # in K
const stilley_ff_table =
[[0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 #= 0.0 nm
=# 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00];
[3.44e-02 4.18e-02 4.91e-02 5.65e-02 6.39e-02 7.13e-02 7.87e-02 8.62e-02 #= 303.8 nm
=# 9.36e-02 1.01e-01 1.08e-01 1.16e-01 1.23e-01 1.30e-01 1.38e-01 1.45e-01];
[7.80e-02 9.41e-02 1.10e-01 1.25e-01 1.40e-01 1.56e-01 1.71e-01 1.86e-01 #= 455.6 nm
=# 2.01e-01 2.16e-01 2.31e-01 2.45e-01 2.60e-01 2.75e-01 2.89e-01 3.03e-01];
[9.59e-02 1.16e-01 1.35e-01 1.53e-01 1.72e-01 1.90e-01 2.08e-01 2.25e-01 #= 506.3 nm
=# 2.43e-01 2.61e-01 2.78e-01 2.96e-01 3.13e-01 3.30e-01 3.47e-01 3.64e-01];
[1.21e-01 1.45e-01 1.69e-01 1.92e-01 2.14e-01 2.36e-01 2.58e-01 2.80e-01 #= 569.5 nm
=# 3.01e-01 3.22e-01 3.43e-01 3.64e-01 3.85e-01 4.06e-01 4.26e-01 4.46e-01];
[1.56e-01 1.88e-01 2.18e-01 2.47e-01 2.76e-01 3.03e-01 3.31e-01 3.57e-01 #= 650.9 nm
=# 3.84e-01 4.10e-01 4.36e-01 4.62e-01 4.87e-01 5.12e-01 5.37e-01 5.62e-01];
[2.10e-01 2.53e-01 2.93e-01 3.32e-01 3.69e-01 4.06e-01 4.41e-01 4.75e-01 #= 759.4 nm
=# 5.09e-01 5.43e-01 5.76e-01 6.08e-01 6.40e-01 6.72e-01 7.03e-01 7.34e-01];
[2.98e-01 3.59e-01 4.16e-01 4.70e-01 5.22e-01 5.73e-01 6.21e-01 6.68e-01 #= 911.3 nm
=# 7.15e-01 7.60e-01 8.04e-01 8.47e-01 8.90e-01 9.32e-01 9.73e-01 1.01e+00];
[3.65e-01 4.39e-01 5.09e-01 5.75e-01 6.39e-01 7.00e-01 7.58e-01 8.15e-01 #= 1013.0 nm
=# 8.71e-01 9.25e-01 9.77e-01 1.03e+00 1.08e+00 1.13e+00 1.18e+00 1.23e+00];
[4.58e-01 5.50e-01 6.37e-01 7.21e-01 8.00e-01 8.76e-01 9.49e-01 1.02e+00 #= 1139.0 nm
=# 1.09e+00 1.15e+00 1.22e+00 1.28e+00 1.34e+00 1.40e+00 1.46e+00 1.52e+00];
[5.92e-01 7.11e-01 8.24e-01 9.31e-01 1.03e+00 1.13e+00 1.23e+00 1.32e+00 #= 1302.0 nm
=# 1.40e+00 1.49e+00 1.57e+00 1.65e+00 1.73e+00 1.80e+00 1.88e+00 1.95e+00];
[7.98e-01 9.58e-01 1.11e+00 1.25e+00 1.39e+00 1.52e+00 1.65e+00 1.77e+00 #= 1519.0 nm
=# 1.89e+00 2.00e+00 2.11e+00 2.21e+00 2.32e+00 2.42e+00 2.51e+00 2.61e+00];
[1.14e+00 1.36e+00 1.58e+00 1.78e+00 1.98e+00 2.17e+00 2.34e+00 2.52e+00 #= 1823.0 nm
=# 2.68e+00 2.84e+00 3.00e+00 3.15e+00 3.29e+00 3.43e+00 3.57e+00 3.70e+00];
[1.77e+00 2.11e+00 2.44e+00 2.75e+00 3.05e+00 3.34e+00 3.62e+00 3.89e+00 #= 2278.0 nm
=# 4.14e+00 4.39e+00 4.63e+00 4.86e+00 5.08e+00 5.30e+00 5.51e+00 5.71e+00];
[3.10e+00 3.71e+00 4.29e+00 4.84e+00 5.37e+00 5.87e+00 6.36e+00 6.83e+00 #= 3038.0 nm
=# 7.28e+00 7.72e+00 8.14e+00 8.55e+00 8.95e+00 9.33e+00 9.71e+00 1.01e+01];
[6.92e+00 8.27e+00 9.56e+00 1.08e+01 1.19e+01 1.31e+01 1.42e+01 1.52e+01 #= 4556.0 nm
=# 1.62e+01 1.72e+01 1.82e+01 1.91e+01 2.00e+01 2.09e+01 2.17e+01 2.25e+01];
 [2.75e+01 3.29e+01 3.80e+01 4.28e+01 4.75e+01 5.19e+01 5.62e+01 6.04e+01 #= 9113.0 nm
=# 6.45e+01 6.84e+01 7.23e+01 7.60e+01 7.97e+01 8.32e+01 8.67e+01 9.01e+01]]
const stilley_ff_interp = linear_interpolation((stilley_ff_λ, stilley_ff_t[end:-1:1]),
stilley_ff_table[:, end:-1:1], extrapolation_bc=Line())
"""
σ_hminus_ff_stilley(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute free-free cross section coefficient from H minus ion, for a given wavelength
and temperature. Units are m^5, needs to be multiplied by electron density and
density of neutral hydrogen atoms to obtain linear extinction. Interpolates table from
[Stilley & Callaway (1970)](https://ui.adsabs.harvard.edu/abs/1970ApJ...160..245S/abstract),
page 255, which is valid for λ up to 9113 nm.
"""
function σ_hminus_ff_stilley(λ::Unitful.Length, temperature::Unitful.Temperature)
λi = ustrip(λ |> u"nm") # convert to units of table
temp = ustrip(temperature |> u"K")
kappa = max(0.0, stilley_ff_interp(λi, temp)::Float64) * 1e-29u"m^4/N"
return k_B * temperature * kappa |> u"m^5"
end
"""
α_hminus_ff_stilley(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
Compute free-free extinction from H minus ion, for a given wavelength, temperature,
density of neutral hydrogen atoms `h_neutral_density`, and electron density.
Based on `σ_hminus_ff_stilley`.
"""
function α_hminus_ff_stilley(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
σ = σ_hminus_ff_stilley(λ, temperature)
return σ * h_neutral_density * electron_density |> u"m^-1"
end
#=----------------------------------------------------------------------------
Recipes from Geltman
----------------------------------------------------------------------------=#
const geltman_bf_λ = [ 0.0, 50.0, 100.0, 150.0, 200.0, 250.0, 300.0,
350.0, 400.0, 450.0, 500.0, 550.0, 600.0, 650.0,
700.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0,
1050.0, 1100.0, 1150.0, 1200.0, 1250.0, 1300.0, 1350.0,
1400.0, 1450.0, 1500.0, 1550.0, 1600.0, 1641.9] # in nm
const geltman_bf_σ = [0.00, 0.15, 0.33, 0.57, 0.85, 1.17, 1.52, 1.89, 2.23,
2.55, 2.84, 3.11, 3.35, 3.56, 3.71, 3.83, 3.92, 3.95,
3.93, 3.85, 3.73, 3.58, 3.38, 3.14, 2.85, 2.54, 2.20,
1.83, 1.46, 1.06, 0.71, 0.40, 0.17, 0.0] # in 1e-21 m^2
const geltman_bf_interp = linear_interpolation(geltman_bf_λ, geltman_bf_σ, extrapolation_bc=0)
"""
σ_hminus_bf_geltman(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute bound-free cross section from H minus ion. Uses recipe from
[Geltman (1962)](https://ui.adsabs.harvard.edu/abs/1962ApJ...136..935G/abstract)
Units are m^5, needs to be multiplied by density of neutral H atoms and electron
density to obtain linear extinction.
"""
function σ_hminus_bf_geltman(λ::Unitful.Length, temperature::Unitful.Temperature)
λi = ustrip(λ |> u"nm") # convert to units of table
stimulated_emission = exp(-hc_k / (λ * temperature))
# Get H- fraction to convert from σ per H- atom to σ per H atom per electron
hminus_frac = calc_hminus_density(1.0u"m^-3", temperature, 1.0u"m^-3") * u"m^6"
σ = geltman_bf_interp(λi)::Float64 * 1e-21u"m^2" *
(1 - stimulated_emission) * hminus_frac
return σ
end
"""
α_hminus_bf_geltman(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_minus_density::NumberDensity
)
α_hminus_bf_geltman(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
Compute extinction from H minus ion, from input H minus populations.
Uses recipe from
[Geltman (1962)](https://ui.adsabs.harvard.edu/abs/1962ApJ...136..935G/abstract)
"""
function α_hminus_bf_geltman(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_minus_density::NumberDensity
)
λi = ustrip(λ |> u"nm") # convert to units of table
stimulated_emission = exp(-hc_k / (λ * temperature))
σ = geltman_bf_interp(λi)::Float64 * 1e-21u"m^2" * (1 - stimulated_emission)
return σ * h_minus_density
end
function α_hminus_bf_geltman(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
h_minus_density = calc_hminus_density(h_neutral_density, temperature, electron_density)
return α_hminus_bf_geltman(λ, temperature, h_minus_density)
end
"""
calc_hminus_density(
h_neutral_density::NumberDensity,
temperature::Unitful.Temperature,
electron_density::NumberDensity
)
Compute H minus populations based on electron density, temperature, and
density of neutral hydrogen atoms.
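The expression used is a Saha-like relation with the H minus binding energy χ = 0.754 eV:
\$\$
n_{\\mathrm{H}^-} = \\frac{n_{\\mathrm{HI}}\\, n_e}{4}
\\left(\\frac{h^2}{2\\pi m_e k_B T}\\right)^{3/2} e^{\\chi / (k_B T)}
\$\$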
"""
function calc_hminus_density(
h_neutral_density::NumberDensity,
temperature::Unitful.Temperature,
electron_density::NumberDensity
)
tmp = (ustrip(saha_const) / ustrip(temperature |> u"K"))^(3/2) * u"m^3"
ϕ = tmp * exp(hminusχ / (k_B * temperature)) / 4
return h_neutral_density * electron_density * ϕ
end
#=----------------------------------------------------------------------------
Recipes from John
----------------------------------------------------------------------------=#
"""
σ_hminus_ff_john(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute free-free cross section coefficient from H minus ion, for a given wavelength
and temperature. Units are m^5, needs to be multiplied by electron density and
density of neutral hydrogen atoms to obtain linear extinction. Uses recipe from
[John (1988)](https://ui.adsabs.harvard.edu/abs/1988A%26A...193..189J/abstract),
which is valid beyond 9113 nm but may not be good below 364.5 nm.
Includes stimulated emission.
"""
function σ_hminus_ff_john(λ::Unitful.Length, temperature::Unitful.Temperature)
λμ = ustrip(λ |> u"μm")
if λμ > 0.3645
table =
SA[ 0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
2483.3460 285.8270 -2054.2910 2827.7760 -1341.5370 208.9520
-3449.8890 -1158.3820 8746.5230 -11485.6320 5303.6090 -812.9390
2200.0400 2427.7190 -13651.1050 16755.5240 -7510.4940 1132.7380
-696.2710 -1841.4000 8624.9700 -10051.5300 4400.0670 -655.0200
88.2830 444.5170 -1863.8640 2095.2880 -901.7880 132.9850]
else
table =
SA[ 518.1021 -734.8666 1021.1775 -479.0721 93.1373 -6.4285
473.2636 1443.4137 -1977.3395 922.3575 -178.9275 12.3600
-482.2089 -737.1616 1096.8827 -521.1341 101.7963 -7.0571
115.5291 169.6374 -245.6490 114.2430 -21.9972 1.5097
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000
0.0000 0.0000 0.0000 0.0000 0.0000 0.0000]
end
sqrtθ = sqrt((5040.0u"K" / temperature) |> u"K/K")
λinv = 1.0 / λμ
κ = 0.0
for i in 1:6
κ += sqrtθ^(1 + i) * (λμ^2 * table[i, 1] + table[i, 2] +
λinv * (table[i, 3] + λinv * (table[i, 4] +
λinv * (table[i, 5] + λinv * table[i, 6]))))
end
κ = max(0.0, κ) # ensure no extrapolation to negative values
return κ * 1e-32u"m^4/N" * k_B * temperature |> u"m^5"
end
"""
α_hminus_ff_john(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
Compute free-free extinction from H minus ion. Based on `σ_hminus_ff_john`.
"""
function α_hminus_ff_john(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
σ = σ_hminus_ff_john(λ, temperature)
# NOTE: in RH temperature from electron pressure is set to 5040 K!
# Also, RH uses sometimes nHminus = nH * pe, other times atmos.nHmin...
return σ * h_neutral_density * electron_density
end
"""
σ_hminus_bf_john(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute bound-free cross section coefficient from H minus ion, for a given wavelength
and temperature. Units are m^5, needs to be multiplied by electron density and
density of neutral hydrogen atoms to obtain linear extinction. Uses recipe from
[John (1988)](https://ui.adsabs.harvard.edu/abs/1988A%26A...193..189J/abstract),
which is valid for 0.125 <= λ (μm) <= 1.6419. Seems to already include stimulated
emission.
"""
function σ_hminus_bf_john(λ::Unitful.Length, temperature::Unitful.Temperature)
table = SA[152.519, 49.534, -118.858, 92.536, -34.194, 4.982]
λμ = ustrip(λ |> u"μm")
λ0 = ustrip(1.6419u"μm")
λ1 = ustrip(0.125u"μm") # edge wavelength when approximation for fλ no longer valid
temp = ustrip(temperature |> u"K")
λidiff = max(0.0, 1.0 / λμ - 1.0 / λ0) # cases beyond λ0 set to zero
σλ = 1e-18 * λμ^3 * λidiff^1.5
fλ = 0.0
if λμ < λ1
λidiff = 1.0 / λ1 - 1.0 / λ0
end
for n in 1:6
fλ += table[n] * λidiff^((n-1)/2)
end
σλ *= fλ
α = h_k * c_0
κ = 0.750 * sqrt(temp)^-5 * exp(α / (λ0*u"μm" * temperature)) *
(1 - exp(-α / (λ * temperature))) * σλ * 1e-3u"m^4/N"
return κ * k_B * temperature |> u"m^5"
end
"""
α_hminus_bf_john(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
Compute extinction from H minus ion. Based on `σ_hminus_bf_john`.
"""
function α_hminus_bf_john(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
σ = σ_hminus_bf_john(λ, temperature)
return σ * h_neutral_density * electron_density
end
#=----------------------------------------------------------------------------
Recipes from Wishart (1979) and Broad and Reinhardt (1976)
----------------------------------------------------------------------------=#
const wbr_λ = [ 18, 19.6, 21.4, 23.6, 26.4, 29.8, 34.3, 40.4, 49.1, 62.6, 121, 139,
164, 175, 200, 225, 250, 275, 300, 325, 350, 375, 400, 425,
450, 475, 500, 525, 550, 575, 600, 625, 650, 675, 700, 725,
750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025,
1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325,
1350, 1375, 1400, 1425, 1450, 1475, 1500, 1525, 1550, 1575, 1600, 1610,
1620, 1630] # in nm
const wbr_σ = [0.067, 0.088, 0.117, 0.155, 0.206, 0.283, 0.414, 0.703, 1.24, 2.33,
5.43, 5.91, 7.29, 7.918, 9.453, 11.08, 12.75, 14.46, 16.19, 17.92,
19.65, 21.35, 23.02, 24.65, 26.24, 27.77, 29.23, 30.62, 31.94, 33.17,
34.32, 35.37, 36.32, 37.17, 37.91, 38.54, 39.07, 39.48, 39.77, 39.95,
40.01, 39.95, 39.77, 39.48, 39.06, 38.53, 37.89, 37.13, 36.25, 35.28,
34.19, 33.01, 31.72, 30.34, 28.87, 27.33, 25.71, 24.02, 22.26, 20.46,
18.62, 16.74, 14.85, 12.95, 11.07, 9.211, 7.407, 5.677, 4.052, 2.575,
1.302, 0.8697, 0.4974, 0.1989] # in 1e-22 m^2
const wbr_bf_interp = linear_interpolation(wbr_λ, wbr_σ, extrapolation_bc=0)
"""
σ_hminus_bf_wbr(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute cross section coefficient for bound-free from H minus ion (units m^5).
Needs to be multiplied by density of neutral hydrogen atoms and electron density
to obtain linear extinction. Uses recipe from
[Wishart (1979)](https://ui.adsabs.harvard.edu/abs/1979MNRAS.187P..59W) for λ down to 175 nm,
and recipe from [Broad and Reinhardt (1976)](https://ui.adsabs.harvard.edu/abs/1976PhRvA..14.2159B)
for λ=164 nm and below, following the recommendation from Mathisen (1984, MSC thesis).
"""
function σ_hminus_bf_wbr(λ::Unitful.Length, temperature::Unitful.Temperature)
λi = ustrip(λ |> u"nm") # convert to units of table
κ = wbr_bf_interp(λi)::Float64 * 1e-22u"m^2"
stimulated_emission = exp(-hc_k / (λ * temperature))
# Get H- fraction to convert from σ per H- atom to σ per H atom per electron
hminus_frac = calc_hminus_density(1.0u"m^-3", temperature, 1.0u"m^-3") * u"m^6"
return κ * (1 - stimulated_emission) * hminus_frac
end
"""
α_hminus_bf_wbr(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_minus_density::NumberDensity
)
α_hminus_bf_wbr(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
Compute extinction from H minus ion, from input H minus populations. Uses recipe from
[Wishart (1979)](https://ui.adsabs.harvard.edu/abs/1979MNRAS.187P..59W) for λ down to 175 nm,
and recipe from [Broad and Reinhardt (1976)](https://ui.adsabs.harvard.edu/abs/1976PhRvA..14.2159B)
for λ=164 nm and below, following the recommendation from Mathisen (1984, MSC thesis).
"""
function α_hminus_bf_wbr(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_minus_density::NumberDensity
)
λi = ustrip(λ |> u"nm") # convert to units of table
κ = wbr_bf_interp(λi)::Float64 * 1e-22u"m^2"
stimulated_emission = exp(-hc_k / (λ * temperature))
return κ * h_minus_density * (1 - stimulated_emission)
end
function α_hminus_bf_wbr(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
electron_density::NumberDensity
)
h_minus_density = calc_hminus_density(h_neutral_density, temperature, electron_density)
return α_hminus_bf_wbr(λ, temperature, h_minus_density)
end
#=----------------------------------------------------------------------------
Recipes from Mihalas
----------------------------------------------------------------------------=#
"""
σ_hydrogenic_ff(
ν::Unitful.Frequency,
    temperature::Unitful.Temperature,
    charge::Real
)
Compute free-free cross section (units m^5) for a hydrogen-like species.
Following Mihalas (1978) p. 101 and
[Rutten's IART](https://www.uio.no/studier/emner/matnat/astro/AST4310/h20/pensumliste/iart.pdf)
p 69. For linear extinction, needs to be multiplied by electron density
and species density. Includes stimulated emission.
Will not work in single-precision.
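The expression used is
\$\$
\\sigma_{\\mathrm{ff}} = \\frac{4}{3 h c}\\left(\\frac{e^2}{4\\pi\\varepsilon_0}\\right)^3
\\sqrt{\\frac{2\\pi}{3 m_e^3 k_B T}}\\, \\frac{Z^2}{\\nu^3}\\,
g_{\\mathrm{ff}}(\\nu, T)\\left(1 - e^{-h\\nu / k_B T}\\right)
\$\$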
"""
function σ_hydrogenic_ff(
ν::Unitful.Frequency,
temperature::Unitful.Temperature,
charge::Real
)
ν = ν |> u"s^-1"
stimulated_emission = exp(-h_k * ν / temperature)
return (αff_const * charge^2 / sqrt(temperature) * ν^-3 *
gaunt_ff(ν, temperature, charge)) * (1 - stimulated_emission)
end
"""
α_hydrogenic_ff(
ν::Unitful.Frequency,
temperature::Unitful.Temperature,
electron_density::NumberDensity,
species_density::NumberDensity,
charge::Int
)
Compute free-free linear extinction using `σ_hydrogenic_ff`.
For the hydrogen case, `species_density` is the proton density (H II).
"""
function α_hydrogenic_ff(
ν::Unitful.Frequency,
temperature::Unitful.Temperature,
electron_density::NumberDensity,
species_density::NumberDensity,
charge::Real
)
α = σ_hydrogenic_ff(ν, temperature, charge) * electron_density * species_density
return α
end
"""
σ_hydrogenic_bf(
ν::Unitful.Frequency,
ν_edge::Unitful.Frequency,
temperature::Unitful.Temperature,
charge::Real,
n_eff::AbstractFloat
)
Compute bound-free cross section for a hydrogen-like species in m^2. Multiply
by number density of species to obtain linear extinction. Following Mihalas (1978) p. 99 and
[Rutten's IART](https://www.uio.no/studier/emner/matnat/astro/AST4310/h20/pensumliste/iart.pdf)
p. 70. Using simplified constant and expression for threshold cross section.
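The expression used is
\$\$
\\sigma_{\\mathrm{bf}} = \\frac{4 e^2}{3\\pi\\sqrt{3}\\, \\varepsilon_0 m_e c^2 R_\\infty}\\, Z^4
\\left(\\frac{\\nu_{\\mathrm{edge}}}{\\nu}\\right)^3 n_{\\mathrm{eff}}\\,
g_{\\mathrm{bf}}(\\nu, Z, n_{\\mathrm{eff}})\\left(1 - e^{-h\\nu / k_B T}\\right)
\$\$
and zero below the edge frequency.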
"""
function σ_hydrogenic_bf(
ν::Unitful.Frequency,
ν_edge::Unitful.Frequency,
temperature::Unitful.Temperature,
charge::Real,
n_eff::AbstractFloat
)
if ν < ν_edge
return 0 * αbf_const
else
ν3_ratio = (ν_edge / ν)^3
stimulated_emission = exp(-h_k * ν / temperature)
return (αbf_const * charge^4 * ν3_ratio * n_eff *
(1 - stimulated_emission) * gaunt_bf(ν, charge, n_eff))
end
end
"""
α_hydrogenic_bf(
ν::Unitful.Frequency,
ν_edge::Unitful.Frequency,
temperature::Unitful.Temperature,
species_density::NumberDensity,
charge::Real,
n_eff::AbstractFloat
)
Compute bound-free extinction for a hydrogen-like species.
Based on `σ_hydrogenic_bf`.
"""
function α_hydrogenic_bf(
ν::Unitful.Frequency,
ν_edge::Unitful.Frequency,
temperature::Unitful.Temperature,
species_density::NumberDensity,
charge::Real,
n_eff::AbstractFloat
)
σ = σ_hydrogenic_bf(ν, ν_edge, temperature, charge, n_eff)
return σ * species_density
end
"""
σ_hydrogenic_bf_scaled(
σ0::Unitful.Area,
ν::Unitful.Frequency,
ν_edge::Unitful.Frequency,
charge::Real,
n_eff::AbstractFloat
)
σ_hydrogenic_bf_scaled(
σ0::Unitful.Area,
λ::Unitful.Length,
λ_edge::Unitful.Length,
charge::Real,
n_eff::AbstractFloat
)
Compute bound-free cross section for a hydrogen-like species by scaling
a peak cross section σ0 with frequency and the appropriate Gaunt factor.
No stimulated emission is added.
"""
function σ_hydrogenic_bf_scaled(
σ0::Unitful.Area,
ν::Unitful.Frequency,
ν_edge::Unitful.Frequency,
charge::Real,
n_eff::AbstractFloat
)
if ν < ν_edge
σ = 0 * σ0
else
σ = σ0 * (ν_edge / ν)^3 * (
gaunt_bf(ν, charge, n_eff) / gaunt_bf(ν_edge, charge, n_eff))
end
return σ
end
function σ_hydrogenic_bf_scaled(
σ0::Unitful.Area,
λ::Unitful.Length,
λ_edge::Unitful.Length,
charge::Real,
n_eff::AbstractFloat
)
return σ_hydrogenic_bf_scaled(σ0, c_0 / λ, c_0 / λ_edge, charge, n_eff)
end
#=----------------------------------------------------------------------------
Recipes from Bell (1980)
----------------------------------------------------------------------------=#
const bell_ff_λ = [ 0.0, 350.5, 414.2, 506.3, 569.6, 650.9, 759.4,
911.3, 1139.1, 1518.8, 1822.6, 2278.3, 3037.7, 3645.2,
4556.5, 6075.3, 9113.0, 11391.3, 15188.3] # in nm
const bell_ff_t = 5040.0 ./ [0.5, 0.8, 1.0, 1.2, 1.6, 2.0, 2.8, 3.6] # in K
const bell_ff_κ =
[0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00 0.00e+00
4.17e-02 6.10e-02 7.34e-02 8.59e-02 1.11e-01 1.37e-01 1.87e-01 2.40e-01
5.84e-02 8.43e-02 1.01e-01 1.17e-01 1.49e-01 1.82e-01 2.49e-01 3.16e-01
8.70e-02 1.24e-01 1.46e-01 1.67e-01 2.10e-01 2.53e-01 3.39e-01 4.27e-01
1.10e-01 1.54e-01 1.80e-01 2.06e-01 2.55e-01 3.05e-01 4.06e-01 5.07e-01
1.43e-01 1.98e-01 2.30e-01 2.59e-01 3.17e-01 3.75e-01 4.92e-01 6.09e-01
1.92e-01 2.64e-01 3.03e-01 3.39e-01 4.08e-01 4.76e-01 6.13e-01 7.51e-01
2.73e-01 3.71e-01 4.22e-01 4.67e-01 5.52e-01 6.33e-01 7.97e-01 9.63e-01
4.20e-01 5.64e-01 6.35e-01 6.97e-01 8.06e-01 9.09e-01 1.11e+00 1.32e+00
7.36e-01 9.75e-01 1.09e+00 1.18e+00 1.34e+00 1.48e+00 1.74e+00 2.01e+00
1.05e+00 1.39e+00 1.54e+00 1.66e+00 1.87e+00 2.04e+00 2.36e+00 2.68e+00
1.63e+00 2.14e+00 2.36e+00 2.55e+00 2.84e+00 3.07e+00 3.49e+00 3.90e+00
2.89e+00 3.76e+00 4.14e+00 4.44e+00 4.91e+00 5.28e+00 5.90e+00 6.44e+00
4.15e+00 5.38e+00 5.92e+00 6.35e+00 6.99e+00 7.50e+00 8.32e+00 9.02e+00
6.47e+00 8.37e+00 9.20e+00 9.84e+00 1.08e+01 1.16e+01 1.28e+01 1.38e+01
1.15e+01 1.48e+01 1.63e+01 1.74e+01 1.91e+01 2.04e+01 2.24e+01 2.40e+01
2.58e+01 3.33e+01 3.65e+01 3.90e+01 4.27e+01 4.54e+01 4.98e+01 5.33e+01
4.03e+01 5.20e+01 5.70e+01 6.08e+01 6.65e+01 7.08e+01 7.76e+01 8.30e+01
7.16e+01 9.23e+01 1.01e+02 1.08e+02 1.18e+02 1.26e+02 1.38e+02 1.47e+02]
# Linear extrapolation in λ, flat extrapolation in θ
const bell_ff_interp = linear_interpolation((bell_ff_λ, bell_ff_t[end:-1:1]),
bell_ff_κ[:, end:-1:1], extrapolation_bc=(Line(), Flat()))
"""
σ_h2minus_ff(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute free-free cross section coefficient from H2^- molecule. Units are m^5,
needs to be multiplied by electron density and density of H2 molecules
to obtain linear extinction. Follows recipe from
[Bell (1980)](https://ui.adsabs.harvard.edu/abs/1980JPhB...13.1859B/abstract),
page 1863. Stimulated emission is included.
"""
function σ_h2minus_ff(λ::Unitful.Length, temperature::Unitful.Temperature)
λi = ustrip(λ |> u"nm") # convert to units of table
temp = ustrip(temperature |> u"K")
κ = bell_ff_interp(λi, temp)::Float64 * 1e-29u"m^4/N"
return κ * k_B * temperature |> u"m^5"
end
"""
α_h2minus_ff(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h2_density::NumberDensity,
electron_density::NumberDensity
)
Compute extinction from H2^- molecule. Based on `σ_h2minus_ff`.
"""
function α_h2minus_ff(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h2_density::NumberDensity,
electron_density::NumberDensity
)
σ = σ_h2minus_ff(λ, temperature)
return σ * h2_density * electron_density |> u"m^-1"
end
#=----------------------------------------------------------------------------
Recipes from Bates (1952)
----------------------------------------------------------------------------=#
const bates_λ = 1f7 ./ [ 500, 1000, 1500, 2000, 2500, 3000,
3500, 4000, 5000, 6000, 7000, 8000,
9000, 10_000, 12_000, 14_000, 16_000, 18_000,
20_000, 22_000, 24_000, 26_000, Inf32] # in nm
const bates_t = [2.5f+03, 3.0f+03, 3.5f+03, 4.0f+03, 5.0f+03,
6.0f+03, 7.0f+03, 8.0f+03, 1.0f+04, 1.2f+04] # in K
# H2plus bf + ff extinction coefficient in 1e-49 m^5
const bates_κ = convert(Array{Float32, 2},
[ 1.14 0.94 0.80 0.69 0.55 0.46 0.39 0.34 0.27 0.226
1.61 1.32 1.12 0.97 0.77 0.63 0.54 0.47 0.38 0.31
1.97 1.60 1.35 1.17 0.92 0.76 0.64 0.56 0.44 0.37
2.28 1.83 1.53 1.32 1.03 0.85 0.72 0.63 0.50 0.41
2.56 2.04 1.69 1.45 1.13 0.92 0.78 0.68 0.54 0.44
2.84 2.23 1.84 1.56 1.21 0.99 0.83 0.72 0.57 0.47
3.1 2.42 1.97 1.67 1.28 1.04 0.88 0.76 0.60 0.49
3.4 2.60 2.10 1.77 1.35 1.09 0.92 0.79 0.62 0.51
4.0 2.98 2.36 1.96 1.47 1.17 0.98 0.84 0.66 0.54
4.8 3.4 2.63 2.15 1.57 1.25 1.04 0.89 0.69 0.57
5.6 3.9 2.91 2.33 1.67 1.31 1.08 0.92 0.71 0.58
6.7 4.4 3.2 2.53 1.77 1.37 1.12 0.95 0.73 0.59
7.9 5.0 3.5 2.74 1.87 1.43 1.16 0.97 0.74 0.60
9.3 5.6 3.9 2.95 1.97 1.48 1.19 0.99 0.75 0.61
13.0 7.2 4.7 3.4 2.18 1.58 1.25 1.03 0.77 0.62
18.1 9.3 5.7 4.0 2.40 1.69 1.30 1.06 0.78 0.62
25.1 11.9 7.0 4.7 2.64 1.80 1.36 1.09 0.78 0.62
35. 15.2 8.4 5.4 2.91 1.91 1.41 1.11 0.79 0.61
47. 19.3 10.2 6.3 3.2 2.03 1.46 1.14 0.79 0.61
64. 24.3 12.2 7.3 3.5 2.16 1.52 1.16 0.79 0.60
86. 31. 14.6 8.4 3.8 2.29 1.57 1.18 0.79 0.59
114. 38. 17.3 9.6 4.2 2.42 1.63 1.21 0.79 0.58
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ])
const bates_bf_fraction = convert(Array{Float32, 2},
[0.059 0.046 0.037 0.031 0.022 0.017 0.014 0.011 0.008 0.006
0.135 0.107 0.087 0.072 0.053 0.041 0.033 0.027 0.020 0.015
0.214 0.171 0.141 0.118 0.088 0.069 0.056 0.046 0.034 0.026
0.291 0.236 0.196 0.166 0.125 0.098 0.080 0.067 0.049 0.038
0.363 0.298 0.250 0.214 0.162 0.129 0.105 0.088 0.065 0.050
0.430 0.357 0.303 0.260 0.200 0.159 0.131 0.110 0.082 0.064
0.490 0.413 0.353 0.305 0.237 0.190 0.157 0.132 0.099 0.077
0.546 0.464 0.400 0.349 0.273 0.221 0.183 0.155 0.116 0.091
0.640 0.556 0.486 0.429 0.342 0.280 0.234 0.200 0.151 0.120
0.715 0.632 0.561 0.501 0.406 0.336 0.284 0.243 0.186 0.148
0.775 0.696 0.625 0.564 0.464 0.388 0.331 0.285 0.220 0.176
0.822 0.748 0.680 0.619 0.517 0.437 0.375 0.326 0.254 0.204
0.859 0.792 0.727 0.667 0.564 0.482 0.417 0.364 0.286 0.232
0.888 0.827 0.767 0.709 0.607 0.524 0.456 0.400 0.317 0.258
0.929 0.881 0.829 0.777 0.680 0.597 0.526 0.467 0.375 0.309
0.954 0.917 0.874 0.829 0.739 0.658 0.586 0.525 0.428 0.356
0.970 0.942 0.906 0.867 0.786 0.708 0.638 0.577 0.476 0.400
0.980 0.959 0.930 0.897 0.824 0.751 0.683 0.622 0.519 0.440
0.987 0.970 0.947 0.919 0.854 0.786 0.721 0.661 0.558 0.476
0.991 0.978 0.960 0.936 0.879 0.816 0.754 0.695 0.593 0.510
0.994 0.984 0.969 0.949 0.898 0.841 0.782 0.726 0.625 0.541
0.996 0.988 0.976 0.959 0.914 0.862 0.806 0.752 0.653 0.569
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ])
const bates_ff_fraction = 1 .- bates_bf_fraction
const bates_ff_κ = bates_κ .* bates_ff_fraction
const bates_bf_κ = bates_κ .* bates_bf_fraction
const bates_ff_interp = linear_interpolation((bates_λ[end:-1:1], bates_t),
bates_ff_κ[end:-1:1, :], extrapolation_bc=(Line(), Flat()))
const bates_bf_interp = linear_interpolation((bates_λ[end:-1:1], bates_t),
bates_bf_κ[end:-1:1, :], extrapolation_bc=(Line(), Flat()))
"""
σ_h2plus_ff(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute free-free cross section coefficient from H2plus molecule, according to recipe
from [Bates (1952)](https://ui.adsabs.harvard.edu/abs/1952MNRAS.112...40B/abstract),
page 43.
"""
function σ_h2plus_ff(λ::Unitful.Length, temperature::Unitful.Temperature)
λi = convert(Float32, ustrip(λ |> u"nm")) # convert to units of table
temp = convert(Float32, ustrip(temperature |> u"K"))
σ = bates_ff_interp(λi, temp)::Float32 * u"m^5" * 1e-49
return σ
end
"""
α_h2plus_ff(λ::Unitful.Length, temperature::Unitful.Temperature,
h_neutral_density::NumberDensity, proton_density::NumberDensity)
Compute free-free extinction from H2plus molecule. Based on `σ_h2plus_ff`.
"""
function α_h2plus_ff(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
proton_density::NumberDensity
)
σ = σ_h2plus_ff(λ, temperature)
return σ * h_neutral_density * proton_density
end
"""
σ_h2plus_bf(λ::Unitful.Length, temperature::Unitful.Temperature)
Compute bound-free cross section coefficient from H2plus molecule, according to recipe from
[Bates (1952)](https://ui.adsabs.harvard.edu/abs/1952MNRAS.112...40B/abstract),
page 43.
"""
function σ_h2plus_bf(λ::Unitful.Length, temperature::Unitful.Temperature)
λi = convert(Float32, ustrip(λ |> u"nm")) # convert to units of table
temp = convert(Float32, ustrip(temperature |> u"K"))
# Table in 1e-49 m^5
return bates_bf_interp(λi, temp)::Float32 * u"m^5" * 1e-49
end
"""
    α_h2plus_bf(λ::Unitful.Length, temperature::Unitful.Temperature,
h_neutral_density::NumberDensity, proton_density::NumberDensity)
Compute bound-free extinction from H2plus molecule, based on `σ_h2plus_bf`.
"""
function α_h2plus_bf(
λ::Unitful.Length,
temperature::Unitful.Temperature,
h_neutral_density::NumberDensity,
proton_density::NumberDensity
)
σ = σ_h2plus_bf(λ, temperature)
return σ * h_neutral_density * proton_density
end
#=----------------------------------------------------------------------------
Recipes from Victor & Dalgarno (1969)
----------------------------------------------------------------------------=#
const victor_h2_λ = SA[121.57, 130.00, 140.00, 150.00, 160.00, 170.00, 185.46,
186.27, 193.58, 199.05, 230.29, 237.91, 253.56, 275.36,
296.81, 334.24, 404.77, 407.90, 435.96, 546.23, 632.80] # in nm
const victor_h2_σ = SA[2.35E-28, 1.22E-28, 6.80E-29, 4.24E-29, 2.84E-29, 2.00E-29,
1.25E-29, 1.22E-29, 1.00E-29, 8.70E-30, 4.29E-30, 3.68E-30,
2.75E-30, 1.89E-30, 1.36E-30, 8.11E-31, 3.60E-31, 3.48E-31,
2.64E-31, 1.04E-31, 5.69E-32] # in m2
const victor_h2_interp = linear_interpolation(victor_h2_λ, victor_h2_σ, extrapolation_bc=Line())
"""
function σ_rayleigh_h2(λ::Unitful.Length)
Compute cross section from Rayleigh scattering from H2 molecules. Uses recipe from
[Victor and Dalgarno (1969)](https://aip.scitation.org/doi/pdf/10.1063/1.1671412),
J. Chem. Phys. 50, 2535, page 2538 for λ <= 632.80, and the recipe from
[Tarafdar & Vardya (1973)](https://ui.adsabs.harvard.edu/abs/1973MNRAS.163..261T/abstract)
page 272, for λ > 632.80.
"""
function σ_rayleigh_h2(λ::Unitful.Length)
λi = ustrip(λ |> u"nm")
if λi >= victor_h2_λ[1]
if λi <= victor_h2_λ[end]
σ_h2 = victor_h2_interp(λi)::Float64 * u"m^2"
else
λ2 =1 / λi^2
# Tarafdar coeffs converted from λ^4, λ^6, λ^8 in Å to nm and cm^2 to m^2:
σ_h2 = (8.779e-21 + (1.323e-16 + 2.245e-12 * λ2) * λ2) * λ2^2 * u"m^2"
end
else
σ_h2 = 0.0u"m^2"
end
return σ_h2
end
"""
function α_rayleigh_h2(λ::Unitful.Length, h2_density::NumberDensity)
Compute extinction from Rayleigh scattering from H2 molecules. Based on `σ_rayleigh_h2`.
"""
function α_rayleigh_h2(λ::Unitful.Length, h2_density::NumberDensity)
σ_h2 = σ_rayleigh_h2(λ)
return σ_h2 * h2_density
end
#=----------------------------------------------------------------------------
Recipes from Dalgarno (1962)
----------------------------------------------------------------------------=#
"""
σ_rayleigh_h(λ::Unitful.Length)
Compute cross section from Rayleigh scattering from neutral H atoms. To obtain extinction,
multiply by the number density of neutral hydrogen atoms.
Uses recipe from Dalgarno (1962), Geophysics Corp. of America, Technical Report No. 62-28-A
(unavailable), which is accurate to 1% for λ > 125.0 nm.
"""
function σ_rayleigh_h(λ::Unitful.Length)
λi = ustrip(λ |> u"Å")
if λi >= 1215.7
λ2 = 1 / λi^2
# First coefficient has conversion from Mbarn to m^2. From RH:
σ_h = (5.81e-17 * λ2^2 * (1 + 2.452e6 * λ2 + 4.801e12 * λ2^2)) * u"m^2"
else
σ_h = 0.0u"m^2"
end
return σ_h
end
"""
α_rayleigh_h(λ::Unitful.Length, h_neutral_density::NumberDensity)
Compute extinction from Rayleigh scattering from neutral H atoms.
"""
function α_rayleigh_h(λ::Unitful.Length, h_neutral_density::NumberDensity)
σ_h = σ_rayleigh_h(λ)
return σ_h * h_neutral_density
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 2061 | """
Computes line extinction and associated quantities.
"""
"""
function calc_Aul(λ0::Unitful.Length, g_ratio::Real, f_value::AbstractFloat)
Compute the spontaneous deexcitation rate \$A_{ul}\$ (natural broadening)
for a bound-bound transition, using the SI expression *per wavelength*:
\$\$
A_{ul} = \\frac{2\\pi e^2}{\\varepsilon_0 m_e c} \\frac{g_l}{g_u} \\frac{f_{lu}}{\\lambda^2}
\$\$
for a given rest wavelength `λ0`, the ratio between the statistical weights of the lower and
upper levels (`g_ratio` = gl / gu), and the oscillator strength `f_value`.
"""
function calc_Aul(λ0::Unitful.Length, g_ratio::Real, f_value::AbstractFloat)
(2π * e^2 / (ε_0 * m_e * c_0) * g_ratio * f_value / λ0^2) |> u"s^-1"
end
"""
function calc_Bul(λ0::Unitful.Length, Aul::Unitful.Frequency)
Compute the induced deexcitation rate \$B_{ul}\$ for a bound-bound transition,
using the SI expression *per wavelength*:
\$\$
B_{ul} = \\frac{\\lambda^5}{2 h c^2} A_{ul}
\$\$
for a given rest wavelength `λ0`, and spontaneous deexcitation rate `Aul`.
"""
calc_Bul(λ0::Unitful.Length, Aul::Unitful.Frequency) = (λ0^5 * Aul / (2h * c_0^2)) |> u"m^3 / J"
"""
If input is in wavenumber, convert to energy. Otherwise keep as energy.
"""
function wavenumber_to_energy(a::Quantity{T}) where T <: AbstractFloat
if typeof(a) <: PerLength
a = convert(Unitful.Quantity{T, Unitful.𝐋^2 * Unitful.𝐓^-2 * Unitful.𝐌},
(h * c_0 * a) |> u"aJ")
end
@assert typeof(a) <: Unitful.Energy{T} "Input units must either be wavenumber or energy"
return a
end
"""
Calculates the Blackbody (Planck) function per wavelength, for given
wavelength and temperature.
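The expression used is
\$\$
B_\\lambda(T) = \\frac{2 h c^2}{\\lambda^5} \\frac{1}{e^{h c / (\\lambda k_B T)} - 1}
\$\$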
"""
function blackbody_λ(λ::Unitful.Length, temperature::Unitful.Temperature)
(2h * c_0^2) / (λ^2 * λ^3 * (exp((h * c_0 / k_B) / (λ * temperature)) - 1))
end
"""
Calculates the Blackbody (Planck) function per frequency, for a given
frequency and temperature.
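The expression used is
\$\$
B_\\nu(T) = \\frac{2 h \\nu^3}{c^2} \\frac{1}{e^{h \\nu / (k_B T)} - 1}
\$\$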
"""
function blackbody_ν(ν::Unitful.Frequency, temperature::Unitful.Temperature)
(2h / c_0^2) * (ν^3 / (exp((h / k_B) * (ν / temperature)) - 1))
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 262 | const σ_thomson = (e^4 / (6π * ε_0^2 * m_e^2 * c_0^4)) |> u"m^2"
"""
α_thomson(electron_density::NumberDensity)
Compute the Thomson extinction as a function of electron density.
"""
α_thomson(electron_density::NumberDensity) = σ_thomson * electron_density
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 3229 | """
Computes line profiles and associated quantities.
"""
const invSqrtPi = 1. / sqrt(π)
"""
Compute scaled complex complementary error function using
[Humlicek (1982, JQSRT 27, 437)](https://doi.org/10.1016/0022-4073(82)90078-4)
W4 rational approximations.
Here, z is defined as z = v + i * a, and returns w(z) = H(a,v) + i * L(a, v).
"""
function humlicek(z::Complex)
s = abs(real(z)) + imag(z)
if s > 15.0
# region I
w = im * invSqrtPi * z / (z * z - 0.5)
elseif s > 5.5
# region II
zz = z * z
w = im * (z * (zz * invSqrtPi - 1.4104739589)) / (0.75 + zz * (zz - 3.0))
else
x, y = real(z), imag(z)
t = y - im * x
if y >= 0.195 * abs(x) - 0.176
# region III
w = ((16.4955 + t * (20.20933 + t * (11.96482 + t * (3.778987 + 0.5642236 * t))))
/ (16.4955 + t * (38.82363 + t * (39.27121 + t * (21.69274 + t * (6.699398 + t))))))
else
# region IV
u = t * t
nom = t * (36183.31 - u * (3321.99 - u * (1540.787 - u *
(219.031 - u * (35.7668 - u * (1.320522 - u * .56419))))))
den = 32066.6 - u * (24322.8 - u * (9022.23 - u * (2186.18 -
u * (364.219 - u * (61.5704 - u * (1.84144 - u))))))
w = exp(u) - nom / den
end
end
return w
end
"""
voigt_profile(a::T, v::AbstractFloat, ΔD::T)::T
Compute the normalised Voigt profile, given a damping constant `a`, dimensionless
wavelength or frequency `v`, and Doppler width `ΔD` (wavelength or frequency).
In the case of wavelength, v = (λ - λ0) / ΔD.
Uses Humlicek's W4 approximation. Returns in inverse units of ΔD.
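A minimal sketch with arbitrary values (here `ΔD` is taken in nm, so the profile is
returned in nm^-1):
```julia
a, ΔD = 1.0e-3, 1.0e-2        # damping parameter and Doppler width (nm)
v = (500.02 - 500.0) / ΔD     # dimensionless distance from line centre
ϕ = voigt_profile(a, v, ΔD)
```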
"""
function voigt_profile(a::T, v::AbstractFloat, ΔD::T)::T where T <: AbstractFloat
z = v + a * im
profile = real(humlicek(z))
return profile * invSqrtPi / ΔD
end
"""
dispersion_profile(a::T, v::AbstractFloat, ΔD::T)::T
Compute the normalised dispersion (or Faraday) profile, given a damping constant `a`,
dimensionless wavelength or frequency `v`, and Doppler width `ΔD` (wavelength or frequency).
In the case of wavelength, v = (λ - λ0) / ΔD.
Uses Humlicek's W4 approximation. Returns in inverse units of ΔD.
"""
function dispersion_profile(a::T, v::AbstractFloat, ΔD::T)::T where T <: AbstractFloat
z = v + a * im
profile = imag(humlicek(z))
return profile * invSqrtPi / ΔD
end
"""
doppler_width(
λ0::Unitful.Length,
mass::Unitful.Mass,
temperature::Unitful.Temperature
)
doppler_width(
ν0::Unitful.Frequency,
mass::Unitful.Mass,
temperature::Unitful.Temperature
)
Compute Doppler width in wavelength or frequency units, given a rest wavelength/frequency,
mass of atom/ion, and temperature. Returns in typical units for UV/optical/infrared lines.
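The widths computed are
\$\$
\\Delta\\lambda_D = \\frac{\\lambda_0}{c}\\sqrt{\\frac{2 k_B T}{m}}, \\qquad
\\Delta\\nu_D = \\frac{\\nu_0}{c}\\sqrt{\\frac{2 k_B T}{m}}
\$\$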
"""
function doppler_width(
λ0::Unitful.Length,
mass::Unitful.Mass,
temperature::Unitful.Temperature
)
return (λ0 / c_0 * sqrt(2 * k_B * temperature / mass)) |> u"nm"
end
function doppler_width(
ν0::Unitful.Frequency,
mass::Unitful.Mass,
temperature::Unitful.Temperature
)
return (ν0 / c_0 * sqrt(2 * k_B * temperature / mass)) |> u"THz"
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | code | 15028 | using Interpolations
using SpecialFunctions: erfcx
using Test
using Transparency
using Unitful
import PhysicalConstants.CODATA2018: h, k_B, R_∞, c_0, m_e, e, ε_0, m_u
@testset "Thomson" begin
@test α_thomson(1.e29u"m^-3") ≈ 6.652458732173518u"m^-1"
@test α_thomson(0u"m^-3") == 0.0u"m^-1"
@test_throws MethodError α_thomson(1)
@test_throws MethodError α_thomson(1u"m")
end
@testset "Hydrogen" begin
@testset "hminus_ff" begin
θ = [0.5, 1, 2]
temp = 5040u"K" ./ θ
λ = [91130, 11390, 3038]u"Å"
# Testing against values in the original table
@test all(Transparency.α_hminus_ff_stilley.(
λ, temp, 1u"m^-3", 1u"Pa" ./ (k_B .* temp)) ≈ [2.75e-28, 8.76e-30, 1.45e-30]u"m^-1")
# Only testing against implementation (no table, only coefficients)
@test all(Transparency.α_hminus_ff_john.(
λ, temp, 1u"m^-3", 1u"Pa" ./ (k_B .* temp)) ≈ [2.7053123415611454e-28,
8.7520478601176e-30, 1.6977098265784962e-30]u"m^-1")
@test α_hminus_ff.(λ, temp, 1u"m^-3", 1u"m^-3", recipe="stilley") ≈
Transparency.α_hminus_ff_stilley.(λ, temp, 1u"m^-3", 1u"m^-3")
@test α_hminus_ff.(λ, temp, 1u"m^-3", 1u"m^-3", recipe="john") ≈
Transparency.α_hminus_ff_john.(λ, temp, 1u"m^-3", 1u"m^-3")
@test_throws "ErrorException" α_hminus_ff.(λ, temp, 1u"m^-3", 1u"m^-3", recipe="aaa")
end
@testset "hminus_bf" begin
# Values from the table, ensuring no stimulated emission
λ = [18, 121, 164, 500, 850, 1600]u"nm" # three values from each table
temp = 6000u"K"
@test σ_hminus_bf.(λ, temp; recipe="wbr") ≈ Transparency.σ_hminus_bf_wbr.(λ, temp)
@test all(Transparency.α_hminus_bf_wbr.(λ, 0u"K", 1u"m^-3") ≈
[6.7e-24, 5.43e-22, 7.29e-22, 2.923e-21, 4.001e-21, 1.302e-22]u"m^-1")
@test Transparency.α_hminus_bf_wbr(0u"nm", 0u"K", 1u"m^-3") == 0u"m^-1"
λ = [500, 8000, 16000]u"Å"
tmp = [1.257012463394068, 36.18291836328047, 0.9682451193929176]
@test Transparency.α_hminus_bf_wbr.(λ, temp, 1e24u"m^-3", 1e25u"m^-3") ≈
tmp * u"m^-1"
@test Transparency.σ_hminus_bf_wbr.(λ, temp) * 1e49 ≈ tmp * u"m^5"
@test all(Transparency.α_hminus_bf_geltman.(λ, 0u"K", 1u"m^-3") ≈ [1.5e-22,
3.92e-21, 1.7e-22]u"m^-1")
@test Transparency.α_hminus_bf_geltman(0u"nm", 0u"K", 1u"m^-3") == 0u"m^-1"
# Only testing against implementation (no table, only coefficients)
@test all(Transparency.α_hminus_bf_geltman.(λ, 5000u"K", 1e24u"m^-3", 1e25u"m^-3") ≈ [
2.5276368595110785, 64.24516492134173, 2.3904062765360763]u"m^-1")
@test all(Transparency.α_hminus_bf_john.(λ, 5000u"K", 1e24u"m^-3", 1e25u"m^-3") ≈ [
2.5257481813577, 65.22804371400161, 1.8270928643449478]u"m^-1")
temp = [2000, 5000, 10000]u"K"
@test Transparency.calc_hminus_density.(1e13u"m^-3", temp, 1e14u"m^-3") ≈
[91.94566524525405, 1.6850912396740527, 0.24835857088939245]u"m^-3"
@test α_hminus_bf.(λ, temp, 1u"m^-3", 1u"m^-3"; recipe="geltman") ≈
Transparency.α_hminus_bf_geltman.(λ, temp, 1u"m^-3", 1u"m^-3")
@test α_hminus_bf.(λ, temp, 1u"m^-3", 1u"m^-3"; recipe="john") ≈
Transparency.α_hminus_bf_john.(λ, temp, 1u"m^-3", 1u"m^-3")
@test_throws "ErrorException" α_hminus_bf.(λ, temp, 1u"m^-3", 1u"m^-3"; recipe="aaa")
end
@testset "hydrogenic" begin
t1 = (3.28805e15u"Hz" * h / k_B) |> u"K"
temp = [t1 * 1e3, t1, t1 * 1e-2]
ν = k_B / h .* [temp[1] * 1e-4, temp[2] * 1e-1, temp[3] * 1e1]
# Test a few points from Kurucz's table
@test all(Transparency.gaunt_ff.(ν, temp, 1) .≈ [5.53, 1.8, 1.08])
@test Transparency.gaunt_ff(500u"nm", 5000u"K", 1) == Transparency.gaunt_ff(
c_0 / 500u"nm", 5000u"K", 1)
@test Transparency.gaunt_bf(500u"nm", 2, 5) == Transparency.gaunt_bf(c_0 / 500u"nm", 2, 5)
# Note: gaunt_bf only tested against RH version, which is < 0 in some cases!
@test Transparency.gaunt_bf(500u"nm", 1, 1.1) ≈ 0.023132239149173173
@test Transparency.gaunt_bf(100u"nm", 2, 5) ≈ 1.0517607988011628
@test_throws AssertionError Transparency.gaunt_bf(500u"nm", 1, 1)
ν = [0.15, 0.6, 300]u"PHz"
# Only testing against implementation (no table, only expression)
@test all(α_hydrogenic_ff.(ν, 5000u"K", 1e24u"m^-3", 1e24u"m^-3", 1) ≈ [
134.2760926064222, 2.662541955122332, 2.0317078183858428e-8]u"m^-1")
@test all(σ_hydrogenic_ff.(ν, 5000u"K", 1) ≈ [
1.342760926064222e-46, 2.6625419551223324e-48, 2.0317078183858426e-56]u"m^5")
@test all(α_hydrogenic_bf.(ν, ν / 1.1, 5000u"K", 1e22u"m^-3", 1., 2.) ≈ [
2.4903105889694794, 9.569685175346825, 9.154320813938323]u"m^-1")
@test α_hydrogenic_bf(ν[1], ν[1] * 1.1, 1u"K", 1u"m^-3", 1, 2.) == 0u"m^-1"
@test α_hydrogenic_bf(ν[1], ν[1] * 1.1, 1u"K", 1u"m^-3", 1, 2.) == 0u"m^-1"
@test σ_hydrogenic_bf_scaled(1u"m^2", ν[3], ν[3] * 1.1, 1., 1.) == 0u"m^2"
@test σ_hydrogenic_bf_scaled(42u"m^2", ν[3], ν[3], 1., 1.) == 42u"m^2"
@test σ_hydrogenic_bf_scaled(1u"m^2", c_0 / ν[3], c_0 /ν[3], 2., 5.) ≈
σ_hydrogenic_bf_scaled(1u"m^2", ν[3], ν[3], 2., 5.)
end
@testset "h2minus" begin
# Test a few points from the table
λ = [350.5, 1139.1, 15_188.3]u"nm"
temp = 5040u"K" ./ [0.5, 1.6, 3.6]
@test all(α_h2minus_ff.(λ, temp, 1e31u"m^-3", 1u"Pa" ./ (k_B .* temp)) ≈ [
4.17, 80.60, 14700]u"m^-1")
end
@testset "h2plus" begin
λ = 1f7u"nm" ./ [500, 4000, 26_000]
temp = [12_000, 5000, 2500]u"K"
# Test opacity table directly, sum of bf and ff
κ_total = α_h2plus_ff.(λ, temp, 1f25u"m^-3", 1f24u"m^-3") .+
α_h2plus_bf.(λ, temp, 1f25u"m^-3", 1f24u"m^-3")
κ_total = convert.(typeof(1f0u"m^-1"), κ_total) # convert to Float32
@test all(κ_total ≈ [0.226, 1.35, 114.]u"m^-1")
# Test bf fraction table
κ_tmp = α_h2plus_bf.(λ, temp, 1f25u"m^-3", 1f24u"m^-3")
@test all(convert.(typeof(1f0u"m^-1"), κ_tmp) ./
κ_total ≈ [0.006, 0.273, 0.996])
end
@testset "rayleigh" begin
# Testing against implementation
@test α_rayleigh_h2(100u"nm", 1u"m^-3") == 0.0u"m^-1"
λ = [200, 600, 2000]u"nm"
@test all(α_rayleigh_h2.(λ, 1e30u"m^-3") ≈ [
8.565893085787453, 0.0747454429941088, 0.0005507634570312499]u"m^-1")
@test α_rayleigh_h(100u"nm", 1u"m^-3") == 0.0u"m^-1"
@test all(α_rayleigh_h.(λ, 1e30u"m^-3") ≈ [
6.946808203124999, 0.04804975738502133, 0.00036536185226953123]u"m^-1")
end
end
@testset "Voigt" begin
a = im .* 10 .^LinRange(-4, -0.3, 20)'
v = 10 .^LinRange(-1, 2, 20)'
z = a' .+ hcat(-v, v)
@testset "humlicek" begin
# Test against more precise erfcx function
@test isapprox(humlicek.(z), erfcx.(-im .* z), rtol=1e-4)
@test humlicek(im * 0) == 1.
@test_throws MethodError humlicek(0.)
end
@testset "profiles" begin
wave = 1.0
@test voigt_profile(0.1, 0.5, wave) ≈ real(humlicek(0.5 + 0.1 *im)) / sqrt(π)
@test dispersion_profile(0.1, 0.5, wave) ≈ imag(humlicek(0.5 + 0.1 *im)) / sqrt(π)
# Symmetry / anti-symmetry
@test voigt_profile.(0.1, v, wave) == voigt_profile.(0.1, -v, wave)
@test dispersion_profile.(0.1, v, wave) == -dispersion_profile.(0.1, -v, wave)
end
@test doppler_width(ustrip(c_0) * u"m", 1u"kg", ustrip(0.5 / k_B) * u"K") ≈ 1u"m"
@test doppler_width(ustrip(c_0) / u"s", 1u"kg", ustrip(0.5 / k_B) * u"K") ≈ 1u"Hz"
end
@testset "Broadening" begin
χup = 1.1u"aJ"
χlo = 1.0u"aJ"
χ∞ = 1.5u"aJ"
Z = 1
mass = 1.0u"kg"
@testset "van der Waals" begin
# Testing against implementation
@test const_unsold(mass, χup, χlo, χ∞, Z) ≈ 1.1131993895644783e-15 rtol=1e-10
@test γ_unsold.(1.0, 1u"K", [1, 2]u"m^-3") ≈ [1, 2]u"s^-1"
@test γ_unsold(1.0, 1000u"K", 1u"m^-3") ≈ (1000^0.3)u"s^-1"
@test const_barklem(m_u * 1, 0.3, 300) ≈ 1.0853795252714703e-15u"m^3 / s"
@test const_barklem(m_u * 1, 1, 2) == 2 * const_barklem(m_u * 1, 1, 1)
@test_throws ErrorException const_barklem(m_u * 1, 1, -1)
@test_throws ErrorException const_barklem(m_u * 1, -1, 1)
@test γ_barklem(0.3, 123u"m^3/s", 1u"K", 1u"m^-3") == 123.0u"s^-1"
@test γ_barklem(0.3, 1e-16u"m^3/s", 6000u"K", 1e23u"m^-3") ≈ 2.100646320154e8u"s^-1"
@test const_deridder_rensbergen(m_u * 1, m_u * 1, 3, 1) ≈ 3*2e-14u"m^3/s"
@test_throws ErrorException const_deridder_rensbergen(m_u * 1, m_u * 1, -1, 1)
@test γ_deridder_rensbergen(0.5, 2e-14u"m^3/s", 6000u"K", 1e22u"m^-3") ≈
1.5491933384829668e10u"s^-1"
@test γ_deridder_rensbergen(0.5, 1u"m^3/s", 1e3u"K", 1u"m^-3") ≈ sqrt(1e3)u"s^-1"
end
@testset "Linear Stark" begin
# Test against Sutton formula
@test γ_stark_linear.([0., 1.]u"m^-3", 3, 1) ≈ [0, 0.0025635396]u"rad / s"
tmp = 0.00102862026663837u"rad / s"
@test γ_stark_linear.([1, 1e20]u"m^-3", 3, 2) ≈ [tmp, tmp * (1e20)^(2/3)]
@test_throws AssertionError γ_stark_linear(1.0u"m^-3", 1, 1)
@test_throws AssertionError γ_stark_linear(1.0u"m^-3", 1, 0)
end
@testset "Quadratic Stark" begin
# Testing against implementation
@test Transparency.c4_traving(χup, χlo, χ∞, Z) ≈
3.744741607310466e-23u"m^4 / s"
@test const_stark_quadratic(mass, χup, χlo, χ∞, Z) ≈
2.7236711602117037e-13u"m^3 / s"
@test const_stark_quadratic(mass, χup, χlo, χ∞, Z; scaling=2) ≈
const_stark_quadratic(mass, χup, χlo, χ∞, Z) * 2^(2/3)
@test γ_stark_quadratic(1.2345u"m^-3", 0u"K") ≈ 1.2345u"s^-1"
@test γ_stark_quadratic(1e10u"m^-3", 10000u"K") ≈ 1e10u"s^-1" * 10000^(1/6)
# Testing against implementation
temp = [5000, 10000]u"K"
@test (γ_stark_quadratic_gray.(1e22u"m^-3", temp, 1e-20u"m^4/s") ≈
[5.709239783376956e11, 6.40840498153864e11]u"s^-1")
end
@testset "Radiation quantities" begin
@test calc_Aul(1u"m", ustrip(ε_0 * m_e * c_0), 1 / ustrip(2π * e^2)) ≈ 1u"s^-1"
@test calc_Bul(1u"m", 0u"Hz") ≈ 0u"m^3 / J"
@test calc_Bul(1000u"nm", 1e9u"Hz") ≈ 8.396002689872053e-6u"m^3 / J"
@test damping(1u"Hz", 1u"m", 1u"m") ≈ ustrip(1 / (4π * c_0))
end
end
@testset "Line" begin
@testset "Blackbody" begin
λ = 500.0u"nm"
@test blackbody_λ(λ, 5000u"K") ≈ 12.107190590398108u"kW / (m^2 * nm)"
@test blackbody_ν(c_0 / λ, 5000u"K") ≈ 10.096310186694323u"nW / (m^2 * Hz)"
@test blackbody_λ(λ, 5000u"K") ≈ blackbody_ν(c_0 / λ, 5000u"K") * c_0 / λ^2
end
@test Transparency.wavenumber_to_energy(ustrip(1 / (h * c_0)) * u"m^-1") ≈ 1u"J"
end
@testset "Collisions" begin
@testset "Johnson" begin
ne = 1e20u"m^-3"
@test_throws AssertionError coll_exc_hydrogen_johnson(2, 1, ne, 1u"K")
@test_throws AssertionError coll_exc_hydrogen_johnson(-1, 1, ne, 1u"K")
@test_throws AssertionError coll_ion_hydrogen_johnson(-1, ne, 1u"K")
@test coll_exc_hydrogen_johnson(1, 2, ne, 1u"K") ≈ 0.0u"s^-1"
@test coll_ion_hydrogen_johnson(1, ne, 1u"K") ≈ 0.0u"s^-1"
@test all(Transparency._bn.([1, 2, 3]) ≈ [-0.603, 0.116875, 0.25876543209876557])
@test all(Transparency._rn.([1, 2, 3]) ≈ [0.45, 1.94 * 2^-1.57, 1.94 * 3^-1.57])
@test Transparency.ξ(1e20) ≈ 0.0
@test Transparency.ξ(0.1) ≈ 6.125071285714834
# Testing against implementation
temp = [3000, 5000, 10000]u"K"
@test all(coll_exc_hydrogen_johnson.(1, 2, ne, temp) ≈
[3.930707156378747e-11, 0.0002263838629287018, 24.38373538697928]u"s^-1")
@test all(coll_exc_hydrogen_johnson.(1, 4, ne, temp) ≈
[1.3966268525286798e-16, 4.2003964667891764e-8, 0.08902629232613525]u"s^-1")
@test all(coll_exc_hydrogen_johnson.(2, 3, ne, temp) ≈
[41449.22586174524, 712944.3194152612, 6.358527475844925e6]u"s^-1")
@test all(coll_ion_hydrogen_johnson.(1, ne, temp) ≈
[2.0663760818067978e-18, 3.980869050632006e-9, 0.04717372440180093]u"s^-1")
@test all(coll_ion_hydrogen_johnson.(2, ne, temp) ≈
[5.6897483527787776, 1746.3754923299948, 166085.00320954874]u"s^-1")
@test all(coll_ion_hydrogen_johnson.(3, ne, temp) ≈
[35134.766878609924, 592137.0862432675, 6.093655265672546e6]u"s^-1")
@test all(CE_RH_hydrogen.(1, [2, 3, 4], 5000u"K") ≈ [6.098416316523097e-16,
1.151527001714162e-16, 4.20364505409004e-17]u"m^3 / (K^(1/2) * s)")
@test all(CI_RH_hydrogen.([1, 2, 3], 5000u"K") ≈ [2.86396932776755e-17,
6.595860989488697e-16, 2.791765778377678e-15]u"m^3 / (K^(1/2) * s)")
end
@testset "RH rates" begin
temp = [3000, 5000, 10000]u"K"
data1 = [1, 1, 1]u"m^3 / (K^(1/2) * s)"
data2 = [1, 1, 1]
interp1 = linear_interpolation(temp, data1)
interp2 = linear_interpolation(temp, data2)
# Using wrong units of data:
@test_throws MethodError coll_CE(interp2, 1, 1u"m^-3", 5000u"K")
@test_throws MethodError coll_CI(interp2, 1u"J", 1u"m^-3", 5000u"K")
@test_throws MethodError coll_Ω(interp1, 1, 1u"m^-3", 5000u"K")
@test_throws AssertionError coll_CI(interp1, -1u"J", 1u"m^-3", 5000u"K")
@test coll_CE(interp1, 1, 1u"m^-3", 10000u"K") ≈ 100.0u"s^-1"
@test coll_CI(interp1, 1e-50u"J", 1u"m^-3", 10000u"K") ≈ 100.0u"s^-1"
@test coll_Ω(interp2, 1, 1u"m^-3"/8.629132180819955e-14, 10000u"K") ≈ 1.0u"s^-1"
end
@testset "Przybilla & Butter" begin
temp = [5000, 10000, 30000, 250000]u"K"
ne = 1u"m^-3"
c0 = sqrt.(temp) ./ Transparency.Ω_c0
@test_throws AssertionError coll_deexc_hydrogen_PB04(1, 8, 1, ne, temp[1])
@test_throws AssertionError coll_deexc_hydrogen_PB04(3, 1, 1, ne, temp[1])
# Compare with a few random values from the original table
@test isapprox(ustrip.(coll_deexc_hydrogen_PB04.(1, 2, 1, ne, temp) .* c0),
[0.698, 0.809, 1.15, 3.95], atol=1e-3)
@test isapprox(ustrip.(coll_deexc_hydrogen_PB04.(1, 4, 1, ne, temp) .* c0),
[0.102, 0.122, 0.228, 0.488], atol=1e-3)
@test isapprox(ustrip.(coll_deexc_hydrogen_PB04.(2, 3, 1, ne, temp) .* c0),
[27.8, 33.8, 62.0, 252], atol=1e-3)
@test isapprox(ustrip.(coll_deexc_hydrogen_PB04.(2, 7, 1, ne, temp) .* c0),
[7.26, 9.27, 11.4, 11.9], atol=1e-3)
@test isapprox(ustrip.(coll_deexc_hydrogen_PB04.(4, 5, 1, ne, temp) .* c0),
[817, 1350, 3400, 10000], atol=1e-3)
end
end
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.2 | 870b7b286b2114e1b051860e55f6ca7ee7a39b83 | docs | 492 | [![Build Status][gha-img]][gha-url] [![][codecov-img]][codecov-url]
# Transparency.jl
A package to compute opacity (or transparency) in stellar atmospheres.
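A minimal usage sketch (the numbers are arbitrary, illustrative values; see the individual docstrings for the available recipes and further sources of opacity):

```julia
using Transparency, Unitful

λ, temp = 500u"nm", 6000u"K"
n_e = 1e20u"m^-3"    # electron density
n_HI = 1e23u"m^-3"   # neutral hydrogen density

# continuum extinction (in m^-1) from Thomson scattering plus H⁻ free-free and bound-free
α = α_thomson(n_e) +
    α_hminus_ff(λ, temp, n_HI, n_e; recipe="stilley") +
    α_hminus_bf(λ, temp, n_HI, n_e; recipe="john")
```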
[gha-img]: https://github.com/tiagopereira/Transparency.jl/workflows/CI/badge.svg
[gha-url]: https://github.com/tiagopereira/Transparency.jl/actions?query=workflow%3ACI
[codecov-img]: https://codecov.io/gh/tiagopereira/Transparency.jl/branch/main/graph/badge.svg
[codecov-url]: https://codecov.io/gh/tiagopereira/Transparency.jl
| Transparency | https://github.com/tiagopereira/Transparency.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | code | 4968 | module TypeTransform
export @transform, transform
include("fexpr.jl")
include("subtypes.jl")
"""
@transform
Transform the given type to another type when defining a method.
Use `@transform` and the function that transforms the type to another type. The function should return an `Array` of types that you want the method to be defined for.
For example, we can use the `allsubtypes()` type transform function to define specific methods for all subtypes of a given type (which helps fix ambiguity errors!).
```julia
using TypeTransform
abstract type A end
abstract type B <:A end
abstract type C <:B end
@transform function foo(a, b::allsubtypes(A))
println("a new method")
end
```
Since `allsubtypes(A)` returns the array of types `[A, B, C]`, three methods are defined
```julia
julia> methods(foo)
# 3 methods for generic function "foo":
[1] foo(a, b::C) in Main at none:2
[2] foo(a, b::B) in Main at none:2
[3] foo(a, b::A) in Main at none:2
```
Note that you could use `subtypes()` instead of `allsubtypes()`, which defines methods only for the direct subtypes (`[B]` in this case).
If you want only specific functions to be considered for transformation by `@transform`, give an `Array` of `Symbol`s containing the function names you want to be transformed.
```julia
@transform [:subtypes, :allsubtypes], function foo_array(a, b::allsubtypes(A))
println("a new method")
end
```
It is possible to use the function calls inside curly expressions like `Union{A, subtypes(B)}` or `Type{allsubtypes(A)}`, or to use arguments without a name:
```julia
@transform function foo_curly(a, ::Union{T,allsubtypes(A)}, c::T) where {T<:Int64}
println("a new method")
end
```
# Motivation
The first motivation for this package was to fix ambiguity errors by defining specific methods.
If you run the following program
```julia
abstract type A end
abstract type B <:A end
# my general vector method
foo(a::Vector, b::Type{<:A}) = print("vector method")
# my special B method
foo(a, b::Type{B}) = print("B method")
```
`foo([1,2], B)` will give an ambiguity error, while if you use `allsubtypes`, you can fix the issue.
```julia
# my general vector method
@transform foo(a::Vector, b::Type{allsubtypes(A)}) = print("vector method")
```
"""
macro transform(expr::Expr)
#TODO: support for multiple function transforms in a @transform
modul = __module__
macroexpand(modul, expr)
out = transform(modul, expr)
return out
end
function transform(modul::Module, expr)
if expr.head == :block
expr = expr.args[2]
end
if expr.head == :tuple # some functions are specified
funclist = eval(expr.args[1])
isfunclist = true
fexpr = expr.args[2]
else
fexpr = expr
isfunclist = false
end
f, args, wherestack, body = unwrap_fun(fexpr, true, true)
fmethods = Expr[]
for (iArg, arg) in enumerate(args)
if arg isa Expr && arg.head == :(::) &&
arg.args[end] isa Expr
if arg.args[end].head == :call
# skip this function if it is not in funclist
if isfunclist && !(arg.args[end].args[1] in funclist)
continue
end
funcname = arg.args[end].args[1]
intype = arg.args[end].args[2]
isCurly =false
elseif arg.args[end].head == :curly
# string match is faster
strarg = string(arg.args[end])
# match any function name
m = match(r"([a-zA-Z\_][a-zA-Z0-9\_]*)\((.*)\)", strarg)
if m === nothing
continue
end
funcname = Meta.parse(m.captures[1])
# skip this function if it is not in funclist
if isfunclist && !(funcname in funclist)
continue
end
intype = Meta.parse(m.captures[2])
isCurly =true
else
continue
end
outtypes =Core.eval(modul,
quote
$funcname($intype)
end)
outtypes_len = length(outtypes)
fmethod = Vector{Expr}(undef, outtypes_len)
for (iouttype, outtypeI) in enumerate(outtypes)
# replacing with actual trasformed type
if !isCurly
args[iArg].args[end] = outtypeI
else
args[iArg].args[end] = Meta.parse(replace(strarg, m.match=>string(outtypeI)))
end
fmethod[iouttype] = copy(wrap_fun(f, args, wherestack, body))
end
append!(fmethods, fmethod)
end
end
if isempty(fmethods)
error("No method defined")
end
# print(fmethods)
out = quote
        Base.@__doc__(function $f end) # supports documentation
$(esc.(fmethods)...)
end
return out
end
end
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | code | 3368 | export unwrap_fun, wrap_fun, unwrap_head, wrap_head, unwrap_fcall, wrap_fcall
################################################################
"""
head, body = unwrap_fun(fexpr)
fcall, wherestack, body = unwrap_fun(fexpr,true)
f, args, wherestack, body = unwrap_fun(fexpr, true, true)
Unwraps function expression.
"""
function unwrap_fun(expr::Expr)
if expr.head in (:function, :(=))
fexpr = expr
elseif expr.head == :block
fexpr = expr.args[2] # separate fexpr from block
else
error("Expression is not supported")
end
head = fexpr.args[1]
body = fexpr.args[2]
return head, body
end
function unwrap_fun(expr::Expr, should_unwrap_head::Bool)
if expr.head in (:function, :(=))
fexpr = expr
elseif expr.head == :block
fexpr = expr.args[2] # separate fexpr from block
else
error("Expression is not supported")
end
head = fexpr.args[1]
fcall, wherestack = unwrap_head(head)
body = fexpr.args[2]
return fcall, wherestack, body
end
function unwrap_fun(expr::Expr, should_unwrap_head::Bool, should_unwrap_fcall::Bool)
if expr.head in (:function, :(=))
fexpr = expr
elseif expr.head == :block
fexpr = expr.args[2] # separate fexpr from block
else
error("Expression is not supported")
end
head = fexpr.args[1]
fcall, wherestack = unwrap_head(head)
f, args = unwrap_fcall(fcall)
body = fexpr.args[2]
return f, args, wherestack, body
end
################################################################
"""
fexpr = wrap_fun(f, args, wherestack, body)
fexpr = wrap_fun(fcall, wherestack, body)
fexpr = wrap_fun(head, body)
fexpr = wrap_fun(fexpr)
Returns a function definition expression
"""
function wrap_fun(f, args, wherestack, body)
fcall = wrap_fcall(f, args)
head = wrap_head(fcall, wherestack)
return Expr(:function, head, Expr(:block, body))
end
function wrap_fun(fcall, wherestack, body)
head = wrap_head(fcall, wherestack)
return Expr(:function, head, Expr(:block, body))
end
function wrap_fun(head::Expr, body::Expr)
return Expr(:function, head, Expr(:block, body))
end
function wrap_fun(fexpr::Expr)
if fexpr.head in (:function, :(=))
return fexpr
elseif fexpr.head == :block
fexpr = fexpr.args[2] # separate fexpr from block
return fexpr
else
error("Expression is not supported")
end
end
################################################################
function unwrap_head(head)
wherestack = Any[]
while head isa Expr && head.head == :where
push!(wherestack, head.args[2])
head = head.args[1]
end
fcall = head
fcall, wherestack
end
function wrap_head(fcall, wherestack)
for w in Iterators.reverse(wherestack)
fcall = Expr(:where, fcall, w)
# fcall = Expr(:where, fcall, esc(w))
end
head = fcall
return head
end
################################################################
function unwrap_fcall(fcall::Expr)
if !(fcall.head == :call)
error("Expression is not supported")
end
f = fcall.args[1]
args = fcall.args[2:end]
return f, args
end
function wrap_fcall(f, args)
fcall = :($f($((args)...)))
return fcall
end
################################################################
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | code | 544 | export allsubtypes, subtypes
import InteractiveUtils.subtypes
################################################################
function allsubtypes(T::Type)
t = Type[]
push!(t, T) # include itself
allsubtypes.(subtypes(T), Ref(t))
return unique(t)
end
function allsubtypes(T, t)
# T is an element
# recursive method
push!(t, T)
if isempty(subtypes(T))
return
else
allsubtypes.(subtypes(T), Ref(t))
end
end
allsubtypes(T::Symbol) = allsubtypes(Core.eval(Main, T))#convert Symbol to Type
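# Commented usage sketch (assuming abstract types `A`, `B <: A`, `C <: B`, as in the tests):
#
#   subtypes(A)    # == [B]        direct subtypes only (imported from InteractiveUtils)
#   allsubtypes(A) # == [A, B, C]  the type itself plus all transitive subtypes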
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | code | 337 | @testset "fexpr" begin
fexpr = :( function bar(a, b::T1, c::T2) where {T1<:AbstractArray} where {T2<:Int64}
print("vector method")
end)
f, args, wherestack, body = unwrap_fun(fexpr, true, true)
fexpr1 = wrap_fun(f, args, wherestack, body)
eval(fexpr1)
methods(bar)
# @test fexpr1 == fexpr
end
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | code | 75 | using TypeTransform
using Test
include("fexpr.jl")
include("specific.jl")
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | code | 1199 | abstract type A end
abstract type B <:A end
abstract type C <:B end
@testset "subtypes" begin
@transform function foo(a, b::subtypes(A), c::T) where {T<:Int64}
println("a new method")
end
@test length(methods(foo)) == 1
end
@testset "allsubtypes" begin
@transform function foo_all(a, b::allsubtypes(A), c::T) where {T<:Int64}
println("a new method")
end
@test length(methods(foo_all)) == 3
end
@testset "curly" begin
@transform function foo_curly(a, b::Type{allsubtypes(A)}, c::T) where {T<:Int64}
println("a new method")
end
@test length(methods(foo_curly)) == 3
@transform function foo_curly2(a, b::Union{T,allsubtypes(A)}, c::T) where {T<:Int64}
println("a new method")
end
@test length(methods(foo_curly2)) == 3
end
@testset "noname arguments" begin
@transform function foo_noname(a, ::allsubtypes(A))
println("a new method")
end
@test length(methods(foo_noname)) == 3
end
@testset "array of functions" begin
@transform [:subtypes, :allsubtypes], function foo_array(a, b::allsubtypes(A))
println("a new method")
end
@test length(methods(foo_array)) == 3
end
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.1.3 | 7636444dde71ab19019a2f9c9b1bd244194c2572 | docs | 2271 | # TypeTransform
[](https://github.com/aminya/TypeTransform.jl/actions)
Transform the given type into other types while defining a method.
Use `@transform` together with a function that transforms the given type into other types. The function should return an `Array` of the types you want the method to be defined for.
For example, we can use the `allsubtypes()` type transform function to define specific methods for all subtypes of a given type (e.g. to fix ambiguity errors!).
```julia
using TypeTransform
abstract type A end
abstract type B <:A end
abstract type C <:B end
@transform function foo(a, b::allsubtypes(A))
println("a new method")
end
```
Since `allsubtypes(A)` returns the array of types `[A, B, C]`, three methods are defined
```julia
julia> methods(foo)
# 3 methods for generic function "foo":
[1] foo(a, b::C) in Main at none:2
[2] foo(a, b::B) in Main at none:2
[3] foo(a, b::A) in Main at none:2
```
Note that you could use `subtypes()` instead of `allsubtypes()`, which defines methods only for the direct subtypes (`[B]` in this case).
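For example (a sketch mirroring the package tests; `foo_direct` is just an illustrative name):
```julia
@transform function foo_direct(a, b::subtypes(A))
    println("a new method")
end
```
```julia
julia> methods(foo_direct)
# 1 method for generic function "foo_direct":
[1] foo_direct(a, b::B) in Main
```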
If you want that only specific functions to be considered in transformation by `@transform`, give an `Array` of `Symbol`s that contains the function names you want to be transformed.
```julia
@transform [:subtypes, :allsubtypes], function foo_array(a, b::allsubtypes(A))
println("a new method")
end
```
It is possible to use the function names inside curly expressions like `Union{A, subtypes(B)}` or `Type{allsubtypes(A)}`, or to use arguments without a name:
```julia
@transform function foo_curly(a, ::Union{T,allsubtypes(A)}, c::T) where {T<:Int64}
println("a new method")
end
```
# Motivation
The first motivation for this package was to fix ambiguity errors by defining specific methods.
If you run the following program:
```julia
abstract type A end
abstract type B <:A end
# my general vector method
foo(a::Vector, b::Type{<:A}) = print("vector method")
# my special B method
foo(a, b::Type{B}) = print("B method")
```
`foo([1,2], B)` will give an ambiguity error, while if you use `allsubtypes`, you can fix the issue.
```julia
# my general vector method
@transform foo(a::Vector, b::allsubtypes(A)) = print("vector method")
```
| TypeTransform | https://github.com/aminya/TypeTransform.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 621 | using NamedGraphs
using Documenter
DocMeta.setdocmeta!(NamedGraphs, :DocTestSetup, :(using NamedGraphs); recursive=true)
makedocs(;
modules=[NamedGraphs],
authors="Matthew Fishman <[email protected]> and contributors",
repo="https://github.com/mtfishman/NamedGraphs.jl/blob/{commit}{path}#{line}",
sitename="NamedGraphs.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://mtfishman.github.io/NamedGraphs.jl",
assets=String[],
),
pages=["Home" => "index.md"],
)
deploydocs(; repo="github.com/mtfishman/NamedGraphs.jl", devbranch="main")
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 4485 | #' # NamedGraphs
#' [](https://mtfishman.github.io/NamedGraphs.jl/stable)
#' [](https://mtfishman.github.io/NamedGraphs.jl/dev)
#' [](https://github.com/mtfishman/NamedGraphs.jl/actions/workflows/CI.yml?query=branch%3Amain)
#' [](https://codecov.io/gh/mtfishman/NamedGraphs.jl)
#' [](https://github.com/invenia/BlueStyle)
#' ## Installation
#' You can install the package using Julia's package manager:
#' ```julia
#' julia> ] add NamedGraphs
#' ```
#' ## Introduction
#' This package introduces graph types with named vertices, which are built on top of the `Graph`/`SimpleGraph` types in the [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl) package, which only have contiguous integer vertices (i.e. linear indexing). The vertex names can be strings, tuples of integers, or other unique identifiers (anything that is hashable).
#' There is a supertype `AbstractNamedGraph` that defines an interface and fallback implementations of standard
#' Graphs.jl operations, and two implementations: `NamedGraph` and `NamedDiGraph`.
#' ## `NamedGraph`
#' `NamedGraph` simply takes a set of names for the vertices of the graph. For example:
#+ term=true
using Graphs: grid, has_edge, has_vertex, neighbors
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: ⊔, disjoint_union, subgraph, rename_vertices
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
#' Common operations are defined as you would expect:
#+ term=true
has_vertex(g, "A")
has_edge(g, "A" => "B")
has_edge(g, "A" => "C")
neighbors(g, "B")
subgraph(g, ["A", "B"])
#' Internally, this type wraps a `SimpleGraph`, and stores a `Dictionary` from the [Dictionaries.jl](https://github.com/andyferris/Dictionaries.jl) package that maps the vertex names to the linear indices of the underlying `SimpleGraph`.
#' Graph operations are implemented by mapping back and forth between the generalized named vertices and the linear index vertices of the `SimpleGraph`.
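#' As a quick illustration, the name-to-position mapping can be inspected through the
#' internal helpers `NamedGraphs.vertex_positions` and `NamedGraphs.ordered_vertices`
#' (implementation details, shown here only as a sketch rather than public API):
#+ term=true
using NamedGraphs: NamedGraphs
NamedGraphs.vertex_positions(g)
NamedGraphs.ordered_vertices(g)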
#' It is natural to use tuples of integers as the names for the vertices of graphs with grid connectivities.
#' For example:
#+ term=true
dims = (2, 2)
g = NamedGraph(grid(dims), Tuple.(CartesianIndices(dims)))
#' In the future we will provide a shorthand notation for this, such as `cartesian_graph(grid((2, 2)), (2, 2))`.
#' Internally the vertices are all stored as tuples with a label in each dimension.
#' Vertices can be referred to by their tuples:
#+ term=true
has_vertex(g, (1, 1))
has_edge(g, (1, 1) => (2, 1))
has_edge(g, (1, 1) => (2, 2))
neighbors(g, (2, 2))
#' You can use vertex names to get [induced subgraphs](https://juliagraphs.org/Graphs.jl/dev/core_functions/operators/#Graphs.induced_subgraph-Union{Tuple{T},%20Tuple{U},%20Tuple{T,%20AbstractVector{U}}}%20where%20{U%3C:Integer,%20T%3C:AbstractGraph}):
#+ term=true
subgraph(v -> v[1] == 1, g)
subgraph(v -> v[2] == 2, g)
subgraph(g, [(1, 1), (2, 2)])
#' You can also take [disjoint unions](https://en.wikipedia.org/wiki/Disjoint_union) or concatenations of graphs:
#+ term=true
g₁ = g
g₂ = g
disjoint_union(g₁, g₂)
g₁ ⊔ g₂ # Same as above
#' The symbol `⊔` is just an alias for `disjoint_union` and can be written in the terminal
#' or in your favorite [IDE with the appropriate Julia extension](https://julialang.org/) with `\sqcup<tab>`
#' By default, this maps the vertices `v₁ ∈ vertices(g₁)` to `(v₁, 1)` and the vertices `v₂ ∈ vertices(g₂)`
#' to `(v₂, 2)`, so the resulting vertices of the unioned graph will always be unique.
#' The resulting graph will have no edges between vertices `(v₁, 1)` and `(v₂, 2)`; these would have to
#' be added manually.
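#' For example, to manually connect the two copies after taking the disjoint union
#' (a sketch; `add_edge!` comes from Graphs.jl):
#+ term=true
using Graphs: add_edge!
g₁₂ = g₁ ⊔ g₂
add_edge!(g₁₂, ((1, 1), 1) => ((1, 1), 2))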
#' The original graphs can be obtained from subgraphs:
#+ term=true
rename_vertices(first, subgraph(v -> v[2] == 1, g₁ ⊔ g₂))
rename_vertices(first, subgraph(v -> v[2] == 2, g₁ ⊔ g₂))
#' ## Generating this README
#' This file was generated with [Weave.jl](https://github.com/JunoLab/Weave.jl) with the following commands:
#+ eval=false
using NamedGraphs: NamedGraphs
using Weave: Weave
Weave.weave(
joinpath(pkgdir(NamedGraphs), "examples", "README.jl");
doctype="github",
out_path=pkgdir(NamedGraphs),
)
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 503 | using NamedGraphs.GraphsExtensions:
boundary_edges, boundary_vertices, inner_boundary_vertices, outer_boundary_vertices
using NamedGraphs.NamedGraphGenerators: named_grid
g = named_grid((5, 5))
subgraph_vertices = [(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4), (4, 2), (4, 3), (4, 4)]
vs = @show boundary_vertices(g, subgraph_vertices)
vs = @show inner_boundary_vertices(g, subgraph_vertices)
vs = @show outer_boundary_vertices(g, subgraph_vertices)
es = @show boundary_edges(g, subgraph_vertices)
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 353 | using Graphs: grid
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: ⊔, subgraph
g1 = NamedGraph(grid((2, 2)), Tuple.(CartesianIndices((2, 2))))
g2 = NamedGraph(grid((2, 2)), Tuple.(CartesianIndices((2, 2))))
g = ⊔("X" => g1, "Y" => g2)
@show g1
@show g2
@show g
@show subgraph(v -> v[1] == "X", g)
@show subgraph(v -> v[1] == "Y", g)
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 597 | using Graphs: path_graph
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: mincut_partitions
g = NamedGraph(path_graph(4), ["A", "B", "C", "D"])
part1, part2 = mincut_partitions(g)
@show part1, part2
# Requires `GraphsFlows` to be loaded.
using GraphsFlows: GraphsFlows
part1, part2 = mincut_partitions(g, "A", "D")
@show part1, part2
weights = Dict{Any,Float64}()
weights["A", "B"] = 3.0
weights["B", "C"] = 2.0
weights["C", "D"] = 3.0
part1, part2 = mincut_partitions(g, weights)
@show part1, part2
part1, part2 = mincut_partitions(g, "A", "D", weights)
@show part1, part2
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1144 | using Graphs: grid, has_edge, has_vertex, ne, nv
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: ⊔, subgraph
position_graph = grid((4,))
vs = ["A", "B", "C", "D"]
g = NamedGraph(position_graph, vs)
@show has_vertex(g, "A")
@show !has_vertex(g, "E")
@show has_edge(g, "A" => "B")
@show !has_edge(g, "A" => "C")
g_sub = subgraph(g, ["A"])
@show has_vertex(g_sub, "A")
@show !has_vertex(g_sub, "B")
@show !has_vertex(g_sub, "C")
@show !has_vertex(g_sub, "D")
g_sub = subgraph(g, ["A", "B"])
@show has_vertex(g_sub, "A")
@show has_vertex(g_sub, "B")
@show !has_vertex(g_sub, "C")
@show !has_vertex(g_sub, "D")
@show has_edge(g_sub, "A" => "B")
g_sub = subgraph(Returns(true), g)
@show has_vertex(g_sub, "A")
@show has_vertex(g_sub, "B")
@show has_vertex(g_sub, "C")
@show has_vertex(g_sub, "D")
g_union = g ⊔ g
@show nv(g_union) == 8
@show ne(g_union) == 6
@show has_vertex(g_union, ("A", 1))
@show has_vertex(g_union, ("A", 2))
# Error: vertex names are the same
# g_vcat = [g; g]
# TODO: Implement
## g_hcat = [g;; g]
##
## @show nv(g_hcat) == 8
## @show ne(g_hcat) == 6
##
## @show has_vertex(g_hcat, ("A", 1))
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1465 | using Graphs: grid, has_edge, has_vertex, nv
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: ⊔, subgraph
position_graph = grid((2, 2))
vs = [("X", 1), ("X", 2), ("Y", 1), ("Y", 2)]
g = NamedGraph(position_graph, vs)
@show has_vertex(g, ("X", 1))
@show has_edge(g, ("X", 1) => ("X", 2))
@show !has_edge(g, ("X", 2) => ("Y", 1))
@show has_edge(g, ("X", 2) => ("Y", 2))
g_sub = subgraph(g, [("X", 1)])
@show has_vertex(g_sub, ("X", 1))
@show !has_vertex(g_sub, ("X", 2))
@show !has_vertex(g_sub, ("Y", 1))
@show !has_vertex(g_sub, ("Y", 2))
g_sub = subgraph(g, [("X", 1), ("X", 2)])
@show has_vertex(g_sub, ("X", 1))
@show has_vertex(g_sub, ("X", 2))
@show !has_vertex(g_sub, ("Y", 1))
@show !has_vertex(g_sub, ("Y", 2))
# g_sub = g["X", :]
g_sub = subgraph(v -> v[1] == "X", g)
@show has_vertex(g_sub, ("X", 1))
@show has_vertex(g_sub, ("X", 2))
@show !has_vertex(g_sub, ("Y", 1))
@show !has_vertex(g_sub, ("Y", 2))
# g_sub = g[:, 2]
g_sub = subgraph(v -> v[2] == 2, g)
@show !has_vertex(g_sub, ("X", 1))
@show has_vertex(g_sub, ("X", 2))
@show !has_vertex(g_sub, ("Y", 1))
@show has_vertex(g_sub, ("Y", 2))
position_graph = grid((2, 2))
g1 = NamedGraph(position_graph, Tuple.(CartesianIndices((2, 2))))
g2 = NamedGraph(position_graph, Tuple.(CartesianIndices((2, 2))))
g_disjoint_union = g1 ⊔ g2
@show nv(g_disjoint_union) == 8
## g_vcat = [g1; g2]
##
## @show nv(g_vcat) == 8
##
## g_hcat = [g1;; g2]
##
## @show nv(g_hcat) == 8
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 659 | using Graphs: add_edge!, grid, has_edge, has_vertex, neighbors
using NamedGraphs: NamedGraph
using NamedGraphs.GraphsExtensions: subgraph
g = NamedGraph(grid((4,)), ["A", "B", "C", "D"])
@show has_vertex(g, "A")
@show has_vertex(g, "B")
@show has_vertex(g, "C")
@show has_vertex(g, "D")
@show has_edge(g, "A" => "B")
add_edge!(g, "A" => "C")
@show has_edge(g, "A" => "C")
@show issetequal(neighbors(g, "A"), ["B", "C"])
@show issetequal(neighbors(g, "B"), ["A", "C"])
g_sub = subgraph(g, ["A", "B"])
@show has_vertex(g_sub, "A")
@show has_vertex(g_sub, "B")
@show !has_vertex(g_sub, "C")
@show !has_vertex(g_sub, "D")
@show has_edge(g_sub, "A" => "B")
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1971 | module NamedGraphsGraphsFlowsExt
using Graphs: AbstractGraph, IsDirected
using GraphsFlows: GraphsFlows
using NamedGraphs:
NamedGraphs,
AbstractNamedGraph,
DefaultNamedCapacity,
_symmetrize,
dist_matrix_to_position_dist_matrix,
ordered_vertices,
position_graph,
vertex_positions
using NamedGraphs.GraphsExtensions: GraphsExtensions, directed_graph
using SimpleTraits: SimpleTraits, @traitfn
@traitfn function NamedGraphs.dist_matrix_to_position_dist_matrix(
graph::AbstractNamedGraph::IsDirected, dist_matrix::DefaultNamedCapacity
)
return GraphsFlows.DefaultCapacity(graph)
end
@traitfn function GraphsFlows.mincut(
graph::AbstractNamedGraph::IsDirected,
source,
target,
capacity_matrix=DefaultNamedCapacity(graph),
algorithm::GraphsFlows.AbstractFlowAlgorithm=GraphsFlows.PushRelabelAlgorithm(),
)
position_part1, position_part2, flow = GraphsFlows.mincut(
directed_graph(position_graph(graph)),
vertex_positions(graph)[source],
vertex_positions(graph)[target],
dist_matrix_to_position_dist_matrix(graph, capacity_matrix),
algorithm,
)
(part1, part2) = map((position_part1, position_part2)) do position_part
return map(v -> ordered_vertices(graph)[v], position_part)
end
return (part1, part2, flow)
end
@traitfn function GraphsFlows.mincut(
graph::AbstractNamedGraph::(!IsDirected),
source,
target,
capacity_matrix=DefaultNamedCapacity(graph),
algorithm::GraphsFlows.AbstractFlowAlgorithm=GraphsFlows.PushRelabelAlgorithm(),
)
return GraphsFlows.mincut(
directed_graph(graph), source, target, _symmetrize(capacity_matrix), algorithm
)
end
function GraphsExtensions.mincut_partitions(
graph::AbstractGraph,
source,
target,
capacity_matrix=DefaultNamedCapacity(graph),
algorithm::GraphsFlows.AbstractFlowAlgorithm=GraphsFlows.PushRelabelAlgorithm(),
)
part1, part2, flow = GraphsFlows.mincut(graph, source, target, capacity_matrix, algorithm)
return part1, part2
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1761 | module NamedGraphsKaHyParExt
using Graphs: AbstractSimpleGraph, incidence_matrix
using KaHyPar: KaHyPar
using NamedGraphs.GraphsExtensions: GraphsExtensions, @Backend_str
using SplitApplyCombine: groupfind
using Suppressor: @suppress
GraphsExtensions.set_partitioning_backend!(Backend"kahypar"())
# KaHyPar configuration options
#
# configurations = readdir(joinpath(pkgdir(KaHyPar), "src", "config"))
# "cut_kKaHyPar_sea20.ini"
# "cut_rKaHyPar_sea20.ini"
# "km1_kKaHyPar-E_sea20.ini"
# "km1_kKaHyPar_eco_sea20.ini"
# "km1_kKaHyPar_sea20.ini"
# "km1_rKaHyPar_sea20.ini"
#
const KAHYPAR_ALGS = Dict([
(objective="edge_cut", alg="kway") => "cut_kKaHyPar_sea20.ini",
(objective="edge_cut", alg="recursive") => "cut_rKaHyPar_sea20.ini",
(objective="connectivity", alg="kway") => "km1_kKaHyPar_sea20.ini",
(objective="connectivity", alg="recursive") => "km1_rKaHyPar_sea20.ini",
])
"""
    partitioned_vertices(::Backend"kahypar", g::AbstractSimpleGraph, npartitions::Integer; objective="edge_cut", alg="kway", configuration=nothing, kwargs...)
- default `configuration` => "cut_kKaHyPar_sea20.ini"
- objective="edge_cut" => "cut_kKaHyPar_sea20.ini"
- objective="connectivity" => "km1_kKaHyPar_sea20.ini"
- imbalance::Number=0.03
"""
function GraphsExtensions.partitioned_vertices(
::Backend"kahypar",
g::AbstractSimpleGraph,
npartitions::Integer;
objective="edge_cut",
alg="kway",
configuration=nothing,
kwargs...,
)
if isnothing(configuration)
configuration = joinpath(
pkgdir(KaHyPar), "src", "config", KAHYPAR_ALGS[(; objective=objective, alg=alg)]
)
end
# https://github.com/kahypar/KaHyPar.jl/issues/20
partitioned_verts = @suppress KaHyPar.partition(
incidence_matrix(g), npartitions; configuration, kwargs...
)
return groupfind(partitioned_verts .+ 1)
end
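# Commented usage sketch of the backend method defined above (KaHyPar must be
# installed and loaded; the grid graph is just an arbitrary example):
#
#   using Graphs: grid
#   GraphsExtensions.partitioned_vertices(Backend"kahypar"(), grid((4, 4)), 2; alg="kway")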
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 942 | module NamedGraphsMetisExt
using Graphs: AbstractSimpleGraph
using Metis: Metis
using NamedGraphs.GraphsExtensions: GraphsExtensions, @Backend_str
using SplitApplyCombine: groupfind
GraphsExtensions.set_partitioning_backend!(Backend"metis"())
# Metis configuration options
const METIS_ALGS = Dict(["kway" => :KWAY, "recursive" => :RECURSIVE])
"""
    partitioned_vertices(::Backend"metis", g::AbstractSimpleGraph, npartitions::Integer; alg="recursive", kwargs...)
Partition the graph `g` into `npartitions` parts.
The partitioning algorithm is selected with the `alg` keyword:
 - "kway" (Metis `:KWAY`): multilevel k-way partitioning
 - "recursive" (Metis `:RECURSIVE`): multilevel recursive bisection
"""
function GraphsExtensions.partitioned_vertices(
::Backend"metis", g::AbstractSimpleGraph, npartitions::Integer; alg="recursive", kwargs...
)
metis_alg = METIS_ALGS[alg]
partitioned_verts = Metis.partition(g, npartitions; alg=metis_alg, kwargs...)
return groupfind(Int.(partitioned_verts))
end
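# Commented usage sketch of the backend method defined above (Metis must be
# installed and loaded; the grid graph is just an arbitrary example):
#
#   using Graphs: grid
#   GraphsExtensions.partitioned_vertices(Backend"metis"(), grid((4, 4)), 2; alg="kway")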
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 265 | module NamedGraphsSymRCMExt
using Graphs: AbstractGraph, adjacency_matrix
using NamedGraphs.GraphsExtensions: GraphsExtensions
using SymRCM: SymRCM
function GraphsExtensions.symrcm_perm(graph::AbstractGraph)
return SymRCM.symrcm(adjacency_matrix(graph))
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 903 | module NamedGraphs
include("lib/SimilarType/src/SimilarType.jl")
include("lib/Keys/src/Keys.jl")
include("lib/OrdinalIndexing/src/OrdinalIndexing.jl")
include("lib/OrderedDictionaries/src/OrderedDictionaries.jl")
include("lib/GraphGenerators/src/GraphGenerators.jl")
include("lib/GraphsExtensions/src/GraphsExtensions.jl")
include("utils.jl")
include("abstractnamededge.jl")
include("namededge.jl")
include("abstractnamedgraph.jl")
include("decorate.jl")
include("shortestpaths.jl")
include("distance.jl")
include("distances_and_capacities.jl")
include("steiner_tree.jl")
include("dfs.jl")
include("namedgraph.jl")
include("lib/NamedGraphGenerators/src/NamedGraphGenerators.jl")
include("lib/PartitionedGraphs/src/PartitionedGraphs.jl")
export AbstractNamedGraph, NamedDiGraph, NamedEdge, NamedGraph
using PackageExtensionCompat: @require_extensions
function __init__()
@require_extensions
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2068 | using Graphs: Graphs, AbstractEdge, dst, src
using .GraphsExtensions: GraphsExtensions, convert_vertextype, rename_vertices
abstract type AbstractNamedEdge{V} <: AbstractEdge{V} end
Base.eltype(::Type{<:AbstractNamedEdge{V}}) where {V} = V
Graphs.src(e::AbstractNamedEdge) = not_implemented()
Graphs.dst(e::AbstractNamedEdge) = not_implemented()
AbstractNamedEdge(e::AbstractNamedEdge) = e
function GraphsExtensions.convert_vertextype(
::Type{V}, E::Type{<:AbstractNamedEdge{V}}
) where {V}
return E
end
function GraphsExtensions.convert_vertextype(::Type, E::Type{<:AbstractNamedEdge})
return not_implemented()
end
function Base.show(io::IO, mime::MIME"text/plain", e::AbstractNamedEdge)
show(io, src(e))
print(io, " => ")
show(io, dst(e))
return nothing
end
Base.show(io::IO, edge::AbstractNamedEdge) = show(io, MIME"text/plain"(), edge)
# Conversions
Base.Pair(e::AbstractNamedEdge) = Pair(src(e), dst(e))
Base.Tuple(e::AbstractNamedEdge) = (src(e), dst(e))
# Convenience functions
Base.reverse(e::AbstractNamedEdge) = typeof(e)(dst(e), src(e))
function Base.:(==)(e1::AbstractNamedEdge, e2::AbstractNamedEdge)
return (src(e1) == src(e2) && dst(e1) == dst(e2))
end
Base.hash(e::AbstractNamedEdge, h::UInt) = hash(src(e), hash(dst(e), h))
# TODO: Define generic version in `GraphsExtensions`.
# TODO: Define generic `set_vertices` in `GraphsExtensions`.
set_src(e::AbstractNamedEdge, src) = set_vertices(e, src, dst(e))
# TODO: Define generic version in `GraphsExtensions`.
# TODO: Define generic `set_vertices` in `GraphsExtensions`.
set_dst(e::AbstractNamedEdge, dst) = set_vertices(e, src(e), dst)
function GraphsExtensions.rename_vertices(f::Function, e::AbstractNamedEdge)
# TODO: Define generic `set_vertices` in `GraphsExtensions`.
return set_vertices(e, f(src(e)), f(dst(e)))
end
function GraphsExtensions.rename_vertices(e::AbstractEdge, name_map)
return rename_vertices(v -> name_map[v], e)
end
function GraphsExtensions.rename_vertices(f::Function, e::AbstractEdge)
return rename_vertices(f, AbstractNamedEdge(e))
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 18978 | using Dictionaries: set!
using Graphs:
Graphs,
AbstractGraph,
IsDirected,
a_star,
add_edge!,
adjacency_matrix,
bfs_parents,
boruvka_mst,
connected_components,
degree,
edges,
has_path,
indegree,
inneighbors,
is_connected,
is_cyclic,
kruskal_mst,
ne,
neighborhood,
neighborhood_dists,
nv,
outdegree,
prim_mst,
rem_edge!,
spfa_shortest_paths,
vertices,
weights
using Graphs.SimpleGraphs: SimpleDiGraph, SimpleEdge
using .GraphsExtensions:
GraphsExtensions,
directed_graph,
incident_edges,
partitioned_vertices,
rename_vertices,
subgraph
using SimpleTraits: SimpleTraits, Not, @traitfn
abstract type AbstractNamedGraph{V} <: AbstractGraph{V} end
#
# Required for interface
#
Graphs.vertices(graph::AbstractNamedGraph) = not_implemented()
position_graph(graph::AbstractNamedGraph) = not_implemented()
Graphs.rem_vertex!(graph::AbstractNamedGraph, vertex) = not_implemented()
Graphs.add_vertex!(graph::AbstractNamedGraph, vertex) = not_implemented()
GraphsExtensions.rename_vertices(f::Function, g::AbstractNamedGraph) = not_implemented()
# TODO: Is this a good definition? Maybe make it generic to any graph?
function GraphsExtensions.permute_vertices(graph::AbstractNamedGraph, permutation)
return subgraph(graph, map(v -> ordered_vertices(graph)[v], permutation))
end
# Outputs an object that when indexed by a vertex
# returns the position of that vertex in the parent
# graph `position_graph(graph::AbstractNamedGraph)`.
# Inverse map of `ordered_vertices`.
vertex_positions(graph::AbstractNamedGraph) = not_implemented()
# Outputs an object that when indexed by a vertex position
# returns the corresponding vertex.
ordered_vertices(graph::AbstractNamedGraph) = not_implemented()
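# For example (sketch), for a graph constructed with vertices ["A", "B", "C"]:
#   vertex_positions(graph)["B"] == 2
#   ordered_vertices(graph)[2] == "B"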
Graphs.edgetype(graph::AbstractNamedGraph) = not_implemented()
# TODO: Define generic version in `GraphsExtensions`.
GraphsExtensions.directed_graph_type(G::Type{<:AbstractNamedGraph}) = not_implemented()
GraphsExtensions.undirected_graph_type(G::Type{<:AbstractNamedGraph}) = not_implemented()
# In terms of `position_graph_type`
# is_directed(::Type{<:AbstractNamedGraph}) = not_implemented()
GraphsExtensions.convert_vertextype(::Type, ::AbstractNamedGraph) = not_implemented()
# TODO: implement as:
#
# graph = set_position_graph(graph, copy(position_graph(graph)))
# graph = set_vertices(graph, copy(vertices(graph)))
#
# or:
#
# graph_copy = similar(typeof(graph))(vertices(graph))
# for e in edges(graph)
# add_edge!(graph_copy, e)
# end
Base.copy(graph::AbstractNamedGraph) = not_implemented()
function Graphs.merge_vertices!(
graph::AbstractNamedGraph, merge_vertices; merged_vertex=first(merge_vertices)
)
return not_implemented()
end
#
# Derived interface
#
position_graph_type(graph::AbstractNamedGraph) = typeof(position_graph(graph))
function Graphs.has_vertex(graph::AbstractNamedGraph, vertex)
# TODO: `vertices` should have fast lookup!
return vertex ∈ vertices(graph)
end
Graphs.SimpleDiGraph(graph::AbstractNamedGraph) = SimpleDiGraph(position_graph(graph))
Base.zero(G::Type{<:AbstractNamedGraph}) = G()
# TODO: Implement using `copyto!`?
function GraphsExtensions.directed_graph(graph::AbstractNamedGraph)
digraph = directed_graph_type(typeof(graph))(vertices(graph))
for e in edges(graph)
add_edge!(digraph, e)
add_edge!(digraph, reverse(e))
end
return digraph
end
# Default, can overload
Base.eltype(graph::AbstractNamedGraph) = eltype(vertices(graph))
# TODO: Rename `position_edges(graph::AbstractNamedGraph)`.
function edge_to_position_edge(graph::AbstractNamedGraph, edge::AbstractEdge)
return edgetype(position_graph(graph))(
vertex_positions(graph)[src(edge)], vertex_positions(graph)[dst(edge)]
)
end
# TODO: Rename `named_edges(graph::AbstractNamedGraph)`.
function position_edge_to_edge(graph::AbstractNamedGraph, position_edge::AbstractEdge)
return edgetype(graph)(
ordered_vertices(graph)[src(position_edge)], ordered_vertices(graph)[dst(position_edge)]
)
end
function Graphs.edges(graph::AbstractNamedGraph)
return map(e -> position_edge_to_edge(graph, e), edges(position_graph(graph)))
end
# TODO: write in terms of a generic function.
for f in [
:(Graphs.outneighbors),
:(Graphs.inneighbors),
:(Graphs.all_neighbors),
:(Graphs.neighbors),
]
@eval begin
function $f(graph::AbstractNamedGraph, vertex)
position_vertices = $f(position_graph(graph), vertex_positions(graph)[vertex])
return map(v -> ordered_vertices(graph)[v], position_vertices)
end
# Ambiguity errors with Graphs.jl
function $f(graph::AbstractNamedGraph, vertex::Integer)
position_vertices = $f(position_graph(graph), vertex_positions(graph)[vertex])
return map(v -> ordered_vertices(graph)[v], position_vertices)
end
end
end
function Graphs.common_neighbors(g::AbstractNamedGraph, u, v)
return intersect(neighbors(g, u), neighbors(g, v))
end
namedgraph_indegree(graph::AbstractNamedGraph, vertex) = length(inneighbors(graph, vertex))
function namedgraph_outdegree(graph::AbstractNamedGraph, vertex)
return length(outneighbors(graph, vertex))
end
Graphs.indegree(graph::AbstractNamedGraph, vertex) = namedgraph_indegree(graph, vertex)
Graphs.outdegree(graph::AbstractNamedGraph, vertex) = namedgraph_outdegree(graph, vertex)
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.indegree(graph::AbstractNamedGraph, vertex::Integer)
return namedgraph_indegree(graph, vertex)
end
function Graphs.outdegree(graph::AbstractNamedGraph, vertex::Integer)
return namedgraph_outdegree(graph, vertex)
end
@traitfn function namedgraph_degree(graph::AbstractNamedGraph::IsDirected, vertex)
return indegree(graph, vertex) + outdegree(graph, vertex)
end
@traitfn namedgraph_degree(graph::AbstractNamedGraph::(!IsDirected), vertex) =
indegree(graph, vertex)
function Graphs.degree(graph::AbstractNamedGraph, vertex)
return namedgraph_degree(graph::AbstractNamedGraph, vertex)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.degree(graph::AbstractNamedGraph, vertex::Integer)
return namedgraph_degree(graph::AbstractNamedGraph, vertex)
end
function Graphs.degree_histogram(g::AbstractNamedGraph, degfn=degree)
hist = Dictionary{Int,Int}()
for v in vertices(g) # minimize allocations by
for d in degfn(g, v) # iterating over vertices
set!(hist, d, get(hist, d, 0) + 1)
end
end
return hist
end
function namedgraph_neighborhood(
graph::AbstractNamedGraph, vertex, d, distmx=weights(graph); dir=:out
)
position_distmx = dist_matrix_to_position_dist_matrix(graph, distmx)
position_vertices = neighborhood(
position_graph(graph), vertex_positions(graph)[vertex], d, position_distmx; dir
)
return [ordered_vertices(graph)[position_vertex] for position_vertex in position_vertices]
end
function Graphs.neighborhood(
graph::AbstractNamedGraph, vertex, d, distmx=weights(graph); dir=:out
)
return namedgraph_neighborhood(graph, vertex, d, distmx; dir)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.neighborhood(
graph::AbstractNamedGraph, vertex::Integer, d, distmx=weights(graph); dir=:out
)
return namedgraph_neighborhood(graph, vertex, d, distmx; dir)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.neighborhood(
graph::AbstractNamedGraph, vertex::Integer, d, distmx::AbstractMatrix{<:Real}; dir=:out
)
return namedgraph_neighborhood(graph, vertex, d, distmx; dir)
end
function namedgraph_neighborhood_dists(graph::AbstractNamedGraph, vertex, d, distmx; dir)
position_distmx = dist_matrix_to_position_dist_matrix(graph, distmx)
position_vertices_and_dists = neighborhood_dists(
position_graph(graph), vertex_positions(graph)[vertex], d, position_distmx; dir
)
return [
(ordered_vertices(graph)[position_vertex], dist) for
(position_vertex, dist) in position_vertices_and_dists
]
end
function Graphs.neighborhood_dists(
graph::AbstractNamedGraph, vertex, d, distmx=weights(graph); dir=:out
)
return namedgraph_neighborhood_dists(graph, vertex, d, distmx; dir)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.neighborhood_dists(
graph::AbstractNamedGraph, vertex::Integer, d, distmx=weights(graph); dir=:out
)
return namedgraph_neighborhood_dists(graph, vertex, d, distmx; dir)
end
# Fix for ambiguity error with `AbstractGraph` version
function Graphs.neighborhood_dists(
graph::AbstractNamedGraph, vertex::Integer, d, distmx::AbstractMatrix{<:Real}; dir=:out
)
return namedgraph_neighborhood_dists(graph, vertex, d, distmx; dir)
end
function namedgraph_mincut(graph::AbstractNamedGraph, distmx)
position_distmx = dist_matrix_to_position_dist_matrix(graph, distmx)
position_parity, bestcut = Graphs.mincut(position_graph(graph), position_distmx)
return Dictionary(vertices(graph), position_parity), bestcut
end
function Graphs.mincut(graph::AbstractNamedGraph, distmx=weights(graph))
return namedgraph_mincut(graph, distmx)
end
function Graphs.mincut(graph::AbstractNamedGraph, distmx::AbstractMatrix{<:Real})
return namedgraph_mincut(graph, distmx)
end
# TODO: Make this more generic?
function GraphsExtensions.partitioned_vertices(
graph::AbstractNamedGraph; npartitions=nothing, nvertices_per_partition=nothing, kwargs...
)
vertex_partitions = partitioned_vertices(
position_graph(graph); npartitions, nvertices_per_partition, kwargs...
)
# TODO: output the reverse of this dictionary (a Vector of Vector
# of the vertices in each partition).
# return Dictionary(vertices(g), partitions)
return map(vertex_partitions) do vertex_partition
return map(v -> ordered_vertices(graph)[v], vertex_partition)
end
end
function namedgraph_a_star(
graph::AbstractNamedGraph,
source,
destination,
distmx=weights(graph),
heuristic::Function=(v -> zero(eltype(distmx))),
edgetype_to_return=edgetype(graph),
)
position_distmx = dist_matrix_to_position_dist_matrix(graph, distmx)
position_shortest_path = a_star(
position_graph(graph),
vertex_positions(graph)[source],
vertex_positions(graph)[destination],
    position_distmx,
heuristic,
SimpleEdge,
)
return map(e -> position_edge_to_edge(graph, e), position_shortest_path)
end
function Graphs.a_star(graph::AbstractNamedGraph, source, destination, args...)
return namedgraph_a_star(graph, source, destination, args...)
end
# Fix ambiguity error with `AbstractGraph` version
function Graphs.a_star(
graph::AbstractNamedGraph{U}, source::Integer, destination::Integer, args...
) where {U<:Integer}
return namedgraph_a_star(graph, source, destination, args...)
end
# Fix ambiguity error with `AbstractGraph` version
function Graphs.a_star(
graph::AbstractNamedGraph, source::Integer, destination::Integer, args...
)
return namedgraph_a_star(graph, source, destination, args...)
end
function Graphs.spfa_shortest_paths(
graph::AbstractNamedGraph, vertex, distmx=weights(graph)
)
position_distmx = dist_matrix_to_position_dist_matrix(graph, distmx)
position_shortest_paths = spfa_shortest_paths(
position_graph(graph), vertex_positions(graph)[vertex], position_distmx
)
return Dictionary(vertices(graph), position_shortest_paths)
end
function Graphs.boruvka_mst(
g::AbstractNamedGraph, distmx::AbstractMatrix{<:Real}=weights(g); minimize=true
)
position_mst, weights = boruvka_mst(position_graph(g), distmx; minimize)
return map(e -> position_edge_to_edge(g, e), position_mst), weights
end
function Graphs.kruskal_mst(
g::AbstractNamedGraph, distmx::AbstractMatrix{<:Real}=weights(g); minimize=true
)
position_mst = kruskal_mst(position_graph(g), distmx; minimize)
return map(e -> position_edge_to_edge(g, e), position_mst)
end
function Graphs.prim_mst(g::AbstractNamedGraph, distmx::AbstractMatrix{<:Real}=weights(g))
position_mst = prim_mst(position_graph(g), distmx)
return map(e -> position_edge_to_edge(g, e), position_mst)
end
function Graphs.add_edge!(graph::AbstractNamedGraph, edge)
add_edge!(position_graph(graph), edge_to_position_edge(graph, edgetype(graph)(edge)))
return graph
end
Graphs.add_edge!(g::AbstractNamedGraph, src, dst) = add_edge!(g, edgetype(g)(src, dst))
function Graphs.rem_edge!(graph::AbstractNamedGraph, edge)
rem_edge!(position_graph(graph), edge_to_position_edge(graph, edgetype(graph)(edge)))
return graph
end
function Graphs.has_edge(graph::AbstractNamedGraph, edge::AbstractNamedEdge)
return has_edge(position_graph(graph), edge_to_position_edge(graph, edge))
end
# Handle edge-like inputs (e.g. pairs and tuples) as well as separate `src`, `dst` arguments.
Graphs.has_edge(g::AbstractNamedGraph, edge) = has_edge(g, edgetype(g)(edge))
Graphs.has_edge(g::AbstractNamedGraph, src, dst) = has_edge(g, edgetype(g)(src, dst))
function Graphs.has_path(
graph::AbstractNamedGraph, source, destination; exclude_vertices=vertextype(graph)[]
)
return has_path(
position_graph(graph),
vertex_positions(graph)[source],
vertex_positions(graph)[destination];
exclude_vertices=map(v -> vertex_positions(graph)[v], exclude_vertices),
)
end
function Base.union(graph1::AbstractNamedGraph, graph2::AbstractNamedGraph)
union_graph = promote_type(typeof(graph1), typeof(graph2))()
union_vertices = union(vertices(graph1), vertices(graph2))
for v in union_vertices
add_vertex!(union_graph, v)
end
for e in edges(graph1)
add_edge!(union_graph, e)
end
for e in edges(graph2)
add_edge!(union_graph, e)
end
return union_graph
end
function Base.union(
graph1::AbstractNamedGraph,
graph2::AbstractNamedGraph,
graph3::AbstractNamedGraph,
graph_rest::AbstractNamedGraph...,
)
return union(union(graph1, graph2), graph3, graph_rest...)
end
function Graphs.is_directed(graph_type::Type{<:AbstractNamedGraph})
return is_directed(position_graph_type(graph_type))
end
Graphs.is_directed(graph::AbstractNamedGraph) = is_directed(position_graph(graph))
Graphs.is_connected(graph::AbstractNamedGraph) = is_connected(position_graph(graph))
Graphs.is_cyclic(graph::AbstractNamedGraph) = is_cyclic(position_graph(graph))
@traitfn function Base.reverse(graph::AbstractNamedGraph::IsDirected)
return not_implemented()
end
@traitfn function Base.reverse!(g::AbstractNamedGraph::IsDirected)
return not_implemented()
end
# TODO: Move to `namedgraph.jl`, or make the output generic?
function Graphs.blockdiag(graph1::AbstractNamedGraph, graph2::AbstractNamedGraph)
new_position_graph = blockdiag(position_graph(graph1), position_graph(graph2))
new_vertices = vcat(vertices(graph1), vertices(graph2))
@assert allunique(new_vertices)
return GenericNamedGraph(new_position_graph, new_vertices)
end
# TODO: What `args` are needed?
Graphs.nv(graph::AbstractNamedGraph, args...) = nv(position_graph(graph), args...)
# TODO: What `args` are needed?
Graphs.ne(graph::AbstractNamedGraph, args...) = ne(position_graph(graph), args...)
# TODO: What `args` are needed?
function Graphs.adjacency_matrix(graph::AbstractNamedGraph, args...)
return adjacency_matrix(position_graph(graph), args...)
end
function Graphs.connected_components(graph::AbstractNamedGraph)
position_connected_components = connected_components(position_graph(graph))
return map(position_connected_components) do position_connected_component
return map(v -> ordered_vertices(graph)[v], position_connected_component)
end
end
function Graphs.merge_vertices(
graph::AbstractNamedGraph, merge_vertices; merged_vertex=first(merge_vertices)
)
merged_graph = copy(graph)
add_vertex!(merged_graph, merged_vertex)
for vertex in merge_vertices
for e in incident_edges(graph, vertex; dir=:both)
merged_edge = rename_vertices(v -> v == vertex ? merged_vertex : v, e)
if src(merged_edge) ≠ dst(merged_edge)
add_edge!(merged_graph, merged_edge)
end
end
end
for vertex in merge_vertices
if vertex ≠ merged_vertex
rem_vertex!(merged_graph, vertex)
end
end
return merged_graph
end
#
# Graph traversals
#
# Overload Graphs.tree. Used for bfs_tree and dfs_tree
# traversal algorithms.
function Graphs.tree(graph::AbstractNamedGraph, parents)
n = length(parents)
# TODO: Use `directed_graph` here to make more generic?
## t = GenericNamedGraph(DiGraph(n), vertices(graph))
t = directed_graph_type(typeof(graph))(vertices(graph))
for destination in eachindex(parents)
source = parents[destination]
if source != destination
add_edge!(t, source, destination)
end
end
return t
end
function namedgraph_bfs_tree(graph::AbstractNamedGraph, vertex; kwargs...)
return Graphs.tree(graph, bfs_parents(graph, vertex; kwargs...))
end
# Disambiguation from Graphs.bfs_tree
function Graphs.bfs_tree(graph::AbstractNamedGraph, vertex::Integer; kwargs...)
return namedgraph_bfs_tree(graph, vertex; kwargs...)
end
function Graphs.bfs_tree(graph::AbstractNamedGraph, vertex; kwargs...)
return namedgraph_bfs_tree(graph, vertex; kwargs...)
end
# Returns a Dictionary mapping a vertex to its parent
# vertex in the traversal/spanning tree.
function namedgraph_bfs_parents(graph::AbstractNamedGraph, vertex; kwargs...)
position_bfs_parents = bfs_parents(
position_graph(graph), vertex_positions(graph)[vertex]; kwargs...
)
# Works around issue in this `Dictionary` constructor:
# https://github.com/andyferris/Dictionaries.jl/blob/v0.4.1/src/Dictionary.jl#L139-L145
# when `inds` has holes. This removes the holes.
# TODO: Raise an issue with `Dictionaries.jl`.
## vertices_graph = Indices(collect(vertices(graph)))
# This makes the vertices ordered according to the parent vertices.
vertices_graph = map(v -> ordered_vertices(graph)[v], vertices(position_graph(graph)))
return Dictionary(
vertices_graph, map(v -> ordered_vertices(graph)[v], position_bfs_parents)
)
end
# Disambiguation from Graphs.jl
function Graphs.bfs_parents(graph::AbstractNamedGraph, vertex::Integer; kwargs...)
return namedgraph_bfs_parents(graph, vertex; kwargs...)
end
function Graphs.bfs_parents(graph::AbstractNamedGraph, vertex; kwargs...)
return namedgraph_bfs_parents(graph, vertex; kwargs...)
end
#
# Printing
#
function Base.show(io::IO, mime::MIME"text/plain", graph::AbstractNamedGraph)
println(io, "$(typeof(graph)) with $(nv(graph)) vertices:")
show(io, mime, vertices(graph))
println(io, "\n")
println(io, "and $(ne(graph)) edge(s):")
for e in edges(graph)
show(io, mime, e)
println(io)
end
return nothing
end
Base.show(io::IO, graph::AbstractNamedGraph) = show(io, MIME"text/plain"(), graph)
#
# Convenience functions
#
function Base.:(==)(g1::AbstractNamedGraph, g2::AbstractNamedGraph)
issetequal(vertices(g1), vertices(g2)) || return false
for v in vertices(g1)
issetequal(inneighbors(g1, v), inneighbors(g2, v)) || return false
issetequal(outneighbors(g1, v), outneighbors(g2, v)) || return false
end
return true
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1057 | using Graphs: add_edge!, dst, edges, neighbors, rem_edge!, rem_vertex!, src, vertices
using Graphs.SimpleGraphs: SimpleGraph
using .GraphsExtensions: GraphsExtensions, add_edges!
function GraphsExtensions.decorate_graph_edges(
g::AbstractNamedGraph; edge_map::Function=Returns(NamedGraph(1))
)
g_dec = copy(g)
es = edges(g_dec)
for e in es
dec = edge_map(e)
dec = rename_vertices(v -> (v, e), dec)
g_dec = union(g_dec, dec)
add_edge!(g_dec, src(e) => first(vertices(dec)))
add_edge!(g_dec, dst(e) => last(vertices(dec)))
rem_edge!(g_dec, src(e) => dst(e))
end
return g_dec
end
function GraphsExtensions.decorate_graph_vertices(
g::AbstractNamedGraph; vertex_map::Function=Returns(NamedGraph(1))
)
g_dec = copy(g)
vs = vertices(g_dec)
for v in vs
vneighbors = neighbors(g_dec, v)
dec = vertex_map(v)
dec = rename_vertices(vdec -> (vdec, v), dec)
g_dec = union(g_dec, dec)
rem_vertex!(g_dec, v)
add_edges!(g_dec, [first(vertices(dec)) => vn for vn in vneighbors])
end
return g_dec
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1887 | using Graphs: Graphs, dfs_parents, dfs_tree, topological_sort_by_dfs
using SimpleTraits: SimpleTraits, Not, @traitfn
@traitfn function Graphs.topological_sort_by_dfs(g::AbstractNamedGraph::IsDirected)
return map(v -> ordered_vertices(g)[v], topological_sort_by_dfs(position_graph(g)))
end
function namedgraph_dfs_tree(graph::AbstractNamedGraph, vertex; kwargs...)
return Graphs.tree(graph, dfs_parents(graph, vertex; kwargs...))
end
function Graphs.dfs_tree(graph::AbstractNamedGraph, vertex::Integer; kwargs...)
return namedgraph_dfs_tree(graph, vertex; kwargs...)
end
function Graphs.dfs_tree(graph::AbstractNamedGraph, vertex; kwargs...)
return namedgraph_dfs_tree(graph, vertex; kwargs...)
end
# Returns a Dictionary mapping a vertex to its parent
# vertex in the traversal/spanning tree.
function namedgraph_dfs_parents(graph::AbstractNamedGraph, vertex; kwargs...)
position_dfs_parents = dfs_parents(
position_graph(graph), vertex_positions(graph)[vertex]; kwargs...
)
# Works around issue in this `Dictionary` constructor:
# https://github.com/andyferris/Dictionaries.jl/blob/v0.4.1/src/Dictionary.jl#L139-L145
# when `inds` has holes. This removes the holes.
# TODO: Raise an issue with `Dictionaries.jl`.
## vertices_graph = Indices(collect(vertices(graph)))
# This makes the vertices ordered according to the parent vertices.
vertices_graph = map(v -> ordered_vertices(graph)[v], vertices(position_graph(graph)))
return Dictionary(
vertices_graph, map(v -> ordered_vertices(graph)[v], position_dfs_parents)
)
end
# Disambiguation from Graphs.dfs_parents
function Graphs.dfs_parents(graph::AbstractNamedGraph, vertex::Integer; kwargs...)
return namedgraph_dfs_parents(graph, vertex; kwargs...)
end
function Graphs.dfs_parents(graph::AbstractNamedGraph, vertex; kwargs...)
return namedgraph_dfs_parents(graph, vertex; kwargs...)
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 3086 | using Graphs: Graphs, dijkstra_shortest_paths, weights
using .GraphsExtensions: eccentricities
function namedgraph_eccentricity(graph::AbstractNamedGraph, vertex, distmx)
e = maximum(dijkstra_shortest_paths(graph, [vertex], distmx).dists)
e == typemax(e) && @warn("Infinite path length detected for vertex $vertex")
return e
end
function Graphs.eccentricity(graph::AbstractNamedGraph, vertex, distmx=weights(graph))
return namedgraph_eccentricity(graph, vertex, distmx)
end
# Fix for ambiguity error with `AbstractGraph`
function Graphs.eccentricity(
graph::AbstractNamedGraph, vertex::Integer, distmx::AbstractMatrix{<:Real}
)
return namedgraph_eccentricity(graph, vertex, distmx)
end
function Graphs.eccentricity(graph::AbstractNamedGraph, vertex, distmx::AbstractMatrix)
return namedgraph_eccentricity(graph, vertex, distmx)
end
function eccentricities_center(eccentricities)
rad = eccentricities_radius(eccentricities)
return filter(x -> eccentricities[x] == rad, keys(eccentricities))
end
function eccentricities_periphery(eccentricities)
diam = eccentricities_diameter(eccentricities)
return filter(x -> eccentricities[x] == diam, keys(eccentricities))
end
eccentricities_radius(eccentricities) = minimum(eccentricities)
eccentricities_diameter(eccentricities) = maximum(eccentricities)
function namedgraph_center(graph::AbstractNamedGraph, distmx)
return eccentricities_center(eccentricities(graph, vertices(graph), distmx))
end
function Graphs.center(graph::AbstractNamedGraph, distmx=weights(graph))
return namedgraph_center(graph, distmx)
end
# Fix for ambiguity error with `AbstractGraph`
function Graphs.center(graph::AbstractNamedGraph, distmx::AbstractMatrix)
return namedgraph_center(graph, distmx)
end
function namedgraph_radius(graph::AbstractNamedGraph, distmx)
return eccentricities_radius(eccentricities(graph, vertices(graph), distmx))
end
function Graphs.radius(graph::AbstractNamedGraph, distmx=weights(graph))
return namedgraph_radius(graph, distmx)
end
# Fix for ambiguity error with `AbstractGraph`
function Graphs.radius(graph::AbstractNamedGraph, distmx::AbstractMatrix)
return namedgraph_radius(graph, distmx)
end
function namedgraph_diameter(graph::AbstractNamedGraph, distmx)
return eccentricities_diameter(eccentricities(graph, vertices(graph), distmx))
end
function Graphs.diameter(graph::AbstractNamedGraph, distmx=weights(graph))
return namedgraph_diameter(graph, distmx)
end
# Fix for ambiguity error with `AbstractGraph`
function Graphs.diameter(graph::AbstractNamedGraph, distmx::AbstractMatrix)
return namedgraph_diameter(graph, distmx)
end
function namedgraph_periphery(graph::AbstractNamedGraph, distmx)
return eccentricities_periphery(eccentricities(graph, vertices(graph), distmx))
end
function Graphs.periphery(graph::AbstractNamedGraph, distmx=weights(graph))
return namedgraph_periphery(graph, distmx)
end
# Fix for ambiguity error with `AbstractGraph`
function Graphs.periphery(graph::AbstractNamedGraph, distmx::AbstractMatrix)
return namedgraph_periphery(graph, distmx)
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2657 | using Dictionaries: AbstractDictionary
using Graphs: Graphs, IsDirected, dst, edges, nv, src
using .GraphsExtensions: directed_graph
using LinearAlgebra: Symmetric
using SimpleTraits: SimpleTraits, Not, @traitfn
using SparseArrays: sparse, spzeros
# TODO: Move to `GraphsExtensions`.
function _symmetrize(dist::AbstractMatrix)
return sparse(Symmetric(dist))
end
# TODO: Move to `GraphsExtensions`.
function _symmetrize(dist)
symmetrized_dist = copy(dist)
for k in keys(dist)
symmetrized_dist[reverse(k)] = dist[k]
end
return symmetrized_dist
end
# TODO: Move to `GraphsExtensions`.
function _symmetrize(dist::AbstractDictionary)
symmetrized_dist = copy(dist)
for k in keys(dist)
insert!(symmetrized_dist, reverse(k), dist[k])
end
return symmetrized_dist
end
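# Commented sketch of the symmetrization behavior (hypothetical edge weights):
#
#   dist = Dict(("A", "B") => 2.0)
#   _symmetrize(dist) # Dict(("A", "B") => 2.0, ("B", "A") => 2.0)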
getindex_dist_matrix(dist_matrix, I...) = dist_matrix[I...]
getindex_dist_matrix(dist_matrix::AbstractDictionary, I...) = dist_matrix[I]
function namedgraph_dist_matrix_to_position_dist_matrix(
graph::AbstractNamedGraph, dist_matrix
)
position_dist_matrix = spzeros(valtype(dist_matrix), nv(graph), nv(graph))
for e in edges(graph)
position_e = edge_to_position_edge(graph, e)
position_dist_matrix[src(position_e), dst(position_e)] = getindex_dist_matrix(
dist_matrix, src(e), dst(e)
)
end
return position_dist_matrix
end
@traitfn function dist_matrix_to_position_dist_matrix(
graph::AbstractNamedGraph::IsDirected, dist_matrix
)
return namedgraph_dist_matrix_to_position_dist_matrix(graph, dist_matrix)
end
@traitfn function dist_matrix_to_position_dist_matrix(
graph::AbstractNamedGraph::(!IsDirected), dist_matrix
)
return _symmetrize(namedgraph_dist_matrix_to_position_dist_matrix(graph, dist_matrix))
end
function dist_matrix_to_position_dist_matrix(
graph::AbstractNamedGraph, distmx::Graphs.DefaultDistance
)
return distmx
end
"""
    DefaultNamedCapacity{G,T}
Structure that returns `1` if a forward edge exists in `flow_graph`, and `0` otherwise.
"""
struct DefaultNamedCapacity{G<:AbstractNamedGraph,T<:Integer} <: AbstractMatrix{T}
flow_graph::G
nv::T
end
DefaultNamedCapacity(graph::AbstractNamedGraph) = DefaultNamedCapacity(graph, nv(graph))
function _symmetrize(dist::DefaultNamedCapacity)
return DefaultNamedCapacity(directed_graph(dist.flow_graph))
end
# Base.getindex(d::DefaultNamedCapacity{T}, s, t) where {T} = has_edge(d.flow_graph, s, t) ? one(T) : zero(T)
# Base.size(d::DefaultNamedCapacity) = (Int(d.nv), Int(d.nv))
# Base.transpose(d::DefaultNamedCapacity) = DefaultNamedCapacity(reverse(d.flow_graph))
# Base.adjoint(d::DefaultNamedCapacity) = DefaultNamedCapacity(reverse(d.flow_graph))
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1143 | using Graphs: Graphs
using .GraphsExtensions: GraphsExtensions
struct NamedEdge{V} <: AbstractNamedEdge{V}
src::V
dst::V
NamedEdge{V}(src, dst) where {V} = new{V}(src, dst)
end
NamedEdge(src::V, dst::V) where {V} = NamedEdge{V}(src, dst)
NamedEdge(src, dst) = NamedEdge{promote_type(typeof(src), typeof(dst))}(src, dst)
function GraphsExtensions.convert_vertextype(vertextype::Type, ::Type{<:NamedEdge})
return NamedEdge{vertextype}
end
Graphs.src(e::NamedEdge) = e.src
Graphs.dst(e::NamedEdge) = e.dst
NamedEdge{V}(e::NamedEdge{V}) where {V} = e
NamedEdge(e::NamedEdge) = e
NamedEdge{V}(e::AbstractEdge) where {V} = NamedEdge{V}(src(e), dst(e))
NamedEdge(e::AbstractEdge) = NamedEdge(src(e), dst(e))
AbstractNamedEdge(e::AbstractEdge) = NamedEdge(e)
Base.convert(edgetype::Type{<:NamedEdge}, e::AbstractEdge) = edgetype(e)
NamedEdge(p::Tuple) = NamedEdge(p...)
NamedEdge(p::Pair) = NamedEdge(p...)
NamedEdge{V}(p::Pair) where {V} = NamedEdge{V}(p...)
NamedEdge{V}(p::Tuple) where {V} = NamedEdge{V}(p...)
# TODO: Define generic `set_vertices` in `GraphsExtensions`.
set_vertices(e::NamedEdge, src, dst) = NamedEdge(src, dst)
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 7692 | using Dictionaries: Dictionary
using Graphs:
Graphs,
AbstractGraph,
add_edge!,
add_vertex!,
edgetype,
has_edge,
is_directed,
outneighbors,
rem_vertex!,
vertices
using Graphs.SimpleGraphs: AbstractSimpleGraph, SimpleDiGraph, SimpleGraph
using .GraphsExtensions:
GraphsExtensions, vertextype, directed_graph_type, undirected_graph_type
using .OrderedDictionaries: OrderedDictionaries, OrderedIndices
using .OrdinalIndexing: th
struct GenericNamedGraph{V,G<:AbstractSimpleGraph{Int}} <: AbstractNamedGraph{V}
position_graph::G
vertices::OrderedIndices{V}
global function _GenericNamedGraph(position_graph, vertices)
@assert length(vertices) == nv(position_graph)
return new{eltype(vertices),typeof(position_graph)}(position_graph, vertices)
end
end
# AbstractNamedGraph required interface.
function position_graph_type(graph_type::Type{<:GenericNamedGraph})
return fieldtype(graph_type, :position_graph)
end
position_graph(graph::GenericNamedGraph) = getfield(graph, :position_graph)
function vertex_positions(graph::GenericNamedGraph)
return OrderedDictionaries.index_positions(vertices(graph))
end
function ordered_vertices(graph::GenericNamedGraph)
return OrderedDictionaries.ordered_indices(vertices(graph))
end
# TODO: Decide what this should output.
Graphs.vertices(graph::GenericNamedGraph) = getfield(graph, :vertices)
function Graphs.add_vertex!(graph::GenericNamedGraph, vertex)
if vertex ∈ vertices(graph)
return false
end
add_vertex!(position_graph(graph))
insert!(vertices(graph), vertex)
return true
end
function Graphs.rem_vertex!(graph::GenericNamedGraph, vertex)
if vertex ∉ vertices(graph)
return false
end
position_vertex = vertex_positions(graph)[vertex]
rem_vertex!(position_graph(graph), position_vertex)
delete!(vertices(graph), vertex)
return graph
end
function GraphsExtensions.rename_vertices(f::Function, graph::GenericNamedGraph)
# TODO: Fix broadcasting of `OrderedIndices`.
# return GenericNamedGraph(position_graph(graph), f.(vertices(graph)))
return GenericNamedGraph(position_graph(graph), map(f, vertices(graph)))
end
function GraphsExtensions.rename_vertices(f::Function, g::AbstractSimpleGraph)
return error(
"Can't rename the vertices of a graph of type `$(typeof(g)) <: AbstractSimpleGraph`, try converting to a named graph.",
)
end
function GraphsExtensions.convert_vertextype(vertextype::Type, graph::GenericNamedGraph)
return GenericNamedGraph(
position_graph(graph), convert(Vector{vertextype}, ordered_vertices(graph))
)
end
#
# Constructors from `AbstractSimpleGraph`
#
to_vertices(vertices) = vertices
to_vertices(vertices::AbstractArray) = vec(vertices)
to_vertices(vertices::Integer) = Base.OneTo(vertices)
# Inner constructor
# TODO: Is this needed?
function GenericNamedGraph{V,G}(
position_graph::G, vertices::OrderedIndices{V}
) where {V,G<:AbstractSimpleGraph{Int}}
return _GenericNamedGraph(position_graph, vertices)
end
function GenericNamedGraph{V,G}(
position_graph::AbstractSimpleGraph, vertices
) where {V,G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{V,G}(
convert(G, position_graph), OrderedIndices{V}(to_vertices(vertices))
)
end
function GenericNamedGraph{V}(position_graph::AbstractSimpleGraph, vertices) where {V}
return GenericNamedGraph{V,typeof(position_graph)}(position_graph, vertices)
end
function GenericNamedGraph{<:Any,G}(
position_graph::AbstractSimpleGraph, vertices
) where {G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{eltype(vertices),G}(position_graph, vertices)
end
function GenericNamedGraph{<:Any,G}(
position_graph::AbstractSimpleGraph
) where {G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{<:Any,G}(position_graph, vertices(position_graph))
end
function GenericNamedGraph(position_graph::AbstractSimpleGraph, vertices)
return GenericNamedGraph{eltype(vertices)}(position_graph, vertices)
end
function GenericNamedGraph(position_graph::AbstractSimpleGraph)
return GenericNamedGraph(position_graph, vertices(position_graph))
end
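# Commented construction sketch (assumes Graphs.jl's `path_graph`):
#
#   using Graphs: path_graph
#   g = GenericNamedGraph(path_graph(3), ["A", "B", "C"])
#   # or, via the alias defined at the bottom of this file:
#   # g = NamedGraph(path_graph(3), ["A", "B", "C"])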
#
# Tautological constructors
#
function GenericNamedGraph{V,G}(
graph::GenericNamedGraph{V,G}
) where {V,G<:AbstractSimpleGraph{Int}}
return copy(graph)
end
#
# Constructors from vertex names
#
function GenericNamedGraph{V,G}(vertices) where {V,G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph(G(length(to_vertices(vertices))), vertices)
end
function GenericNamedGraph{V}(vertices) where {V}
return GenericNamedGraph{V,SimpleGraph{Int}}(vertices)
end
function GenericNamedGraph{<:Any,G}(vertices) where {G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{eltype(vertices),G}(vertices)
end
function GenericNamedGraph(vertices)
return GenericNamedGraph{eltype(vertices)}(vertices)
end
#
# Empty constructors
#
GenericNamedGraph{V,G}() where {V,G<:AbstractSimpleGraph{Int}} = GenericNamedGraph{V,G}(V[])
GenericNamedGraph{V}() where {V} = GenericNamedGraph{V}(V[])
function GenericNamedGraph{<:Any,G}() where {G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{<:Any,G}(Any[])
end
GenericNamedGraph() = GenericNamedGraph(Any[])
function GenericNamedGraph(graph::GenericNamedGraph)
  return GenericNamedGraph{vertextype(graph),position_graph_type(typeof(graph))}(graph)
end
function GenericNamedGraph{V}(graph::GenericNamedGraph) where {V}
  return GenericNamedGraph{V,position_graph_type(typeof(graph))}(graph)
end
function GenericNamedGraph{<:Any,G}(
graph::GenericNamedGraph
) where {G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{vertextype(graph),G}(graph)
end
function GenericNamedGraph{V,G}(
graph::GenericNamedGraph
) where {V,G<:AbstractSimpleGraph{Int}}
return GenericNamedGraph{V,G}(copy(position_graph(graph)), copy(vertices(graph)))
end
function Base.convert(graph_type::Type{<:GenericNamedGraph}, graph::GenericNamedGraph)
return graph_type(graph)
end
# TODO: implement as:
# graph = set_position_graph(graph, copy(position_graph(graph)))
# graph = set_vertices(graph, copy(vertices(graph)))
function Base.copy(graph::GenericNamedGraph)
return GenericNamedGraph(copy(position_graph(graph)), copy(vertices(graph)))
end
Graphs.edgetype(graph_type::Type{<:GenericNamedGraph}) = NamedEdge{vertextype(graph_type)}
Graphs.edgetype(graph::GenericNamedGraph) = edgetype(typeof(graph))
function GraphsExtensions.directed_graph_type(graph_type::Type{<:GenericNamedGraph})
return GenericNamedGraph{
vertextype(graph_type),directed_graph_type(position_graph_type(graph_type))
}
end
function GraphsExtensions.undirected_graph_type(graph_type::Type{<:GenericNamedGraph})
return GenericNamedGraph{
vertextype(graph_type),undirected_graph_type(position_graph_type(graph_type))
}
end
function Graphs.is_directed(graph_type::Type{<:GenericNamedGraph})
return is_directed(position_graph_type(graph_type))
end
# TODO: Implement an edgelist version
function namedgraph_induced_subgraph(graph::AbstractGraph, subvertices)
subgraph = typeof(graph)(subvertices)
subvertices_set = Set(subvertices)
for src in subvertices
for dst in outneighbors(graph, src)
if dst in subvertices_set && has_edge(graph, src, dst)
add_edge!(subgraph, src => dst)
end
end
end
return subgraph, nothing
end
function Graphs.induced_subgraph(graph::AbstractNamedGraph, subvertices)
return namedgraph_induced_subgraph(graph, subvertices)
end
function Graphs.induced_subgraph(graph::AbstractNamedGraph, subvertices::Vector{<:Integer})
return namedgraph_induced_subgraph(graph, subvertices)
end
#
# Type aliases
#
const NamedGraph{V} = GenericNamedGraph{V,SimpleGraph{Int}}
const NamedDiGraph{V} = GenericNamedGraph{V,SimpleDiGraph{Int}}
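# Hypothetical usage sketch (not part of the original source), assuming only the
# constructors and aliases defined above plus `Graphs.path_graph`:
#
#   julia> using Graphs: add_edge!, has_edge, path_graph
#
#   julia> using NamedGraphs: NamedGraph
#
#   julia> g = NamedGraph(path_graph(3), ["a", "b", "c"]);
#
#   julia> has_edge(g, "a" => "b")
#   true
#
#   julia> add_edge!(g, "a" => "c");
#
#   julia> has_edge(g, "a" => "c")
#   true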
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 3389 | using Dictionaries: Dictionary
using Graphs: Graphs, dijkstra_shortest_paths, weights
"""
struct NamedDijkstraState{V,T}
An [`AbstractPathState`](@ref) designed for Dijkstra shortest-paths calculations.
"""
struct NamedDijkstraState{V,T<:Real} <: Graphs.AbstractPathState
parents::Dictionary{V,V}
dists::Dictionary{V,T}
predecessors::Vector{Vector{V}}
pathcounts::Dictionary{V,Float64}
closest_vertices::Vector{V}
end
function NamedDijkstraState(parents, dists, predecessors, pathcounts, closest_vertices)
return NamedDijkstraState{keytype(parents),eltype(dists)}(
parents,
dists,
convert.(Vector{eltype(parents)}, predecessors),
pathcounts,
convert(Vector{eltype(parents)}, closest_vertices),
)
end
function position_path_state_to_path_state(
graph::AbstractNamedGraph, position_path_state::Graphs.DijkstraState
)
position_path_state_parents = map(eachindex(position_path_state.parents)) do i
pᵢ = position_path_state.parents[i]
return iszero(pᵢ) ? i : pᵢ
end
# Works around issue in this `Dictionary` constructor:
# https://github.com/andyferris/Dictionaries.jl/blob/v0.4.1/src/Dictionary.jl#L139-L145
# when `inds` has holes. This removes the holes.
# TODO: Raise an issue with `Dictionaries.jl`.
## graph_vertices = Indices(collect(vertices(graph)))
# This makes the vertices ordered according to the parent vertices.
graph_vertices = map(v -> ordered_vertices(graph)[v], vertices(position_graph(graph)))
return NamedDijkstraState(
Dictionary(
graph_vertices, map(v -> ordered_vertices(graph)[v], position_path_state_parents)
),
Dictionary(graph_vertices, position_path_state.dists),
map(x -> map(v -> ordered_vertices(graph)[v], x), position_path_state.predecessors),
Dictionary(graph_vertices, position_path_state.pathcounts),
map(v -> ordered_vertices(graph)[v], position_path_state.closest_vertices),
)
end
function namedgraph_dijkstra_shortest_paths(
graph::AbstractNamedGraph,
srcs,
distmx=weights(graph);
allpaths=false,
trackvertices=false,
)
position_path_state = dijkstra_shortest_paths(
position_graph(graph),
map(v -> vertex_positions(graph)[v], srcs),
dist_matrix_to_position_dist_matrix(graph, distmx);
allpaths,
trackvertices,
)
return position_path_state_to_path_state(graph, position_path_state)
end
function Graphs.dijkstra_shortest_paths(
graph::AbstractNamedGraph, srcs, distmx=weights(graph); kwargs...
)
return namedgraph_dijkstra_shortest_paths(graph, srcs, distmx; kwargs...)
end
# Fix ambiguity error with `AbstractGraph` version
function Graphs.dijkstra_shortest_paths(
graph::AbstractNamedGraph,
srcs::Vector{<:Integer},
distmx::AbstractMatrix{<:Real}=weights(graph);
kwargs...,
)
return namedgraph_dijkstra_shortest_paths(graph, srcs, distmx; kwargs...)
end
function Graphs.dijkstra_shortest_paths(
graph::AbstractNamedGraph, vertex::Integer, distmx::AbstractMatrix; kwargs...
)
return namedgraph_dijkstra_shortest_paths(graph, [vertex], distmx; kwargs...)
end
for f in [
:(Graphs.bellman_ford_shortest_paths),
:(Graphs.desopo_pape_shortest_paths),
:(Graphs.floyd_warshall_shortest_paths),
:(Graphs.johnson_shortest_paths),
:(Graphs.yen_k_shortest_paths),
]
@eval begin
function $f(graph::AbstractNamedGraph, args...; kwargs...)
return not_implemented()
end
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 571 | using Graphs: Graphs, IsDirected, nv, steiner_tree
using SimpleTraits: SimpleTraits, Not, @traitfn
@traitfn function Graphs.steiner_tree(
g::AbstractNamedGraph::(!IsDirected), term_vert, distmx=weights(g)
)
position_tree = steiner_tree(
position_graph(g),
map(v -> vertex_positions(g)[v], term_vert),
dist_matrix_to_position_dist_matrix(g, distmx),
)
tree = typeof(g)(position_tree, map(v -> ordered_vertices(g)[v], vertices(position_tree)))
for v in copy(vertices(tree))
iszero(degree(tree, v)) && rem_vertex!(tree, v)
end
return tree
end
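# Hypothetical usage sketch (not part of the original source), assuming the
# default edge weights: on the named path graph "a" - "b" - "c", the Steiner
# tree connecting the terminals "a" and "c" has to pass through "b", so it
# keeps both edges.
#
#   julia> using Graphs: ne, path_graph, steiner_tree
#
#   julia> using NamedGraphs: NamedGraph
#
#   julia> g = NamedGraph(path_graph(3), ["a", "b", "c"]);
#
#   julia> ne(steiner_tree(g, ["a", "c"]))
#   2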
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 45 | not_implemented() = error("Not implemented")
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1269 | module GraphGenerators
using Dictionaries: Dictionary
using Graphs: add_edge!, dst, edges, nv, src
using Graphs.SimpleGraphs: SimpleDiGraph, SimpleGraph, binary_tree
function comb_tree(dims::Tuple)
@assert length(dims) == 2
nx, ny = dims
return comb_tree(fill(ny, nx))
end
function comb_tree(tooth_lengths::Vector{<:Integer})
@assert all(>(0), tooth_lengths)
nv = sum(tooth_lengths)
nx = length(tooth_lengths)
ny = maximum(tooth_lengths)
vertex_coordinates = filter(Tuple.(CartesianIndices((nx, ny)))) do (jx, jy)
jy <= tooth_lengths[jx]
end
coordinate_to_vertex = Dictionary(vertex_coordinates, 1:nv)
graph = SimpleGraph(nv)
for (jx, jy) in vertex_coordinates
if jy == 1 && jx < nx
add_edge!(graph, coordinate_to_vertex[(jx, jy)], coordinate_to_vertex[(jx + 1, jy)])
end
if jy < tooth_lengths[jx]
add_edge!(graph, coordinate_to_vertex[(jx, jy)], coordinate_to_vertex[(jx, jy + 1)])
end
end
return graph
end
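# Hypothetical usage sketch (not part of the original source): a comb tree with
# 3 teeth of length 2 has 3 * 2 = 6 vertices and, being a tree, 5 edges.
#
#   julia> using Graphs: ne, nv
#
#   julia> using NamedGraphs.GraphGenerators: comb_tree
#
#   julia> g = comb_tree((3, 2));
#
#   julia> (nv(g), ne(g))
#   (6, 5)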
# TODO: More efficient implementation based
# on the implementation of `binary_tree`.
function binary_arborescence(k::Integer)
graph = binary_tree(k)
digraph = SimpleDiGraph(nv(graph))
for e in edges(graph)
@assert dst(e) > src(e)
add_edge!(digraph, e)
end
return digraph
end
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 265 | module GraphsExtensions
include("abstractgraph.jl")
include("abstracttrees.jl")
include("boundary.jl")
include("neighbors.jl")
include("shortestpaths.jl")
include("symrcm.jl")
include("partitioning.jl")
include("trees_and_forests.jl")
include("simplegraph.jl")
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 19248 | using Dictionaries: Dictionary, Indices, dictionary
using Graphs:
Graphs,
AbstractEdge,
AbstractGraph,
IsDirected,
Δ,
a_star,
add_edge!,
  add_vertex!,
  all_neighbors,
degree,
dfs_tree,
eccentricity,
edgetype,
has_edge,
has_vertex,
indegree,
induced_subgraph,
inneighbors,
is_connected,
is_cyclic,
is_directed,
is_tree,
outdegree,
outneighbors,
ne,
neighbors,
nv,
rem_edge!,
rem_vertex!,
weights
using SimpleTraits: SimpleTraits, Not, @traitfn
using SplitApplyCombine: groupfind
not_implemented() = error("Not implemented")
is_self_loop(e::AbstractEdge) = src(e) == dst(e)
is_self_loop(e::Pair) = first(e) == last(e)
directed_graph_type(::Type{<:AbstractGraph}) = not_implemented()
undirected_graph_type(::Type{<:AbstractGraph}) = not_implemented()
# TODO: Implement generic version for `IsDirected`
# directed_graph_type(G::Type{IsDirected}) = G
directed_graph_type(g::AbstractGraph) = directed_graph_type(typeof(g))
undirected_graph_type(g::AbstractGraph) = undirected_graph_type(typeof(g))
@traitfn directed_graph(graph::::IsDirected) = graph
convert_vertextype(::Type{V}, graph::AbstractGraph{V}) where {V} = graph
function convert_vertextype(V::Type, graph::AbstractGraph)
return not_implemented()
end
function graph_from_vertices(graph_type::Type{<:AbstractGraph}, vertices)
return graph_type(vertices)
end
# TODO: Handle metadata in a generic way
@traitfn function directed_graph(graph::::(!IsDirected))
digraph = graph_from_vertices(directed_graph_type(graph), vertices(graph))
for e in edges(graph)
add_edge!(digraph, e)
add_edge!(digraph, reverse(e))
end
return digraph
end
@traitfn undirected_graph(graph::::(!IsDirected)) = graph
# TODO: Handle metadata in a generic way
# Must have the same argument name as:
# @traitfn undirected_graph(graph::::(!IsDirected))
# to avoid method overwrite warnings, see:
# https://github.com/mauro3/SimpleTraits.jl#method-overwritten-warnings
@traitfn function undirected_graph(graph::::IsDirected)
undigraph = graph_from_vertices(undirected_graph_type(typeof(graph)), vertices(graph))
for e in edges(graph)
# TODO: Check for repeated edges?
add_edge!(undigraph, e)
end
return undigraph
end
# Similar to `eltype`, but `eltype` doesn't work on types
vertextype(::Type{<:AbstractGraph{V}}) where {V} = V
vertextype(graph::AbstractGraph) = vertextype(typeof(graph))
function has_vertices(graph::AbstractGraph, vertices)
return all(v -> has_vertex(graph, v), vertices)
end
function has_edges(graph::AbstractGraph, edges)
return all(e -> has_edge(graph, e), edges)
end
# Uniform interface for `outneighbors`, `inneighbors`, and `all_neighbors`
function _neighbors(graph::AbstractGraph, vertex; dir=:out)
if dir == :out
return outneighbors(graph, vertex)
elseif dir == :in
return inneighbors(graph, vertex)
elseif dir == :both
return all_neighbors(graph, vertex)
end
return error(
"`_neighbors(graph::AbstractGraph, vertex; dir)` with `dir = $(dir) not implemented. Use either `dir = :out`, `dir = :in`, or `dir = :both`.",
)
end
# Returns just the edges of a directed graph,
# but both edge directions of an undirected graph.
# TODO: Move to NamedGraphs.jl
@traitfn function all_edges(g::::IsDirected)
return edges(g)
end
@traitfn function all_edges(g::::(!IsDirected))
e = edges(g)
return Iterators.flatten(zip(e, reverse.(e)))
end
# Alternative syntax to `getindex` for getting a subgraph
# TODO: Should this preserve vertex names by
# converting to `NamedGraph` if indexed by
# something besides `Base.OneTo`?
function subgraph(graph::AbstractGraph, vertices)
return induced_subgraph(graph, vertices)[1]
end
# TODO: Should this preserve vertex names by
# converting to `NamedGraph`?
function subgraph(f::Function, graph::AbstractGraph)
return subgraph(graph, filter(f, vertices(graph)))
end
function degrees(graph::AbstractGraph, vertices=vertices(graph))
return map(vertex -> degree(graph, vertex), vertices)
end
function indegrees(graph::AbstractGraph, vertices=vertices(graph))
return map(vertex -> indegree(graph, vertex), vertices)
end
function outdegrees(graph::AbstractGraph, vertices=vertices(graph))
return map(vertex -> outdegree(graph, vertex), vertices)
end
# `Graphs.is_tree` only works on undirected graphs.
# TODO: Raise an issue.
@traitfn function is_ditree(graph::AbstractGraph::IsDirected)
# For directed graphs, `is_connected(graph)` returns `true`
# if `graph` is weakly connected.
return is_connected(graph) && ne(graph) == nv(graph) - 1
end
# TODO: Define in `Graphs.jl`.
# https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.recognition.is_tree.html
# https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.tree.recognition.is_arborescence.html
# https://networkx.org/documentation/stable/_modules/networkx/algorithms/tree/recognition.html#is_arborescence
# https://networkx.org/documentation/stable/_modules/networkx/algorithms/tree/recognition.html#is_tree
# https://en.wikipedia.org/wiki/Arborescence_(graph_theory)
# directed rooted tree
@traitfn function is_arborescence(graph::AbstractGraph::IsDirected)
return is_ditree(graph) && all(v -> indegree(graph, v) ≤ 1, vertices(graph))
end
#
# Graph unions
#
# Function `f` maps original vertices `vᵢ` of `g`
# to new vertices `f(vᵢ)` of the output graph.
rename_vertices(f, g::AbstractGraph) = not_implemented()
# TODO: Does this relabel the vertices and/or change the adjacency matrix?
function permute_vertices(graph::AbstractGraph, permutation)
return not_implemented()
end
# https://en.wikipedia.org/wiki/Disjoint_union
# Input maps the new index being appended to the vertices
# to the associated graph.
function disjoint_union(graphs::Dictionary{<:Any,<:AbstractGraph})
return reduce(union, (rename_vertices(v -> (v, i), graphs[i]) for i in keys(graphs)))
end
function disjoint_union(graphs::Vector{<:AbstractGraph})
return disjoint_union(Dictionary(graphs))
end
disjoint_union(graph::AbstractGraph) = graph
function disjoint_union(graph1::AbstractGraph, graphs_tail::AbstractGraph...)
return disjoint_union(Dictionary([graph1, graphs_tail...]))
end
function disjoint_union(pairs::Pair...)
return disjoint_union([pairs...])
end
function disjoint_union(iter::Vector{<:Pair})
return disjoint_union(dictionary(iter))
end
function ⊔(graphs...; kwargs...)
return disjoint_union(graphs...; kwargs...)
end
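# Hypothetical usage sketch (not part of the original source). `disjoint_union`
# relies on `rename_vertices`, so it needs a graph type that implements it, such
# as `NamedGraph` from the parent `NamedGraphs` package; each vertex `v` of the
# `i`-th input graph becomes `(v, i)` in the result.
#
#   julia> using Graphs: vertices
#
#   julia> using NamedGraphs: NamedGraph
#
#   julia> using NamedGraphs.GraphsExtensions: ⊔
#
#   julia> g = NamedGraph(["a", "b"]) ⊔ NamedGraph(["c"]);
#
#   julia> issetequal(vertices(g), [("a", 1), ("b", 1), ("c", 2)])
#   true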
"""
Check if an undirected graph is a path/linear graph:
https://en.wikipedia.org/wiki/Path_graph
but not a path/linear forest:
https://en.wikipedia.org/wiki/Linear_forest
"""
@traitfn function is_path_graph(graph::::(!IsDirected))
return is_tree(graph) && (Δ(graph) == 2)
end
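# Hypothetical usage sketch (not part of the original source):
#
#   julia> using Graphs: cycle_graph, path_graph
#
#   julia> using NamedGraphs.GraphsExtensions: is_path_graph
#
#   julia> is_path_graph(path_graph(4))
#   true
#
#   julia> is_path_graph(cycle_graph(4))
#   false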
"""
https://juliagraphs.org/Graphs.jl/dev/core_functions/simplegraphs_generators/#Graphs.SimpleGraphs.cycle_graph-Tuple%7BT%7D%20where%20T%3C:Integer
https://en.wikipedia.org/wiki/Cycle_graph
"""
@traitfn function is_cycle_graph(graph::::(!IsDirected))
return all(==(2), degrees(graph))
end
function out_incident_edges(graph::AbstractGraph, vertex)
return [
edgetype(graph)(vertex, neighbor_vertex) for
neighbor_vertex in outneighbors(graph, vertex)
]
end
function in_incident_edges(graph::AbstractGraph, vertex)
return [
edgetype(graph)(neighbor_vertex, vertex) for
neighbor_vertex in inneighbors(graph, vertex)
]
end
# TODO: Only return one set of `:out` edges for undirected graphs if `dir=:both`.
function all_incident_edges(graph::AbstractGraph, vertex)
return out_incident_edges(graph, vertex) ∪ in_incident_edges(graph, vertex)
end
# TODO: Same as `edges(subgraph(graph, [vertex; neighbors(graph, vertex)]))`.
# TODO: Only return one set of `:out` edges for undirected graphs if `dir=:both`.
"""
incident_edges(graph::AbstractGraph, vertex; dir=:out)
Edges incident to the vertex `vertex`.
`dir ∈ (:in, :out, :both)`, defaults to `:out`.
For undirected graphs, returns all incident edges.
Like: https://juliagraphs.org/Graphs.jl/v1.7/algorithms/linalg/#Graphs.LinAlg.adjacency_matrix
"""
function incident_edges(graph::AbstractGraph, vertex; dir=:out)
if dir == :out
return out_incident_edges(graph, vertex)
elseif dir == :in
return in_incident_edges(graph, vertex)
elseif dir == :both
return all_incident_edges(graph, vertex)
end
return error("dir = $dir not supported.")
end
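# Hypothetical usage sketch (not part of the original source): on the undirected
# path graph 1 - 2 - 3, the edges incident to vertex 2 (with the default
# `dir=:out`) are 2 => 1 and 2 => 3.
#
#   julia> using Graphs: Edge, path_graph
#
#   julia> using NamedGraphs.GraphsExtensions: incident_edges
#
#   julia> incident_edges(path_graph(3), 2) == [Edge(2, 1), Edge(2, 3)]
#   true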
# Get the leaf vertices of a tree-like graph
#
# For the directed case, could also use `AbstractTrees`:
#
# root_index = findfirst(vertex -> length(outneighbors(vertex)) == length(neighbors(vertex)), vertices(graph))
# root = vertices(graph)[root_index]
# map(nodevalue, Leaves(tree_graph_node(graph, root)))
#
@traitfn function is_leaf_vertex(graph::::(!IsDirected), vertex)
# @assert !is_cyclic(graph)
return isone(length(neighbors(graph, vertex)))
end
# Check if a vertex is a leaf.
# Assumes the graph is a DAG.
@traitfn function is_leaf_vertex(graph::::IsDirected, vertex)
# @assert !is_cyclic(graph)
return isempty(child_vertices(graph, vertex))
end
# Get the children of a vertex.
# Assumes the graph is a DAG.
@traitfn function child_vertices(graph::::IsDirected, vertex)
# @assert !is_cyclic(graph)
return outneighbors(graph, vertex)
end
# Get the edges from the input vertex towards the child vertices.
# Assumes the graph is a DAG.
@traitfn function child_edges(graph::::IsDirected, vertex)
# @assert !is_cyclic(graph)
return map(child -> edgetype(graph)(vertex, child), child_vertices(graph, vertex))
end
function leaf_vertices(graph::AbstractGraph)
# @assert !is_cyclic(graph)
return filter(v -> is_leaf_vertex(graph, v), vertices(graph))
end
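# Hypothetical usage sketch (not part of the original source): the leaves of the
# undirected path graph 1 - 2 - 3 are its two endpoints.
#
#   julia> using Graphs: path_graph
#
#   julia> using NamedGraphs.GraphsExtensions: leaf_vertices
#
#   julia> leaf_vertices(path_graph(3)) == [1, 3]
#   true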
"""
Determine if an edge involves a leaf (at src or dst)
"""
@traitfn function is_leaf_edge(g::::(!IsDirected), e::AbstractEdge)
return has_edge(g, e) && (is_leaf_vertex(g, src(e)) || is_leaf_vertex(g, dst(e)))
end
@traitfn function is_leaf_edge(g::::IsDirected, e::AbstractEdge)
return has_edge(g, e) && is_leaf_vertex(g, dst(e))
end
function is_leaf_edge(g::AbstractGraph, e::Pair)
return is_leaf_edge(g, edgetype(g)(e))
end
"""
Determine if a node has any neighbors which are leaves
"""
function has_leaf_neighbor(g::AbstractGraph, v)
return any(w -> is_leaf_vertex(g, w), neighbors(g, v))
end
"""
Get all edges which do not involve a leaf
https://en.wikipedia.org/wiki/Tree_(graph_theory)#Definitions
"""
function non_leaf_edges(g::AbstractGraph)
return Iterators.filter(e -> !is_leaf_edge(g, e), edges(g))
end
"""
Get distance of a vertex from a leaf
"""
function distance_to_leaves(g::AbstractGraph, v)
return map(Indices(leaf_vertices(g))) do leaf
v == leaf && return 0
path = a_star(g, v, leaf)
isempty(path) && return typemax(Int)
return length(path)
end
end
function minimum_distance_to_leaves(g::AbstractGraph, v)
return minimum(distance_to_leaves(g, v))
end
@traitfn function is_root_vertex(graph::::IsDirected, vertex)
return isempty(parent_vertices(graph, vertex))
end
@traitfn function is_rooted(graph::::IsDirected)
return isone(count(v -> is_root_vertex(graph, v), vertices(graph)))
end
@traitfn function is_binary_arborescence(graph::AbstractGraph::IsDirected)
(is_rooted(graph) && is_arborescence(graph)) || return false
for v in vertices(graph)
if length(child_vertices(graph, v)) > 2
return false
end
end
return true
end
"""
Return the root vertex of a rooted directed graph.
This will return the first root vertex that is found,
so won't error if there is more than one.
"""
@traitfn function root_vertex(graph::::IsDirected)
if is_cyclic(graph)
return error("Graph must not have any cycles.")
end
v = first(vertices(graph))
while !is_root_vertex(graph, v)
v = parent_vertex(graph, v)
end
return v
end
#
# Graph iteration
#
@traitfn function post_order_dfs_vertices(graph::::(!IsDirected), root_vertex)
dfs_tree_graph = dfs_tree(graph, root_vertex)
return post_order_dfs_vertices(dfs_tree_graph, root_vertex)
end
# Traverse the tree using a [post-order depth-first search](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search), returning the vertices.
# Assumes the graph is a [rooted directed tree](https://en.wikipedia.org/wiki/Tree_(graph_theory)#Rooted_tree)
@traitfn function post_order_dfs_vertices(graph::::IsDirected, root_vertex)
# @assert is_tree(graph)
# Outputs a rooted directed tree (https://en.wikipedia.org/wiki/Arborescence_(graph_theory))
return map(nodevalue, PostOrderDFS(tree_graph_node(graph, root_vertex)))
end
@traitfn function pre_order_dfs_vertices(graph::::(!IsDirected), root_vertex)
dfs_tree_graph = dfs_tree(graph, root_vertex)
return pre_order_dfs_vertices(dfs_tree_graph, root_vertex)
end
@traitfn function pre_order_dfs_vertices(graph::::IsDirected, root_vertex)
# @assert is_tree(graph)
return map(nodevalue, PreOrderDFS(tree_graph_node(graph, root_vertex)))
end
@traitfn function post_order_dfs_edges(graph::::(!IsDirected), root_vertex)
dfs_tree_graph = dfs_tree(graph, root_vertex)
return post_order_dfs_edges(dfs_tree_graph, root_vertex)
end
# Traverse the tree using a [post-order depth-first search](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search), returning the edges where the source is the current vertex and the destination is the parent vertex.
# Assumes the graph is a [rooted directed tree](https://en.wikipedia.org/wiki/Tree_(graph_theory)#Rooted_tree).
# Returns a list of edges directed **towards the root vertex**!
@traitfn function post_order_dfs_edges(graph::::IsDirected, root_vertex)
# @assert is_tree(graph)
vertices = post_order_dfs_vertices(graph, root_vertex)
# Remove the root vertex
pop!(vertices)
return map(vertex -> parent_edge(graph, vertex), vertices)
end
# Paths for undirected tree-like graphs
# TODO: Use `a_star`.
@traitfn function vertex_path(graph::::(!IsDirected), s, t)
# @assert is_tree(graph)
dfs_tree_graph = dfs_tree(graph, t)
return vertex_path(dfs_tree_graph, s, t)
end
# TODO: Use `a_star`.
@traitfn function edge_path(graph::::(!IsDirected), s, t)
# @assert is_tree(graph)
dfs_tree_graph = dfs_tree(graph, t)
return edge_path(dfs_tree_graph, s, t)
end
#
# Rooted directed tree/directed acyclic graph functions.
# [Rooted directed tree](https://en.wikipedia.org/wiki/Tree_(graph_theory)#Rooted_tree)
# [Directed acyclic graph (DAG)](https://en.wikipedia.org/wiki/Directed_acyclic_graph)
#
# Get the parent vertices of a vertex.
# Assumes the graph is a DAG.
@traitfn function parent_vertices(graph::::IsDirected, vertex)
# @assert !is_cyclic(graph)
return inneighbors(graph, vertex)
end
# Get the parent vertex of a vertex.
# Assumes the graph is a DAG.
@traitfn function parent_vertex(graph::::IsDirected, vertex)
# @assert !is_cyclic(graph)
parents = parent_vertices(graph, vertex)
return isempty(parents) ? nothing : only(parents)
end
# Returns the edges directed **towards the parent vertices**!
# Assumes the graph is a DAG.
@traitfn function parent_edges(graph::::IsDirected, vertex)
# @assert !is_cyclic(graph)
return map(parent -> edgetype(graph)(vertex, parent), parent_vertices(graph, vertex))
end
# Returns the edge directed **towards the parent vertex**!
# Assumes the graph is a DAG.
@traitfn function parent_edge(graph::::IsDirected, vertex)
parents = parent_edges(graph, vertex)
return isempty(parents) ? nothing : only(parents)
end
# Paths for directed tree-like graphs
# TODO: Use `a_star`, make specialized versions:
# `vertex_path(graph::::IsTree, ...)`
# or
# `tree_vertex_path(graph, ...)`
@traitfn function vertex_path(graph::::IsDirected, s, t)
# @assert is_tree(graph)
vertices = eltype(graph)[s]
while vertices[end] != t
parent = parent_vertex(graph, vertices[end])
isnothing(parent) && return nothing
push!(vertices, parent)
end
return vertices
end
# TODO: Use `a_star`, make specialized versions:
# `vertex_path(graph::::IsTree, ...)`
# or
# `tree_vertex_path(graph, ...)`
@traitfn function edge_path(graph::::IsDirected, s, t)
# @assert is_tree(graph)
vertices = vertex_path(graph, s, t)
isnothing(vertices) && return nothing
pop!(vertices)
return [edgetype(graph)(vertex, parent_vertex(graph, vertex)) for vertex in vertices]
end
function mincut_partitions(graph::AbstractGraph, distmx=weights(graph))
parts = groupfind(first(Graphs.mincut(graph, distmx)))
return parts[1], parts[2]
end
function add_vertex(g::AbstractGraph, vs)
g = copy(g)
add_vertex!(g, vs)
return g
end
function add_vertices!(graph::AbstractGraph, vs)
for vertex in vs
add_vertex!(graph, vertex)
end
return graph
end
function add_vertices(g::AbstractGraph, vs)
g = copy(g)
add_vertices!(g, vs)
return g
end
function rem_vertex(g::AbstractGraph, vs)
g = copy(g)
rem_vertex!(g, vs)
return g
end
"""Remove a list of vertices from a graph g"""
function rem_vertices!(g::AbstractGraph, vs)
for v in vs
rem_vertex!(g, v)
end
return g
end
function rem_vertices(g::AbstractGraph, vs)
g = copy(g)
rem_vertices!(g, vs)
return g
end
function add_edge(g::AbstractGraph, edge)
g = copy(g)
add_edge!(g, edgetype(g)(edge))
return g
end
"""Add a list of edges to a graph g"""
function add_edges!(g::AbstractGraph, edges)
for e in edges
add_edge!(g, edgetype(g)(e))
end
return g
end
function add_edges(g::AbstractGraph, edges)
g = copy(g)
add_edges!(g, edges)
return g
end
function rem_edge(g::AbstractGraph, edge)
g = copy(g)
rem_edge!(g, edgetype(g)(edge))
return g
end
"""Remove a list of edges from a graph g"""
function rem_edges!(g::AbstractGraph, edges)
for e in edges
rem_edge!(g, edgetype(g)(e))
end
return g
end
function rem_edges(g::AbstractGraph, edges)
g = copy(g)
rem_edges!(g, edges)
return g
end
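# Hypothetical usage sketch (not part of the original source): the non-mutating
# variants above return a modified copy and leave the input graph unchanged.
#
#   julia> using Graphs: SimpleGraph, ne
#
#   julia> using NamedGraphs.GraphsExtensions: add_edges
#
#   julia> g = SimpleGraph(3);
#
#   julia> g2 = add_edges(g, [1 => 2, 2 => 3]);
#
#   julia> (ne(g), ne(g2))
#   (0, 2)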
eccentricities(graph::AbstractGraph) = eccentricities(graph, vertices(graph))
function eccentricities(graph::AbstractGraph, vs, distmx=weights(graph))
return map(vertex -> eccentricity(graph, vertex, distmx), vs)
end
function decorate_graph_edges(g::AbstractGraph; kwargs...)
return not_implemented()
end
function decorate_graph_vertices(g::AbstractGraph; kwargs...)
return not_implemented()
end
""" Do a BFS search to construct a tree, but do it with randomness to avoid generating the same tree. Based on Int. J. Comput. Their Appl. 15 pp 177-186 (2008). Edges will point away from source vertex s."""
function random_bfs_tree(g::AbstractGraph, s; maxiter=1000 * (nv(g) + ne(g)))
Q = [s]
d = map(v -> v == s ? 0.0 : Inf, Indices(vertices(g)))
visited = [s]
# TODO: This fails for `SimpleDiGraph`.
g_out = directed_graph_type(g)(vertices(g))
isempty_Q = false
for iter in 1:maxiter
v = rand(Q)
setdiff!(Q, [v])
for vn in neighbors(g, v)
if (d[vn] > d[v] + 1)
d[vn] = d[v] + 1
if (vn ∉ Q)
if (vn ∉ visited)
add_edge!(g_out, edgetype(g)(v, vn))
push!(visited, vn)
end
push!(Q, vn)
end
end
end
isempty_Q = isempty(Q)
if isempty_Q
break
end
end
if !isempty_Q
error("Search failed to cover the graph in time. Consider increasing maxiter.")
end
return g_out
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2578 | # AbstractTreeGraph
# Tree view of a graph.
abstract type AbstractTreeGraph{V} <: AbstractGraph{V} end
position_graph_type(type::Type{<:AbstractTreeGraph}) = not_implemented()
position_graph(graph::AbstractTreeGraph) = not_implemented()
function Graphs.is_directed(type::Type{<:AbstractTreeGraph})
return is_directed(position_graph_type(type))
end
Graphs.edgetype(graph::AbstractTreeGraph) = edgetype(position_graph(graph))
function Graphs.outneighbors(graph::AbstractTreeGraph, vertex)
return outneighbors(position_graph(graph), vertex)
end
function Graphs.inneighbors(graph::AbstractTreeGraph, vertex)
return inneighbors(position_graph(graph), vertex)
end
Graphs.nv(graph::AbstractTreeGraph) = nv(position_graph(graph))
Graphs.ne(graph::AbstractTreeGraph) = ne(position_graph(graph))
Graphs.vertices(graph::AbstractTreeGraph) = vertices(position_graph(graph))
# AbstractTrees
using AbstractTrees:
AbstractTrees, IndexNode, PostOrderDFS, PreOrderDFS, children, nodevalue
# Used for tree iteration.
# Assumes the graph is a [rooted directed tree](https://en.wikipedia.org/wiki/Tree_(graph_theory)#Rooted_tree).
tree_graph_node(g::AbstractTreeGraph, vertex) = IndexNode(g, vertex)
function tree_graph_node(g::AbstractGraph, vertex)
return tree_graph_node(TreeGraph(g), vertex)
end
tree_graph_node(g::AbstractGraph) = tree_graph_node(g, root_vertex(g))
# Make an `AbstractTreeGraph` act as an `AbstractTree` starting at
# the root vertex.
AbstractTrees.children(g::AbstractTreeGraph) = children(tree_graph_node(g))
AbstractTrees.nodevalue(g::AbstractTreeGraph) = nodevalue(tree_graph_node(g))
AbstractTrees.rootindex(tree::AbstractTreeGraph) = root_vertex(tree)
function AbstractTrees.nodevalue(tree::AbstractTreeGraph, node_index)
return node_index
end
function AbstractTrees.childindices(tree::AbstractTreeGraph, node_index)
return child_vertices(tree, node_index)
end
function AbstractTrees.parentindex(tree::AbstractTreeGraph, node_index)
return parent_vertex(tree, node_index)
end
# TreeGraph
struct TreeGraph{V,G<:AbstractGraph{V}} <: AbstractTreeGraph{V}
graph::G
global function _TreeGraph(g::AbstractGraph)
# No check for being a tree
return new{vertextype(g),typeof(g)}(g)
end
end
@traitfn function TreeGraph(g::AbstractGraph::IsDirected)
@assert is_arborescence(g)
return _TreeGraph(g)
end
@traitfn function TreeGraph(g::AbstractGraph::(!IsDirected))
@assert is_tree(g)
return _TreeGraph(g)
end
position_graph(graph::TreeGraph) = getfield(graph, :graph)
position_graph_type(type::Type{<:TreeGraph}) = fieldtype(type, :graph)
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2143 | using Graphs: AbstractGraph, dst, src, vertices
# https://en.wikipedia.org/wiki/Boundary_(graph_theory)
function boundary_edges(graph::AbstractGraph, subgraph_vertices; dir=:out)
E = edgetype(graph)
subgraph_vertices_set = Set(subgraph_vertices)
subgraph_complement = setdiff(Set(vertices(graph)), subgraph_vertices_set)
boundary_es = Vector{E}()
for subgraph_vertex in subgraph_vertices_set
for e in incident_edges(graph, subgraph_vertex; dir)
if src(e) ∈ subgraph_complement || dst(e) ∈ subgraph_complement
push!(boundary_es, e)
end
end
end
return boundary_es
end
# https://en.wikipedia.org/wiki/Boundary_(graph_theory)
# See implementation of `Graphs.neighborhood_dists` as a reference.
function inner_boundary_vertices(graph::AbstractGraph, subgraph_vertices; dir=:out)
V = vertextype(graph)
subgraph_vertices_set = Set(subgraph_vertices)
subgraph_complement = setdiff(Set(vertices(graph)), subgraph_vertices_set)
inner_boundary_vs = Vector{V}()
for subgraph_vertex in subgraph_vertices_set
for subgraph_vertex_neighbor in _neighbors(graph, subgraph_vertex; dir)
if subgraph_vertex_neighbor ∈ subgraph_complement
push!(inner_boundary_vs, subgraph_vertex)
break
end
end
end
return inner_boundary_vs
end
# https://en.wikipedia.org/wiki/Boundary_(graph_theory)
# See implementation of `Graphs.neighborhood_dists` as a reference.
function outer_boundary_vertices(graph::AbstractGraph, subgraph_vertices; dir=:out)
V = vertextype(graph)
subgraph_vertices_set = Set(subgraph_vertices)
subgraph_complement = setdiff(Set(vertices(graph)), subgraph_vertices_set)
outer_boundary_vs = Set{V}()
for subgraph_vertex in subgraph_vertices_set
for subgraph_vertex_neighbor in _neighbors(graph, subgraph_vertex; dir)
if subgraph_vertex_neighbor ∈ subgraph_complement
push!(outer_boundary_vs, subgraph_vertex_neighbor)
end
end
end
return [v for v in outer_boundary_vs]
end
function boundary_vertices(graph::AbstractGraph, subgraph_vertices; dir=:out)
return inner_boundary_vertices(graph, subgraph_vertices; dir)
end
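# Hypothetical usage sketch (not part of the original source): take the subgraph
# {1, 2} of the path graph 1 - 2 - 3 - 4. Vertex 2 is on the inner boundary and
# vertex 3 on the outer boundary.
#
#   julia> using Graphs: path_graph
#
#   julia> using NamedGraphs.GraphsExtensions: boundary_vertices, outer_boundary_vertices
#
#   julia> g = path_graph(4);
#
#   julia> boundary_vertices(g, [1, 2]) == [2]
#   true
#
#   julia> outer_boundary_vertices(g, [1, 2]) == [3]
#   true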
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 341 | using Graphs: AbstractGraph, neighborhood_dists
function vertices_at_distance(g::AbstractGraph, vertex, distance::Int)
vertices_and_distances = neighborhood_dists(g, vertex, distance)
return map(first, filter(==(distance) ∘ last, vertices_and_distances))
end
next_nearest_neighbors(g::AbstractGraph, v) = vertices_at_distance(g, v, 2)
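# Hypothetical usage sketch (not part of the original source): on the path graph
# 1 - 2 - 3 - 4 - 5, the only vertex at graph distance exactly 2 from vertex 1 is 3.
#
#   julia> using Graphs: path_graph
#
#   julia> using NamedGraphs.GraphsExtensions: next_nearest_neighbors
#
#   julia> next_nearest_neighbors(path_graph(5), 1) == [3]
#   true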
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2069 | using Graphs: AbstractGraph, AbstractSimpleGraph, nv, vertices
using SplitApplyCombine: group
"""
Graph partitioning backend
"""
struct Backend{T} end
Backend(s::Symbol) = Backend{s}()
Backend(s::String) = Backend(Symbol(s))
Backend(backend::Backend) = backend
macro Backend_str(s)
return :(Backend{$(Expr(:quote, Symbol(s)))})
end
"""
Current default graph partitioning backend
"""
const CURRENT_PARTITIONING_BACKEND = Ref{Union{Missing,Backend}}(missing)
"""
Get the graph partitioning backend
"""
current_partitioning_backend() = CURRENT_PARTITIONING_BACKEND[]
"""
Set the graph partitioning backend
"""
function set_partitioning_backend!(backend::Union{Missing,Backend,String})
CURRENT_PARTITIONING_BACKEND[] = Backend(backend)
return nothing
end
function _npartitions(
g::AbstractGraph, npartitions::Integer, nvertices_per_partition::Nothing
)
return npartitions
end
function _npartitions(
g::AbstractGraph, npartitions::Nothing, nvertices_per_partition::Integer
)
return nv(g) ÷ nvertices_per_partition
end
function _npartitions(g::AbstractGraph, npartitions::Int, nvertices_per_partition::Int)
return error("Can't specify both `npartitions` and `nvertices_per_partition`")
end
function _npartitions(
g::AbstractGraph, npartitions::Nothing, nvertices_per_partition::Nothing
)
return error("Must specify either `npartitions` or `nvertices_per_partition`")
end
function partitioned_vertices(
g::AbstractSimpleGraph;
npartitions=nothing,
nvertices_per_partition=nothing,
backend=current_partitioning_backend(),
kwargs...,
)
# Metis cannot handle the edge case npartitions = 1, so we will fix it here for now.
# TODO: Check if this is still needed, or move to `NamedGraphsMetisExt`.
if (_npartitions(g, npartitions, nvertices_per_partition) == 1)
return group(v -> 1, collect(vertices(g)))
end
return partitioned_vertices(
Backend(backend), g, _npartitions(g, npartitions, nvertices_per_partition); kwargs...
)
end
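# Hypothetical usage sketch (not part of the original source). For more than one
# partition this dispatches to a backend such as Metis or KaHyPar, loaded through
# the corresponding package extension; the edge case `npartitions=1` is handled
# directly above and groups all vertices together.
#
#   julia> using Graphs: path_graph
#
#   julia> using NamedGraphs.GraphsExtensions: partitioned_vertices
#
#   julia> pv = partitioned_vertices(path_graph(4); npartitions=1);
#
#   julia> pv[1] == [1, 2, 3, 4]
#   true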
function partitioned_vertices(g::AbstractGraph; kwargs...)
return not_implemented()
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 789 | using Graphs: Graphs, dijkstra_shortest_paths, edgetype, weights
function dijkstra_parents(graph::AbstractGraph, vertex, distmx=weights(graph))
return dijkstra_shortest_paths(
graph, [vertex], distmx; allpaths=false, trackvertices=false
).parents
end
function dijkstra_mst(graph::AbstractGraph, vertex, distmx=weights(graph))
parents =
dijkstra_shortest_paths(
graph, [vertex], distmx; allpaths=false, trackvertices=false
).parents
mst = Vector{edgetype(graph)}()
for src in eachindex(parents)
dst = parents[src]
if src ≠ dst
push!(mst, edgetype(graph)(src, dst))
end
end
return mst
end
function dijkstra_tree(graph::AbstractGraph, vertex, distmx=weights(graph))
return Graphs.tree(graph, dijkstra_parents(graph, vertex, distmx))
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 1107 | using Graphs.SimpleGraphs: AbstractSimpleGraph
function permute_vertices(graph::AbstractSimpleGraph, permutation)
return graph[permutation]
end
# https://github.com/JuliaGraphs/Graphs.jl/issues/365
function graph_from_vertices(graph_type::Type{<:AbstractSimpleGraph}, vertices)
@assert vertices == Base.OneTo(length(vertices))
return graph_type(length(vertices))
end
function convert_vertextype(vertextype::Type, graph::AbstractSimpleGraph)
return not_implemented()
end
using Graphs.SimpleGraphs: SimpleDiGraph, SimpleGraph
function convert_vertextype(vertextype::Type, graph::SimpleGraph)
return SimpleGraph{vertextype}(graph)
end
function convert_vertextype(vertextype::Type, graph::SimpleDiGraph)
return SimpleDiGraph{vertextype}(graph)
end
directed_graph_type(G::Type{<:SimpleGraph}) = SimpleDiGraph{vertextype(G)}
# TODO: Use traits to make this more general.
undirected_graph_type(G::Type{<:SimpleGraph}) = G
# TODO: Use traits to make this more general.
directed_graph_type(G::Type{<:SimpleDiGraph}) = G
undirected_graph_type(G::Type{<:SimpleDiGraph}) = SimpleGraph{vertextype(G)}
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 335 | # Symmetric sparse reverse Cuthill-McKee ordering
# https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm
# https://github.com/PetrKryslUCSD/SymRCM.jl
# https://github.com/rleegates/CuthillMcKee.jl
function symrcm_perm end
function symrcm_permute(graph::AbstractGraph)
return permute_vertices(graph, symrcm_perm(graph))
end
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |
|
[
"MIT"
] | 0.6.2 | 34c4653b98832143d3f5f8621e91aa2008873fb0 | code | 2401 | using Graphs: IsDirected, bfs_tree, connected_components, edges, edgetype
using .GraphsExtensions: random_bfs_tree, rem_edges, undirected_graph
using SimpleTraits: SimpleTraits, Not, @traitfn
abstract type SpanningTreeAlgorithm end
struct BFS <: SpanningTreeAlgorithm end
struct RandomBFS <: SpanningTreeAlgorithm end
struct DFS <: SpanningTreeAlgorithm end
default_spanning_tree_alg() = BFS()
default_root_vertex(g) = last(findmax(eccentricities(g)))
function spanning_tree(
g::AbstractGraph; alg=default_spanning_tree_alg(), root_vertex=default_root_vertex(g)
)
return spanning_tree(alg, g; root_vertex)
end
@traitfn function spanning_tree(
::BFS, g::AbstractGraph::(!IsDirected); root_vertex=default_root_vertex(g)
)
return undirected_graph(bfs_tree(g, root_vertex))
end
@traitfn function spanning_tree(
::RandomBFS, g::AbstractGraph::(!IsDirected); root_vertex=default_root_vertex(g)
)
return undirected_graph(random_bfs_tree(g, root_vertex))
end
@traitfn function spanning_tree(
::DFS, g::AbstractGraph::(!IsDirected); root_vertex=default_root_vertex(g)
)
return undirected_graph(dfs_tree(g, root_vertex))
end
# Given a graph, split it into its connected components, construct a spanning tree
# over each of them using the function `spanning_tree`, and take the union.
function spanning_forest(g::AbstractGraph; spanning_tree=spanning_tree)
return reduce(union, (spanning_tree(subgraph(g, vs)) for vs in connected_components(g)))
end
# TODO: Create a generic version in `GraphsExtensions`.
# Given an undirected graph g with vertex set V, build a set of forests (each with vertex set V) that covers all edges in g
# (see https://en.wikipedia.org/wiki/Arboricity). We do not find the minimum number of forests, but our tests show this algorithm performs well.
function forest_cover(g::AbstractGraph; spanning_tree=spanning_tree)
edges_collected = edgetype(g)[]
remaining_edges = edges(g)
forests = typeof(g)[]
while !isempty(remaining_edges)
g_reduced = rem_edges(g, edges_collected)
g_reduced_spanning_forest = spanning_forest(g_reduced; spanning_tree)
push!(edges_collected, edges(g_reduced_spanning_forest)...)
push!(forests, g_reduced_spanning_forest)
setdiff!(remaining_edges, edges(g_reduced_spanning_forest))
end
return forests
end
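# Hypothetical usage sketch (not part of the original source): a BFS spanning
# tree of the 4-cycle keeps 3 of its 4 edges; `forest_cover` repeats this on the
# remaining edges until every edge of the input graph is covered by some forest.
#
#   julia> using Graphs: cycle_graph, ne
#
#   julia> using NamedGraphs.GraphsExtensions: spanning_tree
#
#   julia> ne(spanning_tree(cycle_graph(4)))
#   3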
# TODO: Define in `NamedGraphs.PartitionedGraphs`.
# forest_cover(g::PartitionedGraph; kwargs...) = not_implemented()
| NamedGraphs | https://github.com/ITensor/NamedGraphs.jl.git |