# ── NLopt v1.1.1 (MIT) · code · https://github.com/jump-dev/NLopt.jl.git ──
# Copyright (c) 2013: Steven G. Johnson and contributors
#
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.
module TestMOIWrapper
using NLopt
using Test
import MathOptInterface as MOI
function runtests()
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "test_")
@testset "$(name)" begin
getfield(@__MODULE__, name)()
end
end
end
return
end
function test_runtests()
model = MOI.instantiate(
NLopt.Optimizer;
with_bridge_type = Float64,
with_cache_type = Float64,
)
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :LD_SLSQP)
MOI.set(model, MOI.RawOptimizerAttribute("maxtime"), 10.0)
other_failures = Any[]
if Sys.WORD_SIZE == 32
push!(other_failures, r"^test_constraint_qcp_duplicate_diagonal$")
end
MOI.Test.runtests(
model,
MOI.Test.Config(;
optimal_status = MOI.LOCALLY_SOLVED,
atol = 1e-2,
rtol = 1e-2,
exclude = Any[
MOI.ConstraintBasisStatus,
MOI.ConstraintDual,
MOI.DualObjectiveValue,
MOI.ObjectiveBound,
MOI.NLPBlockDual,
MOI.VariableBasisStatus,
],
);
exclude = [
# Issues related to detecting infeasibility
r"^test_conic_NormInfinityCone_INFEASIBLE$",
r"^test_conic_NormOneCone_INFEASIBLE$",
r"^test_conic_linear_INFEASIBLE$",
r"^test_conic_linear_INFEASIBLE_2$",
r"^test_infeasible_MIN_SENSE$",
r"^test_infeasible_MIN_SENSE_offset$",
r"^test_linear_DUAL_INFEASIBLE$",
r"^test_linear_DUAL_INFEASIBLE_2$",
r"^test_linear_INFEASIBLE$",
r"^test_linear_INFEASIBLE_2$",
r"^test_solve_TerminationStatus_DUAL_INFEASIBLE$",
# ArgumentError: invalid NLopt arguments: too many equality constraints
r"^test_linear_VectorAffineFunction_empty_row$",
# Evaluated: MathOptInterface.ALMOST_LOCALLY_SOLVED == MathOptInterface.LOCALLY_SOLVED
r"^test_linear_add_constraints$",
# NLopt#31
r"^test_nonlinear_invalid$",
# TODO(odow): wrong solutions?
r"^test_quadratic_SecondOrderCone_basic$",
r"^test_quadratic_constraint_integration$",
# Perhaps an expected failure because the problem is non-convex
r"^test_quadratic_nonconvex_constraint_basic$",
r"^test_quadratic_nonconvex_constraint_integration$",
# A whole bunch of issues to diagnose here
"test_basic_VectorNonlinearFunction_",
# INVALID_OPTION?
r"^test_nonlinear_expression_hs109$",
other_failures...,
],
)
return
end
function test_list_of_model_attributes_set()
attr = MOI.ListOfModelAttributesSet()
model = NLopt.Optimizer()
ret = MOI.AbstractModelAttribute[]
@test MOI.get(model, attr) == ret
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
push!(ret, MOI.ObjectiveSense())
@test MOI.get(model, attr) == ret
x = MOI.add_variable(model)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)
push!(ret, MOI.ObjectiveFunction{MOI.VariableIndex}())
@test MOI.get(model, attr) == ret
return
end
function test_list_and_number_of_constraints()
model = NLopt.Optimizer()
x = MOI.add_variable(model)
F1, S1 = MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64}
F2, S2 = MOI.ScalarQuadraticFunction{Float64}, MOI.LessThan{Float64}
@test MOI.get(model, MOI.NumberOfConstraints{F1,S1}()) == 0
@test MOI.get(model, MOI.NumberOfConstraints{F2,S2}()) == 0
@test MOI.get(model, MOI.ListOfConstraintIndices{F1,S1}()) == []
@test MOI.get(model, MOI.ListOfConstraintIndices{F2,S2}()) == []
c1 = MOI.add_constraint(model, 1.0 * x, MOI.EqualTo(2.0))
@test MOI.get(model, MOI.NumberOfConstraints{F1,S1}()) == 1
@test MOI.get(model, MOI.NumberOfConstraints{F2,S2}()) == 0
@test MOI.get(model, MOI.ListOfConstraintIndices{F1,S1}()) == [c1]
@test MOI.get(model, MOI.ListOfConstraintIndices{F2,S2}()) == []
c2 = MOI.add_constraint(model, 1.0 * x * x, MOI.LessThan(2.0))
@test MOI.get(model, MOI.NumberOfConstraints{F1,S1}()) == 1
@test MOI.get(model, MOI.NumberOfConstraints{F2,S2}()) == 1
@test MOI.get(model, MOI.ListOfConstraintIndices{F1,S1}()) == [c1]
@test MOI.get(model, MOI.ListOfConstraintIndices{F2,S2}()) == [c2]
@test MOI.get(model, MOI.ConstraintSet(), c1) == MOI.EqualTo(2.0)
@test MOI.get(model, MOI.ConstraintSet(), c2) == MOI.LessThan(2.0)
return
end
function test_raw_optimizer_attribute()
model = NLopt.Optimizer()
attr = MOI.RawOptimizerAttribute("algorithm")
@test MOI.supports(model, attr)
@test MOI.get(model, attr) == :none
MOI.set(model, attr, :LD_MMA)
@test MOI.get(model, attr) == :LD_MMA
bad_attr = MOI.RawOptimizerAttribute("foobar")
@test !MOI.supports(model, bad_attr)
@test_throws MOI.GetAttributeNotAllowed MOI.get(model, bad_attr)
return
end
function test_list_of_variable_attributes_set()
model = NLopt.Optimizer()
@test MOI.get(model, MOI.ListOfVariableAttributesSet()) ==
MOI.AbstractVariableAttribute[]
x = MOI.add_variables(model, 2)
MOI.supports(model, MOI.VariablePrimalStart(), MOI.VariableIndex)
MOI.set(model, MOI.VariablePrimalStart(), x[2], 1.0)
@test MOI.get(model, MOI.ListOfVariableAttributesSet()) ==
MOI.AbstractVariableAttribute[MOI.VariablePrimalStart()]
@test MOI.get(model, MOI.VariablePrimalStart(), x[1]) === nothing
@test MOI.get(model, MOI.VariablePrimalStart(), x[2]) === 1.0
return
end
function test_list_of_constraint_attributes_set()
model = NLopt.Optimizer()
F, S = MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64}
@test MOI.get(model, MOI.ListOfConstraintAttributesSet{F,S}()) ==
MOI.AbstractConstraintAttribute[]
return
end
function test_raw_optimizer_attribute_in_optimize()
model = NLopt.Optimizer()
x = MOI.add_variables(model, 2)
f = (x[1] - 2.0) * (x[1] - 2.0) + (x[2] + 1.0)^2  # * (x[2] + 1)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
for (k, v) in (
"algorithm" => :LD_SLSQP,
"stopval" => 1.0,
"ftol_rel" => 1e-6,
"ftol_abs" => 1e-6,
"xtol_rel" => 1e-6,
"xtol_abs" => 1e-6,
"maxeval" => 100,
"maxtime" => 60.0,
"initial_step" => [0.1, 0.1],
"population" => 10,
"seed" => 1234,
"vector_storage" => 3,
)
attr = MOI.RawOptimizerAttribute(k)
MOI.set(model, attr, v)
end
MOI.optimize!(model)
@test ≈(MOI.get.(model, MOI.VariablePrimal(), x), [2.0, -1.0]; atol = 1e-4)
return
end
function test_local_optimizer_Symbol()
model = NLopt.Optimizer()
x = MOI.add_variables(model, 2)
f = (x[1] - 2.0) * (x[1] - 2.0) + (x[2] + 1.0) * (x[2] + 1.0)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :AUGLAG)
attr = MOI.RawOptimizerAttribute("local_optimizer")
@test MOI.get(model, attr) === nothing
MOI.set(model, attr, :LD_SLSQP)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) isa MOI.TerminationStatusCode
return
end
function test_local_optimizer_Opt()
model = NLopt.Optimizer()
x = MOI.add_variables(model, 2)
f = (x[1] - 2.0) * (x[1] - 2.0) + (x[2] + 1.0) * (x[2] + 1.0)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :GD_MLSL)
attr = MOI.RawOptimizerAttribute("local_optimizer")
@test MOI.get(model, attr) === nothing
MOI.set(model, attr, NLopt.Opt(:LD_MMA, 2))
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) isa MOI.TerminationStatusCode
return
end
function test_get_objective_function()
model = NLopt.Optimizer()
x = MOI.add_variable(model)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)
@test MOI.get(model, MOI.ObjectiveFunction{MOI.VariableIndex}()) == x
F = MOI.ScalarAffineFunction{Float64}
@test isapprox(MOI.get(model, MOI.ObjectiveFunction{F}()), 1.0 * x)
return
end
function test_ScalarNonlinearFunction_mix_apis_nlpblock_last()
model = NLopt.Optimizer()
x = MOI.add_variable(model)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
MOI.add_constraint(model, f, MOI.LessThan(1.0))
evaluator = MOI.Test.HS071(false, false)
bounds = MOI.NLPBoundsPair.([25.0, 40.0], [Inf, 40.0])
block = MOI.NLPBlockData(bounds, evaluator, true)
@test_throws(
ErrorException("Cannot mix the new and legacy nonlinear APIs"),
MOI.set(model, MOI.NLPBlock(), block),
)
return
end
function test_ScalarNonlinearFunction_mix_apis_nlpblock_first()
model = NLopt.Optimizer()
x = MOI.add_variable(model)
evaluator = MOI.Test.HS071(false, false)
bounds = MOI.NLPBoundsPair.([25.0, 40.0], [Inf, 40.0])
block = MOI.NLPBlockData(bounds, evaluator, true)
MOI.set(model, MOI.NLPBlock(), block)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
@test_throws(
ErrorException("Cannot mix the new and legacy nonlinear APIs"),
MOI.add_constraint(model, f, MOI.LessThan(1.0)),
)
return
end
function test_ScalarNonlinearFunction_is_valid()
model = NLopt.Optimizer()
x = MOI.add_variable(model)
F, S = MOI.ScalarNonlinearFunction, MOI.EqualTo{Float64}
@test MOI.is_valid(model, MOI.ConstraintIndex{F,S}(1)) == false
f = MOI.ScalarNonlinearFunction(:sin, Any[x])
c = MOI.add_constraint(model, f, MOI.EqualTo(0.0))
@test c isa MOI.ConstraintIndex{F,S}
@test MOI.is_valid(model, c) == true
return
end
function test_ScalarNonlinearFunction_ObjectiveFunctionType()
model = NLopt.Optimizer()
x = MOI.add_variable(model)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
F = MOI.ScalarNonlinearFunction
MOI.set(model, MOI.ObjectiveFunction{F}(), f)
@test MOI.get(model, MOI.ObjectiveFunctionType()) == F
return
end
function test_AutomaticDifferentiationBackend()
model = NLopt.Optimizer()
attr = MOI.AutomaticDifferentiationBackend()
@test MOI.supports(model, attr)
@test MOI.get(model, attr) == MOI.Nonlinear.SparseReverseMode()
MOI.set(model, attr, MOI.Nonlinear.ExprGraphOnly())
@test MOI.get(model, attr) == MOI.Nonlinear.ExprGraphOnly()
return
end
function test_ScalarNonlinearFunction_LessThan()
model = NLopt.Optimizer()
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :LD_SLSQP)
x = MOI.add_variable(model)
# Needed for NLopt#31
MOI.set(model, MOI.VariablePrimalStart(), x, 1.0)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
MOI.add_constraint(model, f, MOI.LessThan(2.0))
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)
MOI.optimize!(model)
@test isapprox(MOI.get(model, MOI.VariablePrimal(), x), exp(2); atol = 1e-4)
return
end
function test_ScalarNonlinearFunction_GreaterThan()
model = NLopt.Optimizer()
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :LD_SLSQP)
x = MOI.add_variable(model)
# Needed for NLopt#31
MOI.set(model, MOI.VariablePrimalStart(), x, 1.0)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
MOI.add_constraint(model, f, MOI.GreaterThan(2.0))
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)
MOI.optimize!(model)
@test isapprox(MOI.get(model, MOI.VariablePrimal(), x), exp(2); atol = 1e-4)
return
end
function test_ScalarNonlinearFunction_Interval()
model = NLopt.Optimizer()
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :LD_SLSQP)
x = MOI.add_variable(model)
# Needed for NLopt#31
MOI.set(model, MOI.VariablePrimalStart(), x, 1.0)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
MOI.add_constraint(model, f, MOI.Interval(1.0, 2.0))
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)
MOI.optimize!(model)
@test isapprox(MOI.get(model, MOI.VariablePrimal(), x), exp(2); atol = 1e-4)
return
end
function test_ScalarNonlinearFunction_derivative_free()
model = NLopt.Optimizer()
MOI.set(model, MOI.RawOptimizerAttribute("algorithm"), :LN_COBYLA)
x = MOI.add_variable(model)
# Needed for NLopt#31
MOI.set(model, MOI.VariablePrimalStart(), x, 1.0)
f = MOI.ScalarNonlinearFunction(:log, Any[x])
MOI.add_constraint(model, f, MOI.GreaterThan(2.0))
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), x)
MOI.optimize!(model)
@test isapprox(MOI.get(model, MOI.VariablePrimal(), x), exp(2); atol = 1e-4)
return
end
end # module
TestMOIWrapper.runtests()
# ── NLopt v1.1.1 (MIT) · code · https://github.com/jump-dev/NLopt.jl.git ──
# Copyright (c) 2013: Steven G. Johnson and contributors
#
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.
include("C_API.jl")
include("MOI_wrapper.jl")
<!-- NLopt v1.1.1 (MIT) · docs · https://github.com/jump-dev/NLopt.jl.git -->
# NLopt.jl
[](https://github.com/jump-dev/NLopt.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/jump-dev/NLopt.jl)
[NLopt.jl](https://github.com/jump-dev/NLopt.jl) is a wrapper for the
[NLopt](https://nlopt.readthedocs.io/en/latest/) library for nonlinear
optimization.
NLopt provides a common interface for many different optimization algorithms,
including:
* Both global and local optimization
* Algorithms using function values only (derivative-free), as well as algorithms
exploiting user-supplied gradients.
* Algorithms for unconstrained optimization, bound-constrained optimization,
and general nonlinear inequality/equality constraints.
## License
`NLopt.jl` is licensed under the [MIT License](https://github.com/jump-dev/NLopt.jl/blob/master/LICENSE.md).
The underlying solver, [stevengj/nlopt](https://github.com/stevengj/nlopt), is
licensed under the [LGPL v3.0 license](https://github.com/stevengj/nlopt/blob/master/COPYING).
## Installation
Install `NLopt.jl` using the Julia package manager:
```julia
import Pkg
Pkg.add("NLopt")
```
In addition to installing the `NLopt.jl` package, this will also download and
install the NLopt binaries. You do not need to install NLopt separately.
## Tutorial
The following example code solves the nonlinearly constrained minimization
problem from the [NLopt Tutorial](https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/).
```julia
using NLopt
function my_objective_fn(x::Vector, grad::Vector)
if length(grad) > 0
grad[1] = 0
grad[2] = 0.5 / sqrt(x[2])
end
return sqrt(x[2])
end
function my_constraint_fn(x::Vector, grad::Vector, a, b)
if length(grad) > 0
grad[1] = 3 * a * (a * x[1] + b)^2
grad[2] = -1
end
return (a * x[1] + b)^3 - x[2]
end
opt = NLopt.Opt(:LD_MMA, 2)
NLopt.lower_bounds!(opt, [-Inf, 0.0])
NLopt.xtol_rel!(opt, 1e-4)
NLopt.min_objective!(opt, my_objective_fn)
NLopt.inequality_constraint!(opt, (x, g) -> my_constraint_fn(x, g, 2, 0), 1e-8)
NLopt.inequality_constraint!(opt, (x, g) -> my_constraint_fn(x, g, -1, 1), 1e-8)
min_f, min_x, ret = NLopt.optimize(opt, [1.234, 5.678])
num_evals = NLopt.numevals(opt)
println(
"""
objective value : $min_f
solution : $min_x
solution status : $ret
# function evaluation : $num_evals
"""
)
```
The output is:
```
objective value : 0.5443310477213124
solution : [0.3333333342139688, 0.29629628951338166]
solution status : XTOL_REACHED
# function evaluation : 11
```
## Use with JuMP
NLopt implements the [MathOptInterface interface](https://jump.dev/MathOptInterface.jl/stable/reference/nonlinear/)
for nonlinear optimization, which means that it can be used interchangeably with
other optimization packages from modeling packages like
[JuMP](https://github.com/jump-dev/JuMP.jl). Note that NLopt does not exploit
sparsity of Jacobians.
You can use NLopt with JuMP as follows:
```julia
using JuMP, NLopt
model = Model(NLopt.Optimizer)
set_attribute(model, "algorithm", :LD_MMA)
set_attribute(model, "xtol_rel", 1e-4)
set_attribute(model, "constrtol_abs", 1e-8)
@variable(model, x[1:2])
set_lower_bound(x[2], 0.0)
set_start_value.(x, [1.234, 5.678])
@NLobjective(model, Min, sqrt(x[2]))
@NLconstraint(model, (2 * x[1] + 0)^3 - x[2] <= 0)
@NLconstraint(model, (-1 * x[1] + 1)^3 - x[2] <= 0)
optimize!(model)
min_f, min_x, ret = objective_value(model), value.(x), raw_status(model)
println(
"""
objective value : $min_f
solution : $min_x
solution status : $ret
"""
)
```
The output is:
```
objective value : 0.5443310477213124
solution : [0.3333333342139688, 0.29629628951338166]
solution status : XTOL_REACHED
```
The `algorithm` attribute is required. The value must be one of the supported
[NLopt algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/).
Other parameters include `stopval`, `ftol_rel`, `ftol_abs`, `xtol_rel`,
`xtol_abs`, `constrtol_abs`, `maxeval`, `maxtime`, `initial_step`, `population`,
`seed`, and `vector_storage`.
All other parameters are optional. The meaning and acceptable values of all parameters,
except `constrtol_abs`, match the descriptions below from the specialized NLopt API.
The `constrtol_abs` parameter is an absolute feasibility tolerance applied to
all constraints.
## Automatic differentiation
Some algorithms in NLopt require derivatives, which you must manually provide
in the `if length(grad) > 0` branch of your objective and constraint functions.
To stay simple and lightweight, NLopt does not provide ways to automatically
compute derivatives. If you do not have analytic expressions for the derivatives,
use a package such as [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl)
to compute automatic derivatives.
Here is an example of how to wrap a function `f(x::Vector)` using ForwardDiff so
that it is compatible with NLopt:
```julia
using NLopt
import ForwardDiff
function autodiff(f::Function)
function nlopt_fn(x::Vector, grad::Vector)
if length(grad) > 0
# Use ForwardDiff to compute the gradient. Replace with your
# favorite Julia automatic differentiation package.
ForwardDiff.gradient!(grad, f, x)
end
return f(x)
end
end
# These functions do not implement `grad`:
my_objective_fn(x::Vector) = sqrt(x[2]);
my_constraint_fn(x::Vector, a, b) = (a * x[1] + b)^3 - x[2];
opt = NLopt.Opt(:LD_MMA, 2)
NLopt.lower_bounds!(opt, [-Inf, 0.0])
NLopt.xtol_rel!(opt, 1e-4)
# But we wrap them in autodiff before passing to NLopt:
NLopt.min_objective!(opt, autodiff(my_objective_fn))
NLopt.inequality_constraint!(opt, autodiff(x -> my_constraint_fn(x, 2, 0)), 1e-8)
NLopt.inequality_constraint!(opt, autodiff(x -> my_constraint_fn(x, -1, 1)), 1e-8)
min_f, min_x, ret = NLopt.optimize(opt, [1.234, 5.678])
# (0.5443310477213124, [0.3333333342139688, 0.29629628951338166], :XTOL_REACHED)
```
## Reference
The main purpose of this section is to document the syntax and unique features
of the Julia interface. For more detail on the underlying features, please refer
to the C documentation in the [NLopt Reference](https://nlopt.readthedocs.io/en/latest/NLopt_Reference/).
### Using the Julia API
To use NLopt in Julia, your Julia program should include the line:
```julia
using NLopt
```
which imports the NLopt module and its symbols. Alternatively, you can use
`import NLopt` if you want to keep all the NLopt symbols in their own namespace.
You would then prefix all functions below with `NLopt.`, for example `NLopt.Opt` and so
on.
### The `Opt` type
The NLopt API revolves around an object of type `Opt`.
The object should normally be created via the constructor:
```julia
opt = Opt(algorithm::Symbol, n::Int)
```
given an algorithm (see [NLopt Algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/)
for possible values) and the dimensionality of the problem (`n`, the number of
optimization parameters).
Whereas in C the algorithms are specified by `nlopt_algorithm` constants of the
form `NLOPT_LD_MMA`, the Julia `algorithm` values are symbols of the form
`:LD_MMA` with the `NLOPT_` prefix replaced by `:` to create a Julia symbol.
There is also a `copy(opt::Opt)` function to make a copy of a given object
(equivalent to `nlopt_copy` in the C API).
If there is an error in these functions, an exception is thrown.
The algorithm and dimension parameters of the object are immutable (cannot be
changed without constructing a new object). Query them using:
```julia
ndims(opt::Opt)
algorithm(opt::Opt)
```
Get a string description of the algorithm via:
```julia
algorithm_name(opt::Opt)
```
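For example (a minimal sketch; the comments describe the returned values):
```julia
using NLopt
opt = Opt(:LD_MMA, 2)
n = ndims(opt)              # dimension of the problem, here 2
alg = algorithm(opt)        # the algorithm, corresponding to :LD_MMA
name = algorithm_name(opt)  # string description of the algorithm
```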
### Objective function
The objective function is specified by calling one of:
```julia
min_objective!(opt::Opt, f::Function)
max_objective!(opt::Opt, f::Function)
```
depending on whether one wishes to minimize or maximize the objective function
`f`, respectively.
The function `f` must be of the form:
```julia
function f(x::Vector{Float64}, grad::Vector{Float64})
if length(grad) > 0
...set grad to gradient, in-place...
end
return ...value of f(x)...
end
```
The return value must be the value of the function at the point `x`, where `x`
is a `Vector{Float64}` array of length `n` of the optimization parameters.
In addition, if the argument `grad` is not empty (that is, `length(grad) > 0`),
then `grad` is a `Vector{Float64}` array of length `n` which should (upon
return) be set to the gradient of the function with respect to the optimization
parameters at `x`.
Not all of the optimization algorithms (below) use the gradient information: for
algorithms listed as "derivative-free," the `grad` argument will always be empty
and need never be computed. For algorithms that do use gradient information,
`grad` may still be empty for some calls.
Note that `grad` must be modified *in-place* by your function `f`. Generally,
this means using indexing operations `grad[...] = ...` to overwrite the contents
of `grad`. For example `grad = 2x` will *not* work, because it points `grad` to
a new array `2x` rather than overwriting the old contents; instead, use an
explicit loop or use `grad[:] = 2x`.
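For example, a minimal objective obeying this contract (the name `my_sum_of_squares`
is purely illustrative, and `opt` is an existing `Opt`):
```julia
function my_sum_of_squares(x::Vector, grad::Vector)
    if length(grad) > 0
        grad[:] = 2x  # in-place assignment; `grad = 2x` would be ignored
    end
    return sum(abs2, x)
end
min_objective!(opt, my_sum_of_squares)
```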
### Bound constraints
Add bound constraints with:
```julia
lower_bounds!(opt::Opt, lb::Union{AbstractVector,Real})
upper_bounds!(opt::Opt, ub::Union{AbstractVector,Real})
```
where `lb` and `ub` are real arrays of length `n` (the same as the dimension
passed to the `Opt` constructor).
For convenience, you can instead use a single scalar for `lb` or `ub` in order
to set the lower/upper bounds for all optimization parameters to a single
constant.
To retrieve the values of the lower or upper bounds, use:
```julia
lower_bounds(opt::Opt)
upper_bounds(opt::Opt)
```
both of which return `Vector{Float64}` arrays.
To specify an unbounded dimension, you can use `Inf` or `-Inf`.
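For example (a sketch, assuming `opt` was constructed with `n = 2`):
```julia
lower_bounds!(opt, [-Inf, 0.0])  # per-parameter lower bounds
upper_bounds!(opt, 10.0)         # scalar: the same upper bound for all parameters
lb = lower_bounds(opt)           # returns [-Inf, 0.0]
```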
### Nonlinear constraints
Specify nonlinear inequality and equality constraints by the functions:
```julia
inequality_constraint!(opt::Opt, f::Function, tol::Real = 0.0)
equality_constraint!(opt::Opt, f::Function, tol::Real = 0.0)
```
where the arguments `f` have the same form as the objective function above.
The optional `tol` arguments specify a tolerance (which defaults to zero) that
is used to judge feasibility for the purposes of stopping the optimization.
Each call to these function *adds* a new constraint to the set of constraints,
rather than replacing the constraints.
Remove all of the inequality and equality constraints from a given problem with:
```julia
remove_constraints!(opt::Opt)
```
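For example, a sketch that adds the equality constraint x₁ + x₂ = 1, written in the
form h(x) = 0 expected by NLopt:
```julia
function my_equality_fn(x::Vector, grad::Vector)
    if length(grad) > 0
        grad[1] = 1.0
        grad[2] = 1.0
    end
    return x[1] + x[2] - 1.0
end
equality_constraint!(opt, my_equality_fn, 1e-8)
```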
### Vector-valued constraints
Specify vector-valued nonlinear inequality and equality constraints by the
functions:
```julia
inequality_constraint!(opt::Opt, f::Function, tol::AbstractVector)
equality_constraint!(opt::Opt, f::Function, tol::AbstractVector)
```
where `tol` is an array of the tolerances in each constraint dimension; the
dimensionality `m` of the constraint is determined by `length(tol)`.
The constraint function `f` must be of the form:
```julia
function f(result::Vector{Float64}, x::Vector{Float64}, grad::Matrix{Float64})
if length(grad) > 0
...set grad to gradient, in-place...
end
result[1] = ...value of c1(x)...
result[2] = ...value of c2(x)...
    return
end
```
where `result` is a `Vector{Float64}` array whose length equals the
dimensionality `m` of the constraint (same as the length of `tol` above), which
upon return should be set *in-place* to the constraint results at the point `x`.
Any return value of the function is ignored.
In addition, if the argument `grad` is not empty (that is, `length(grad) > 0`),
then `grad` is a matrix of size `n`×`m` which should (upon return) be
set in-place (see above) to the gradient of the function with respect to the
optimization parameters at `x`. That is, `grad[j,i]` should upon return contain
the partial derivative ∂f<sub>`i`</sub>/∂x<sub>`j`</sub>.
Not all of the optimization algorithms (below) use the gradient information: for
algorithms listed as "derivative-free," the `grad` argument will always be empty
and need never be computed. For algorithms that do use gradient information,
`grad` may still be empty for some calls.
You can add multiple vector-valued constraints and/or scalar constraints in the
same problem.
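For example, a sketch of two inequality constraints, c₁(x) = x₁² - x₂ ≤ 0 and
c₂(x) = x₁ + x₂ - 1 ≤ 0, supplied as a single vector-valued constraint:
```julia
function my_vector_constraint(result::Vector, x::Vector, grad::Matrix)
    if length(grad) > 0
        grad[1, 1] = 2x[1]  # ∂c₁/∂x₁
        grad[2, 1] = -1.0   # ∂c₁/∂x₂
        grad[1, 2] = 1.0    # ∂c₂/∂x₁
        grad[2, 2] = 1.0    # ∂c₂/∂x₂
    end
    result[1] = x[1]^2 - x[2]
    result[2] = x[1] + x[2] - 1.0
    return
end
inequality_constraint!(opt, my_vector_constraint, [1e-8, 1e-8])
```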
### Stopping criteria
As explained in the [C API Reference](https://nlopt.readthedocs.io/en/latest/NLopt_Reference/)
and the [Introduction](https://nlopt.readthedocs.io/en/latest/NLopt_Introduction/),
you have multiple options for different stopping criteria that you can specify.
(Unspecified stopping criteria are disabled; that is, they have innocuous
defaults.)
For each stopping criterion, there are two functions that you can use to get and
set the value of the stopping criterion.
```julia
stopval(opt::Opt) # return the current value of `stopval`
stopval!(opt::Opt, value) # set stopval to `value`
```
Stop when an objective value of at least `stopval` is found. (Defaults to `-Inf`.)
```julia
ftol_rel(opt::Opt)
ftol_rel!(opt::Opt, value)
```
Relative tolerance on function value. (Defaults to `0`.)
```julia
ftol_abs(opt::Opt)
ftol_abs!(opt::Opt, value)
```
Absolute tolerance on function value. (Defaults to `0`.)
```julia
xtol_rel(opt::Opt)
xtol_rel!(opt::Opt, value)
```
Relative tolerances on the optimization parameters. (Defaults to `0`.)
```julia
xtol_abs(opt::Opt)
xtol_abs!(opt::Opt, value)
```
Absolute tolerances on the optimization parameters. (Defaults to `0`.)
In the case of `xtol_abs`, you can either set it to a scalar (to use the same
tolerance for all inputs) or a vector of length `n` (the dimension specified in
the `Opt` constructor) to use a different tolerance for each parameter.
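For example:
```julia
xtol_abs!(opt, 1e-8)           # one tolerance for every parameter
xtol_abs!(opt, [1e-8, 1e-10])  # a different tolerance for each parameter
```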
```julia
maxeval(opt::Opt)
maxeval!(opt::Opt, value)
```
Stop when the number of function evaluations exceeds `value`. (0 or negative for
no limit, which is the default.)
```julia
maxtime(opt::Opt)
maxtime!(opt::Opt, value)
```
Stop when the optimization time (in seconds) exceeds `value`. (0 or negative for
no limit, which is the default.)
### Forced termination
In certain cases, the caller may wish to force the optimization to halt for some
reason unknown to NLopt, for example, because the user presses Ctrl-C or an error
occurs in the objective function. You can do this by throwing
any exception inside your objective/constraint functions: the optimization will
be halted gracefully, and the same exception will be thrown to the caller. The
Julia equivalent of `nlopt_forced_stop` from the C API is to throw a `ForcedStop`
exception.
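For example, a sketch of an objective that halts the optimization when it encounters
a non-finite point (the function name is illustrative):
```julia
function my_checked_objective(x::Vector, grad::Vector)
    if any(!isfinite, x)
        throw(NLopt.ForcedStop())  # halts the optimization gracefully
    end
    return sum(abs2, x)
end
```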
### Performing the optimization
Once all of the desired optimization parameters have been specified in a given
object `opt::Opt`, you can perform the optimization by calling:
```julia
optf, optx, ret = optimize(opt::Opt, x::AbstractVector)
```
On input, `x` is an array of length `n` (the dimension of the problem from the
`Opt` constructor) giving an initial guess for the optimization parameters. The
return value `optx` is an array containing the optimized values of the
optimization parameters. `optf` contains the optimized value of the objective
function, and `ret` contains a symbol indicating the NLopt return code (below).
Alternatively:
```julia
optf, optx, ret = optimize!(opt::Opt, x::Vector{Float64})
```
is the same but modifies `x` in-place (as well as returning `optx = x`).
### Return values
The possible return values are the same as the [return values in the C API](https://nlopt.readthedocs.io/en/latest/NLopt_Reference/#Return_values),
except that the `NLOPT_` prefix is replaced with `:`. That is, the return
values are like `:SUCCESS` instead of `NLOPT_SUCCESS`.
### Local/subsidiary optimization algorithm
Some of the algorithms, especially `MLSL` and `AUGLAG`, use a different
optimization algorithm as a subroutine, typically for local optimization. You
can change the local search algorithm and its tolerances by setting:
```julia
local_optimizer!(opt::Opt, local_opt::Opt)
```
Here, `local_opt` is another `Opt` object whose parameters are used to determine
the local search algorithm, its stopping criteria, and other algorithm
parameters. (However, the objective function, bounds, and nonlinear-constraint
parameters of `local_opt` are ignored.) The dimension `n` of `local_opt` must
match that of `opt`.
This makes a copy of the `local_opt` object, so you can freely change your
original `local_opt` afterwards without affecting `opt`.
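For example, a sketch pairing the augmented-Lagrangian algorithm with a
gradient-based local search:
```julia
opt = Opt(:AUGLAG, 2)
local_opt = Opt(:LD_LBFGS, 2)  # any local algorithm of matching dimension
xtol_rel!(local_opt, 1e-6)     # stopping criteria for the local search
local_optimizer!(opt, local_opt)
```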
### Initial step size
Just [as in the C API](https://nlopt.readthedocs.io/en/latest/NLopt_Reference/#Initial_step_size),
you can set the initial step sizes for derivative-free optimization algorithms
with:
```julia
initial_step!(opt::Opt, dx::Vector)
```
Here, `dx` is an array of the (nonzero) initial steps for each dimension, or a
single number if you wish to use the same initial steps for all dimensions.
`initial_step(opt::Opt, x::AbstractVector)` returns the initial step that will
be used for a starting guess of `x` in `optimize(opt, x)`.
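For example:
```julia
initial_step!(opt, [0.1, 0.1])      # nonzero initial step in each dimension
dx = initial_step(opt, [0.5, 0.5])  # steps that would be used from this start
```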
### Stochastic population
Just [as in the C API](https://nlopt.readthedocs.io/en/latest/NLopt_Reference/#Stochastic_population),
you can get and set the initial population for stochastic optimization with:
```julia
population(opt::Opt)
population!(opt::Opt, value)
```
A `population` of zero, the default, implies that the heuristic default will be
used as decided upon by individual algorithms.
### Pseudorandom numbers
For stochastic optimization algorithms, NLopt uses pseudorandom numbers
generated by the Mersenne Twister algorithm, based on code from Makoto Matsumoto.
By default, the seed for the random numbers is generated from the system time,
so that you will get a different sequence of pseudorandom numbers each time you
run your program. If you want to use a "deterministic" sequence of pseudorandom
numbers, that is, the same sequence from run to run, you can set the seed by
calling:
```julia
NLopt.srand(seed::Integer)
```
To reset the seed based on the system time, you can call `NLopt.srand_time()`.
Normally, you don't need to call this as it is called automatically. However, it
might be useful if you want to "re-randomize" the pseudorandom numbers after
calling `NLopt.srand` to set a deterministic seed.
### Vector storage for limited-memory quasi-Newton algorithms
Just [as in the C API](https://nlopt.readthedocs.io/en/latest/NLopt_Reference/#Vector_storage_for_limited-memory_quasi-Newton_algorithms),
you can get and set the number `M` of stored vectors for limited-memory
quasi-Newton algorithms via the integer-valued functions:
```julia
vector_storage(opt::Opt)
vector_storage!(opt::Opt, value)
```
The default is `0`, in which case NLopt uses a heuristic nonzero value as
determined by individual algorithms.
### Version number
The version number of NLopt is given by the global variable:
```julia
NLOPT_VERSION::VersionNumber
```
where `VersionNumber` is a built-in Julia type from the Julia standard library.
## Thread safety
The underlying NLopt library is threadsafe; however, re-using the same `Opt`
object across multiple threads is not.
As an example, instead of:
```julia
using NLopt
opt = Opt(:LD_MMA, 2)
# Define problem
solutions = Vector{Any}(undef, 10)
Threads.@threads for i in 1:10
# Not thread-safe because `opt` is re-used
solutions[i] = optimize(opt, rand(2))
end
```
Do instead:
```julia
solutions = Vector{Any}(undef, 10)
Threads.@threads for i in 1:10
# Thread-safe because a new `opt` is created for each thread
opt = Opt(:LD_MMA, 2)
# Define problem
solutions[i] = optimize(opt, rand(2))
end
```
## Author
This module was initially written by [Steven G. Johnson](http://math.mit.edu/~stevenj/),
with subsequent contributions by several other authors (see the git history).
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
import Pkg
Pkg.activate(".") # reproducible environment included
Pkg.instantiate() # install dependencies
using WannierIO
hrdat = read_w90_hrdat("svo_hr.dat")
Rmin, Rmax = extrema(hrdat.Rvectors)
Rsize = Tuple(Rmax .- Rmin .+ 1)
n, m = size(first(hrdat.H))
using StaticArrays, OffsetArrays
# pack the Wannier Hamiltonian into an OffsetArray of static matrices indexed by
# the integer lattice vectors R, dividing each H(R) by its degeneracy weight
H_R = OffsetArray(
Array{SMatrix{n,m,eltype(eltype(hrdat.H)),n*m}}(undef, Rsize...),
map(:, Rmin, Rmax)...
)
for (i, h, n) in zip(hrdat.Rvectors, hrdat.H, hrdat.Rdegens)
H_R[CartesianIndex(Tuple(i))] = h / n
end
using FourierSeriesEvaluators, LinearAlgebra
# H(k) = ∑_R H(R) exp(2πi k⋅R), wrapped as a Hermitian-valued Fourier series
h = HermitianFourierSeries(FourierSeries(H_R, period=1.0))
η = 1e-2 # 10 meV (scattering amplitude)
ω_min = 10
ω_max = 15
p0 = (; η, ω=(ω_min + ω_max)/2) # initial parameters
# Green's function integrand: g(k; ω, η) = tr[((ω + iη)I - H(k))⁻¹]
greens_function(k, h_k, (; η, ω)) = tr(inv((ω+im*η)*I - h_k))
prototype = let k = FourierSeriesEvaluators.period(h)
greens_function(k, h(k), p0)
end
using AutoBZCore
bz = load_bz(CubicSymIBZ(), "svo.wout")
# bz = load_bz(IBZ(), "svo.wout") # works with SymmetryReduceBZ.jl installed
integrand = FourierIntegralFunction(greens_function, h, prototype)
prob_dos = AutoBZProblem(TrivialRep(), integrand, bz, p0; abstol=1e-3)
using HChebInterp
cheb_order = 15
function dos_solver(prob, alg)
solver = init(prob, alg)
ω -> begin
solver.p = (; solver.p..., ω)
solve!(solver).value
end
end
function threaded_dos_solver(prob, alg; nthreads=min(cheb_order, Threads.nthreads()))
solvers = [init(prob, alg) for _ in 1:nthreads]
BatchFunction() do ωs
out = Vector{typeof(prototype)}(undef, length(ωs))
Threads.@threads for i in 1:nthreads
solver = solvers[i]
for j in i:nthreads:length(ωs)
ω = ωs[j]
solver.p = (; solver.p..., ω)
out[j] = solve!(solver).value
end
end
return out
end
end
dos_solver_iai = dos_solver(prob_dos, IAI(QuadGKJL()))
@time greens_iai = hchebinterp(dos_solver_iai, ω_min, ω_max; atol=1e-2, order=cheb_order)
dos_solver_ptr = dos_solver(prob_dos, PTR(; npt=100))
@time greens_ptr = hchebinterp(dos_solver_ptr, ω_min, ω_max; atol=1e-2, order=cheb_order)
using CairoMakie
set_theme!(fontsize=24, linewidth=4)
fig1 = Figure()
ax1 = Axis(fig1[1,1], limits=((10,15), (0,6)), xlabel="ω (eV)", ylabel="SVO DOS (eV⁻¹)")
p1 = lines!(ax1, 10:η/100:15, ω -> -imag(greens_iai(ω))/pi/det(bz.B); label="IAI, η=$η")
axislegend(ax1)
save("iai_svo_dos.pdf", fig1)
fig2 = Figure()
ax2 = Axis(fig2[1,1], limits=((10,15), (0,6)), xlabel="ω (eV)", ylabel="SVO DOS (eV⁻¹)")
p2 = lines!(ax2, 10:η/100:15, ω -> -imag(greens_ptr(ω))/pi/det(bz.B); label="PTR, η=$η")
axislegend(ax2)
save("ptr_svo_dos.pdf", fig2)
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
push!(LOAD_PATH, "../src/")
using Documenter, AutoBZCore
# configure the HTML output (MathJax with the physics package)
format = Documenter.HTML(
mathengine = MathJax3(Dict(
:loader => Dict("load" => ["[tex]/physics"]),
:tex => Dict(
"inlineMath" => [["\$","\$"], ["\\(","\\)"]],
"tags" => "ams",
"packages" => ["base", "ams", "autoload", "physics"],
),
)),
)
makedocs(
    sitename="AutoBZCore.jl",
    modules=[AutoBZCore],
    format=format, # pass the HTML format configured above
    pages = [
"Home" => "index.md",
"Examples" => "examples.md",
"Problems" => "problems.md",
"Integrands" => "integrands.md",
"Algorithms" => "algorithms.md",
"Reference" => "reference.md",
"Extensions" => "extensions.md",
],
)
deploydocs(
repo = "github.com/lxvm/AutoBZCore.jl.git",
)
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
module AtomsBaseExt
using StaticArrays: SMatrix
using AtomsBase
using AutoBZCore: AbstractBZ, FBZ, IBZ, canonical_reciprocal_basis
import AutoBZCore: load_bz
"""
load_bz(::AbstractBZ, ::AbstractSystem; kws...)
Automatically load a BZ using data from AtomsBase.jl-compatible `AbstractSystem`.
"""
function load_bz(bz::AbstractBZ, system::AbstractSystem)
@assert all(==(Periodic()), boundary_conditions(system))
bz_ = convert(AbstractBZ{n_dimensions(system)}, bz)
bb = bounding_box(system)
A = reinterpret(reshape, eltype(eltype(bb)), bb)
return load_bz(bz_, A)
end
load_bz(system::AbstractSystem) = load_bz(FBZ(), system)
function load_bz(bz::IBZ, system::AbstractSystem; kws...)
@assert all(==(Periodic()), boundary_conditions(system))
d = n_dimensions(system)
bz_ = convert(AbstractBZ{d}, bz)
bb = bounding_box(system)
A = SMatrix{d,d}(reinterpret(reshape, eltype(eltype(bb)), bb))
B = canonical_reciprocal_basis(A)
species = atomic_symbol(system)
pos = position(system)
atom_pos = reinterpret(reshape, eltype(eltype(pos)), pos)
return load_bz(bz_, A, B, species, atom_pos; kws..., coordinates="Cartesian")
end
end
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
module SymmetryReduceBZExt
using LinearAlgebra
using Polyhedra: Polyhedron, polyhedron, doubledescription, hrepiscomputed, hrep
using StaticArrays
using SymmetryReduceBZ
using AutoBZCore: canonical_reciprocal_basis, SymmetricBZ, IBZ, DefaultPolyhedron,
CubicLimits, AbstractIteratedLimits, load_limits
import AutoBZCore: IteratedIntegration.fixandeliminate, IteratedIntegration.segments
include("ibzlims.jl")
function get_segs(vert::AbstractMatrix)
rtol = atol = sqrt(eps(eltype(vert)))
uniquepts=Vector{eltype(vert)}(undef, size(vert, 1))
numpts = 0
for i in axes(vert,1)
v = vert[i,end]
test = isapprox(v, atol=atol, rtol=rtol)
if !any(test, @view(uniquepts[begin:begin+numpts-1,end]))
numpts += 1
uniquepts[numpts] = v
end
end
@assert numpts >= 2 uniquepts
resize!(uniquepts,numpts)
sort!(uniquepts)
return uniquepts
end
struct Polyhedron3{T<:Real} <: AbstractIteratedLimits{3,T}
face_coord::Vector{Matrix{T}}
segs::Vector{T}
end
function segments(ph::Polyhedron3, dim)
@assert dim == 3
return ph.segs
end
struct Polygon2{T<:Real} <: AbstractIteratedLimits{2,T}
vert::Matrix{T}
segs::Vector{T}
end
function segments(pg::Polygon2, dim)
@assert dim == 2
return pg.segs
end
function fixandeliminate(ph::Polyhedron3, z, ::Val{3})
pg_vert = pg_vert_from_zslice(z, ph.face_coord)
segs = get_segs(pg_vert)
return Polygon2(pg_vert, segs)
end
function fixandeliminate(pg::Polygon2, y, ::Val{2})
return CubicLimits(xlim_from_yslice(y, pg.vert)...)
end
function (::IBZ{n,Polyhedron})(real_latvecs, atom_types, atom_pos, coordinates; makeprim=false, convention="ordinary") where {n}
ibz_cart = calc_ibz(real_latvecs, atom_types, atom_pos, coordinates, makeprim, convention)
ibz_lat = real_latvecs' * ibz_cart # rotate Cartesian basis to lattice basis in reciprocal coordinates
hrepiscomputed(ibz_lat) || hrep(ibz_lat) # precompute hrep if it isn't already
return load_limits(ibz_lat)
end
function (::IBZ{3,DefaultPolyhedron})(real_latvecs, atom_types, atom_pos, coordinates; makeprim=false, convention="ordinary")
ibz_cart = calc_ibz(real_latvecs, atom_types, atom_pos, coordinates, makeprim, convention)
# tri_idx = hull.simplices
# ph_vert = hull.points * real_latvecs
# face_idx = faces_from_triangles(tri_idx, ph_vert)
# face_coord = face_coord_from_idx(face_idx, ph_vert)
ibz_lat = real_latvecs' * ibz_cart
ph_vert = permutedims(reduce(hcat, SymmetryReduceBZ.Utilities.vertices(ibz_lat)))
face_coord = map(x -> permutedims(reduce(hcat, x)), SymmetryReduceBZ.Utilities.get_uniquefacets(ibz_lat))
segs = get_segs(ph_vert)
return Polyhedron3(face_coord, segs)
end
fixsign(x) = iszero(x) ? abs(x) : x
function tidy_vertices!(points, digits)
for (i, p) in enumerate(points)
points[i] = fixsign(round(p; digits=digits))
end
return points
end
"""
load_ibz(::IBZ, A, B, species, positions; coordinates="lattice", rtol=nothing, atol=1e-9, digits=12)
Use `SymmetryReduceBZ` to automatically load the IBZ. Since this method lives in
an extension module, make sure you write `using SymmetryReduceBZ` before `using
AutoBZ`.
"""
function load_ibz(bz::IBZ{N}, A::SMatrix{N,N}, B::SMatrix{N,N}, species::AbstractVector, positions::AbstractMatrix;
coordinates="lattice", rtol=nothing, atol=1e-9, digits=12) where {N}
# we need to convert arguments to unit-free since SymmetryReduceBZ doesn't support them
# and our limits objects must be unitless
real_latvecs = A / oneunit(eltype(A))
atom_species = unique(species)
atom_types = map(e -> findfirst(==(e), atom_species) - 1, species)
atom_pos = positions / oneunit(eltype(positions))
# get symmetries
sg = SymmetryReduceBZ.Symmetry.calc_spacegroup(real_latvecs, atom_types, atom_pos, coordinates)
pg_ = SymmetryReduceBZ.Utilities.remove_duplicates(sg[2], rtol=something(rtol, sqrt(eps(float(maximum(real_latvecs))))), atol=atol)
pg = Ref(real_latvecs') .* pg_ .* Ref(inv(real_latvecs')) # rotate operator from Cartesian basis to lattice basis in reciprocal coordinates
syms = convert(Vector{SMatrix{3,3,Float64,9}}, pg) # deal with type instability in SymmetryReduceBZ
map!(s -> fixsign.(round.(s, digits=digits)), syms, syms) # clean up matrix elements
# get convex hull
hull = bz(real_latvecs, atom_types, atom_pos, coordinates)
# now limits and symmetries should be in reciprocal coordinates in the lattice basis
return SymmetricBZ(A, B, hull, syms)
end
end
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
module UnitfulExt
using Unitful: Quantity, unit, ustrip
import AutoBZCore: canonical_reciprocal_basis, canonical_ptr_basis
function canonical_reciprocal_basis(A::AbstractMatrix{<:Quantity})
return canonical_reciprocal_basis(ustrip.(A)) / unit(eltype(A))
end
function canonical_ptr_basis(B::AbstractMatrix{<:Quantity})
return canonical_ptr_basis(ustrip.(B))
end
end
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
module WannierIOExt
using WannierIO
using AutoBZCore: AbstractBZ, FBZ, IBZ
import AutoBZCore: load_bz
"""
load_bz(::AbstractBZ, filename; kws...)
Automatically load a BZ using data from a "seedname.wout" file.
"""
function load_bz(bz::AbstractBZ, filename::String; atol=1e-5)
out = WannierIO.read_wout(filename)
bz_ = convert(AbstractBZ{3}, bz)
return load_bz(bz_, out.lattice, out.recip_lattice; atol=atol)
end
load_bz(filename::String; kws...) = load_bz(FBZ(), filename; kws...)
function load_bz(bz::IBZ, filename::String; kws...)
out = WannierIO.read_wout(filename)
bz_ = convert(AbstractBZ{3}, bz)
atom_pos = reinterpret(reshape, eltype(eltype(out.atom_positions)), out.atom_positions)
return load_bz(bz_, out.lattice, out.recip_lattice, out.atom_labels, atom_pos; kws..., coordinates="lattice")
end
end
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
import SymmetryReduceBZ.Utilities: sortpts_perm
import SymmetryReduceBZ.Utilities: sortpts2D
using LinearAlgebra
"""
get_lim(vert)
Compute limits of integration for an integral over the final dimension of a
convex polytope of n vertices.
# Arguments
- `vert::Matrix{Float64}`: n x d array of vertices, where d is the final
dimension of the polytope, to be integrated over
# Returns
- `lim::Array{Float64, 1}`: 2-element array of limits of integration
"""
function get_lim(vert::Matrix{Float64})
return extrema(vert[:, end])
end
"""
faces_from_triangles(tri_idx, ph_vert)
Get faces of a polyhedron, represented by their indices, from its triangulation
# Arguments
- `tri_idx::Matrix{Int32}`: nt x 3 array of indices of vertices of nt triangles
forming a triangulation of the polyhedron
- `ph_vert::Matrix{Float64}`: nv x 3 array of coordinates of nv polyhedron
vertices. `ph_vert[tri_idx[i,j],:]` gives the xyz coordinates of the jth vertex
of the ith triangle.
# Returns
- `faces::Vector{Vector{Int32}}`: Vector of length nf of vectors of length nv_i
containing the unordered indices of the nv_i vertices contained in the ith face
of the polyhedron, where nf is the number of faces.
# Note
If you are obtaining the inputs to this function from a 3D Chull object, the
triangle indices tri_idx are given by the "simplices" attribute, and the vertex
coordinates ph_vert are given by the "points" attribute.
"""
function faces_from_triangles(tri_idx::Matrix{Int32}, ph_vert::Matrix{Float64})
# Step 1: Get the normal vectors for each triangle. Note that the triangle
# vertices are not necessarily given with respect to a consistent orientation,
# so we can only get the normal vectors up to a sign.
nvec = zeros(Float64, size(tri_idx))
for i = 1:size(tri_idx, 1)
# Get the coordinates of the vertices of the ith triangle
tri = ph_vert[tri_idx[i, :], :]
# Get the unit normal vector to the triangle
u = cross(tri[2, :] - tri[1, :], tri[3, :] - tri[1, :])
nvec[i, :] = u / norm(u)
end
# Step 2: Two triangles are in the same face if and only if (1) they share the
# same normal vector, up to a sign, and (2) they are connected via a
# continuous path through triangles which also have the same normal vector.
# (1) alone is insufficient because a polygon can contain two parallel faces,
# but (2) distinguishes this possibility. First, organize the triangles into
# groups with the same normal vector up to a sign.
# Get the unique unsigned normal vectors up to a tolerance
isclose(x, y) = norm(x - y) < 1e-10 || norm(x + y) < 1e-10 # Define what it means for two vectors to be close
nvec_unique = Vector{Vector{Float64}}() # Unique normal vectors
for i = 1:size(nvec, 1) # Loop through all normal vectors
# Check if ith normal vector is already in array of unique normal vectors
u = nvec[i, :]
nvec_is_unique = true # Initialize as true
for j = 1:length(nvec_unique)
if isclose(u, nvec_unique[j])
nvec_is_unique = false
break
end
end
if nvec_is_unique
push!(nvec_unique, u) # If not, add it
end
end
# Divide triangles into groups of with shared unsigned normal vectors
ngrp = size(nvec_unique, 1) # Number of groups
grp_idx = Vector{Vector{Int32}}(undef, ngrp) # Indices of triangles in each group
for i = 1:ngrp # Place each triangle into a group
grp_idx[i] = Vector{Int32}() # Initialize as empty
# Get indices of all triangles with ith unique normal vector
for j = 1:size(nvec, 1)
if isclose(nvec[j, :], nvec_unique[i])
push!(grp_idx[i], j)
end
end
end
# Step 3: Next, split each group into subgroups of triangles which share at
# least one vertex with another triangle in the group. Note that triangles in
# parallel faces of a polyhedron will not share any vertices. Note also that
# there can be at most two subgroups.
face_idx = Vector{Vector{Int32}}() # Indices of vertices in each face
for i = 1:ngrp # Loop through groups
grpi_idx = grp_idx[i] # Indices of triangles in ith group
# Place vertices of first triangle into subgroup 1 and remove from group
subgrp1_vert_idx = tri_idx[grpi_idx[1], :] # Union of vertices of triangles in subgroup 1
deleteat!(grpi_idx, 1)
# Determine subgroup 1 by repeatedly looping through triangles in group
placed_a_triangle = true # True if a triangle was placed in subgroup 1 in previous iteration
while (placed_a_triangle) # Keep looping through triangles in group until all are placed
placed_a_triangle = false # Initialize as false
for j = 1:length(grpi_idx) # Loop through remaining triangles
# If jth triangle has a vertex which appears in subgroup 1, add its
# vertices to subgroup 1 and delete it from group
if ((tri_idx[grpi_idx[j], 1]) in subgrp1_vert_idx) || ((tri_idx[grpi_idx[j], 2]) in subgrp1_vert_idx) || ((tri_idx[grpi_idx[j], 3]) in subgrp1_vert_idx)
subgrp1_vert_idx = union(subgrp1_vert_idx, tri_idx[grpi_idx[j], :])
deleteat!(grpi_idx, j)
placed_a_triangle = true
break
end
end
end
# Subgroup 1 is now complete and forms a face: add it to the list of faces
push!(face_idx, subgrp1_vert_idx)
# If there are any triangles left in the group, they are subgroup 2, and
# also form a face; add to the list of faces
if length(grpi_idx) > 0
push!(face_idx, unique(tri_idx[grpi_idx, :]))
end
end
return face_idx
end
"""
face_coord_from_idx(face_idx, ph_vert)
Get coordinates of the vertices of the faces of a polyhedron from their indices.
# Arguments
- `face_idx::Vector{Vector{Int32}}`: Vector of length nf of vectors of length
nv_i containing the unordered indices of the nv_i vertices contained in the ith
face of the polyhedron, where nf is the number of faces.
- `ph_vert::Matrix{Float64}`: nv x 3 array of coordinates of the nv polyhedron
vertices. `ph_vert[face_idx[i][j],:]` gives the xyz coordinates of the jth
vertex of the ith face of the polyhedron.
# Returns
- `face_coord::Vector{Matrix{Float64}}`: Vector of length nf of matrices of size
nv_i x 3 containing the (clockwise or counter-clockwise) ordered coordinates
of the vertices of the ith face.
"""
function face_coord_from_idx(face_idx::Vector{Vector{Int32}}, ph_vert::Matrix{Float64})
# Loop through face vertex index array and get the coordinates of the vertices
# of each face
face_coord = Matrix{Float64}[]
for i in 1:length(face_idx)
push!(face_coord, ph_vert[face_idx[i], :]) # Get coordinates of vertices
p = sortpts_perm(face_coord[i]') # Sort vertices
face_coord[i] = face_coord[i][p, :]
end
return face_coord
end
"""
pg_vert_from_zslice(z, face_coord)
Get vertices of polygon formed by the intersection of a plane of constant z with
the faces of a polyhedron.
# Arguments
- `z::Float64`: z coordinate of plane
- `face_coord::Vector{Matrix{Float64}}`: Vector of length nf of matrices of size
nv_i x 3 containing the (clockwise or counter-clockwise) ordered coordinates of
the vertices of the ith face.
# Returns
- `pg_vert::Matrix{Float64}`: nv x 2 matrix of xy coordinates of the nv
(clockwise or counter-clockwise) ordered vertices of the polygon formed by the
intersection of the z plane with the polyhedron
# Note
Vertices which are shared between faces should be identical in floating point
arithmetic; that is, they should have come from a common array listing the
unique vertices of the polyhedron.
z must be between the minimum and maximum z coordinates of the polyhedron, and
not equal to one of them.
"""
function pg_vert_from_zslice(z::Float64, face_coord::Vector{Matrix{Float64}})
pg_vert = Vector{Vector{Float64}}() # Matrix of vertices of the polygon
for i = 1:length(face_coord) # Loop through faces
face = face_coord[i]
# Loop through ordered pairs of vertices in the face, and check whether the
# line segment connecting them intersects the z plane.
nvi = size(face, 1)
for j = 1:nvi
jp1 = mod1(j + 1, nvi)
z1 = face[j, 3]
z2 = face[jp1, 3]
if (z1 <= z && z2 >= z) || (z1 >= z && z2 <= z)
# Find the point of intersection and add it to the list of polygon
# vertices
t = (z - z1) / (z2 - z1)
# dot syntax removes some allocations/array copies as do views
v = @. t * @view(face[jp1, 1:2]) + (1 - t) * @view(face[j, 1:2])
push!(pg_vert, v)
end
end
end
pg_vert1 = stack(pg_vert)' # new variable name since type may change
# pg_vert1 = hcat(pg_vert...)' # not type stable
# There will be redundant vertices in the verts array because of shared
# edges between faces; these should be exactly equal (in floating point
# arithmetic) because the vertices in each face should come from a common
# list of unique vertices of the polyhedron. Remove the redundant vertices.
pg_vert2 = unique(pg_vert1, dims=1)
# Sort the points in the polygon by their angle with respect to the centroid
p = sortpts2D(pg_vert2') # permutation which sorts the points
pg_vert3 = pg_vert2[p, :]
return pg_vert3
end
"""
xlim_from_yslice(y, pg_vert)
Get x coordinates of the intersection between a line of constant y and the
boundary of a polygon.
# Arguments
- `y::Float64`: y coordinate of line
- `pg_vert::Matrix{Float64}`: nv x 2 matrix of xy coordinates of the nv
(clockwise or counter-clockwise) ordered vertices of the polygon
# Returns
- `xlims::Vector{Float64}`: pair of x coordinates of intersection between the
line and the polygon boundary
# Note
y must be between the minimum and maximum y coordinates of the polygon, and not
equal to one of them.
"""
function xlim_from_yslice(y::Float64, pg_vert::Matrix{Float64})
# Loop through ordered pairs of vertices, and check whether the line segment
# connecting them intersects the line of constant y
lb = NaN # undefined Float64
k = 0
nv = size(pg_vert, 1)
for j = 1:nv
jp1 = mod1(j + 1, nv)
y1 = pg_vert[j, 2]
y2 = pg_vert[jp1, 2]
# If y1 and y2 are on opposite sides of y, then line intersects the edge. If
# y = y1, then line intersects a vertex and we include it. If y = y2, then
# line intersects a vertex, but we don't include it to avoid double-counting.
if (y1 < y && y2 > y) || (y1 > y && y2 < y) || y1 == y
# Find the point of intersection and add it to the list of intersection points
t = (y - y1) / (y2 - y1)
k += 1
lim = t * pg_vert[jp1, 1] + (1 - t) * pg_vert[j, 1]
if k == 1
lb = lim
elseif (k == 2) # Found two unique intersection points
return extrema((lb, lim))
end
end
end
# If we reach the end and have only found one intersection point, then the
# line is tangent to a single vertex, and both x limits are the same
@assert k == 1 "could not find intersection with polygon"
return (lb, lb)
end
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
"""
A package providing a common interface to integration algorithms intended for applications
including Brillouin-zone integration and Wannier interpolation. Its design is influenced by
high-level libraries like Integrals.jl: it implements the CommonSolve.jl interface and uses
Julia's multiple dispatch to provide the same interface for integrands with optimized
in-place, batched, and Fourier series evaluation.
### Quickstart
As a first example, we integrate sine over [0,1] as a function of its period.
```
julia> using AutoBZCore
julia> prob = IntegralProblem((x,p) -> sin(p*x), (0, 1), 0.3);
julia> solve(prob, QuadGKJL()).value # solves the integral of sin(p*x) over [0,1] with p=0.3
0.14887836958131329
```
Notice that we construct an [`IntegralProblem`](@ref) object that we can [`solve`](@ref) at
with a choice of algorithm. For more examples, see the
documentation.
### Features
Special integrand interfaces
- [`IntegralFunction`](@ref): generic user integrand of the form `f(x, p)`
- [`InplaceIntegralFunction`](@ref): allows an integrand to write its result inplace to an array
- [`InplaceBatchIntegralFunction`](@ref): allows user-side parallelization on e.g. shared memory,
distributed memory, or the gpu
- [`CommonSolveIntegralFunction`](@ref): define an integrand that also solves a problem
- [`FourierIntegralFunction`](@ref): efficient evaluation of Fourier series for cubatures with
hierarchical grids
Quadrature algorithms:
- Trapezoidal rule and FastGaussQuadrature.jl: [`QuadratureFunction`](@ref)
- h-adaptive quadrature (Gauss-Kronrod): [`QuadGKJL`](@ref)
- h-adaptive cubature (Genz-Malik): [`HCubatureJL`](@ref)
- p-adaptive, symmetrized Monkhorst-Pack: [`AutoSymPTRJL`](@ref)
Meta-Algorithms:
- Iterated integration: [`NestedQuad`](@ref)
# Extended help
If you experience issues with AutoBZCore.jl, please report a bug on the [GitHub
page](https://github.com/lxvm/AutoBZCore.jl) to contact the developers.
"""
module AutoBZCore
using LinearAlgebra: I, norm, det, checksquare, isdiag, Diagonal, tr, diag, eigen, Hermitian
using StaticArrays: SVector, SMatrix, sacollect
using FunctionWrappers: FunctionWrapper
using ChunkSplitters: chunks, getchunk
using AutoSymPTR
using FourierSeriesEvaluators
using IteratedIntegration
using QuadGK: quadgk, quadgk!, BatchIntegrand
using HCubature: hcubature, hquadrature
using FourierSeriesEvaluators: workspace_allocate, workspace_contract!, workspace_evaluate!, workspace_evaluate, period
using IteratedIntegration: limit_iterate, interior_point
using CommonSolve: solve
import CommonSolve: init, solve!
export init, solve!, solve
include("domains.jl")
export IntegralFunction, InplaceIntegralFunction, InplaceBatchIntegralFunction
export CommonSolveIntegralFunction
export IntegralProblem
include("interfaces.jl")
export QuadGKJL, HCubatureJL, QuadratureFunction
include("algorithms.jl")
export AuxQuadGKJL, ContQuadGKJL, MeroQuadGKJL
include("algorithms_iterated.jl")
export MonkhorstPack, AutoSymPTRJL
include("algorithms_autosymptr.jl")
export NestedQuad#, AbsoluteEstimate, EvalCounter
include("algorithms_meta.jl")
export SymmetricBZ, nsyms
export load_bz, FBZ, IBZ, InversionSymIBZ, CubicSymIBZ
export AbstractSymRep, UnknownRep, TrivialRep
export AutoBZProblem
export IAI, PTR, AutoPTR, TAI
include("brillouin.jl")
export FourierIntegralFunction, CommonSolveFourierIntegralFunction
include("fourier.jl")
export DOSProblem
include("dos_interfaces.jl")
export GGR
include("dos_algorithms.jl")
include("dos_ggr.jl")
end
# ── AutoBZCore v0.4.1 (MIT) · code · https://github.com/lxvm/AutoBZCore.jl.git ──
# Methods an algorithm must define
# - init_cacheval
# - do_integral
"""
QuadGKJL(; order = 7, norm = norm)
Duplicate of the QuadGKJL provided by Integrals.jl.
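## Example
A minimal sketch of 1d adaptive quadrature, following the same `IntegralProblem` interface
as the package quickstart (the exact result is `sin(2)/2 ≈ 0.4546`):
```
prob = IntegralProblem((x, p) -> cos(p*x), (0.0, 1.0), 2.0)
solve(prob, QuadGKJL()).value
```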
"""
struct QuadGKJL{F} <: IntegralAlgorithm
order::Int
norm::F
end
function QuadGKJL(; order = 7, norm = norm)
return QuadGKJL(order, norm)
end
function init_midpoint_scale(a::T, b::T) where {T}
# we try to reproduce the initial midpoint used by QuadGK, and scale just needs right units
s = float(oneunit(T))
if one(T) isa Real
x = if (infa = isinf(a)) & (infb = isinf(b))
float(zero(T))
elseif infa
float(b - oneunit(b))
elseif infb
float(a + oneunit(a))
else
(a+b)/2
end
return x, s
else
return (a+b)/2, s
end
end
init_midpoint_scale(dom::PuncturedInterval) = init_midpoint_scale(endpoints(dom)...)
function init_segbuf(prototype, segs, alg)
x, s = init_midpoint_scale(segs)
u = x/oneunit(x)
TX = typeof(u)
fx_s = prototype * s/oneunit(s)
TI = typeof(fx_s)
TE = typeof(alg.norm(fx_s))
return IteratedIntegration.alloc_segbuf(TX, TI, TE)
end
function init_cacheval(f::IntegralFunction, dom, p, alg::QuadGKJL; kws...)
segs = PuncturedInterval(dom)
prototype = get_prototype(f, get_prototype(segs), p)
return init_segbuf(prototype, segs, alg)
end
function init_cacheval(f::InplaceIntegralFunction, dom, p, alg::QuadGKJL; kws...)
segs = PuncturedInterval(dom)
prototype = get_prototype(f, get_prototype(segs), p)
return init_segbuf(prototype, segs, alg), similar(prototype)
end
function init_cacheval(f::InplaceBatchIntegralFunction, dom, p, alg::QuadGKJL; kws...)
segs = PuncturedInterval(dom)
pt = get_prototype(segs)
prototype = get_prototype(f, pt, p)
prototype isa AbstractVector || throw(ArgumentError("QuadGKJL only supports batch integrands with vector outputs"))
pts = zeros(typeof(pt), 2*alg.order+1)
upts = pts / pt
return init_segbuf(first(prototype), segs, alg), similar(prototype), pts, upts
end
function init_cacheval(f::CommonSolveIntegralFunction, dom, p, alg::QuadGKJL; kws...)
segs = PuncturedInterval(dom)
x = get_prototype(segs)
cache, integrand, prototype = _init_commonsolvefunction(f, dom, p; x)
return init_segbuf(prototype, segs, alg), cache, integrand
end
function do_integral(f, dom, p, alg::QuadGKJL, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
# we need to strip units from the limits since infinity transformations change the units
# of the limits, which can break the segbuf
u = oneunit(eltype(dom))
usegs = map(x -> x/u, dom)
atol = isnothing(abstol) ? abstol : abstol/u
val, err = call_quadgk(f, p, u, usegs, cacheval; maxevals = maxiters, rtol = reltol, atol, order = alg.order, norm = alg.norm)
value = u*val
retcode = err < max(something(atol, zero(err)), alg.norm(val)*something(reltol, isnothing(atol) ? sqrt(eps(one(eltype(usegs)))) : 0)) ? Success : Failure
stats = (; error=u*err)
return IntegralSolution(value, retcode, stats)
end
function call_quadgk(f::IntegralFunction, p, u, usegs, cacheval; kws...)
quadgk(x -> f.f(u*x, p), usegs...; kws..., segbuf=cacheval)
end
function call_quadgk(f::InplaceIntegralFunction, p, u, usegs, cacheval; kws...)
# TODO allocate everything in the QuadGK.InplaceIntegrand in the cacheval
quadgk!((y, x) -> f.f!(y, u*x, p), cacheval[2], usegs...; kws..., segbuf=cacheval[1])
end
function call_quadgk(f::InplaceBatchIntegralFunction, p, u, usegs, cacheval; kws...)
pts = cacheval[3]
g = BatchIntegrand((y, x) -> f.f!(y, resize!(pts, length(x)) .= u .* x, p), cacheval[2], cacheval[4]; max_batch=f.max_batch)
quadgk(g, usegs...; kws..., segbuf=cacheval[1])
end
function call_quadgk(f::CommonSolveIntegralFunction, p, u, usegs, cacheval; kws...)
# cache = cacheval[2] could call do_solve!(cache, f, x, p) to fully specialize
integrand = cacheval[3]
quadgk(x -> integrand(u * x, p), usegs...; kws..., segbuf=cacheval[1])
end
"""
HCubatureJL(; norm=norm, initdiv=1)
Multi-dimensional h-adaptive cubature from HCubature.jl.
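## Example
A sketch of 2d cubature over the unit square, assuming `SVector` endpoints are accepted to
describe the box (the exact result is `(1 - cos(1))^2 ≈ 0.2113`):
```
using StaticArrays
prob = IntegralProblem((x, p) -> prod(sin, p .* x), (SVector(0.0, 0.0), SVector(1.0, 1.0)), 1.0)
solve(prob, HCubatureJL(); abstol=1e-8).value
```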
"""
struct HCubatureJL{N} <: IntegralAlgorithm
norm::N
initdiv::Int
end
HCubatureJL(; norm=norm, initdiv=1) = HCubatureJL(norm, initdiv)
function init_cacheval(f::IntegralFunction, dom, p, ::HCubatureJL; kws...)
# TODO utilize hcubature_buffer
return
end
function init_cacheval(f::CommonSolveIntegralFunction, dom, p, ::HCubatureJL; kws...)
cache, integrand, = _init_commonsolvefunction(f, dom, p)
return cache, integrand
end
function do_integral(f, dom, p, alg::HCubatureJL, cacheval; reltol = 0, abstol = 0, maxiters = typemax(Int))
a, b = endpoints(dom)
g = hcubature_integrand(f, p, a, b, cacheval)
routine = a isa Number ? hquadrature : hcubature
value, error = routine(g, a, b; norm = alg.norm, initdiv = alg.initdiv, atol=abstol, rtol=reltol, maxevals=maxiters)
retcode = error < max(something(abstol, zero(error)), alg.norm(value)*something(reltol, isnothing(abstol) ? sqrt(eps(eltype(a))) : 0)) ? Success : Failure # reltol falls back to 0 when an abstol is supplied, matching the other algorithms
stats = (; error)
return IntegralSolution(value, retcode, stats)
end
function hcubature_integrand(f::IntegralFunction, p, a, b, cacheval)
x -> f.f(x, p)
end
function hcubature_integrand(f::CommonSolveIntegralFunction, p, a, b, cacheval)
integrand = cacheval[2]
return x -> integrand(x, p)
end
"""
trapz(n::Integer)
Return the weights and nodes on the standard interval [-1,1] of the [trapezoidal
rule](https://en.wikipedia.org/wiki/Trapezoidal_rule).
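## Example
```
x, w = trapz(5)
sum(w .* x.^2) # 0.75 for n=5, approximating the exact integral 2/3 with O(1/n^2) error
```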
"""
function trapz(n::Integer)
@assert n > 1
r = range(-1, 1, length=n)
x = collect(r)
h = step(r)
halfh = h/2
w = [ (i == 1) || (i == n) ? halfh : h for i in 1:n ]
return (x, w)
end
"""
QuadratureFunction(; fun=trapz, npt=50, nthreads=1)
Quadrature rule for the standard interval [-1,1] computed from a function `x, w = fun(npt)`.
The nodes and weights should be set so the integral of `f` on [-1,1] is `sum(w .* f.(x))`.
The default quadrature rule is [`trapz`](@ref), although other packages provide rules, e.g.
using FastGaussQuadrature
alg = QuadratureFunction(fun=gausslegendre, npt=100)
`nthreads` sets the number of threads used to parallelize the quadrature only when the
integrand is a batch integrand (e.g. [`InplaceBatchIntegralFunction`](@ref)), in which case
the user must parallelize the integrand evaluations. For no threading set `nthreads=1`.
"""
struct QuadratureFunction{F} <: IntegralAlgorithm
fun::F
npt::Int
nthreads::Int
end
QuadratureFunction(; fun=trapz, npt=50, nthreads=1) = QuadratureFunction(fun, npt, nthreads)
function init_rule(dom, alg::QuadratureFunction)
x, w = alg.fun(alg.npt)
return [(w,x) for (w,x) in zip(w,x)]
end
function init_autosymptr_cache(f::IntegralFunction, dom, p, bufsize; kws...)
return (; buffer=nothing)
end
function init_autosymptr_cache(f::InplaceIntegralFunction, dom, p, bufsize; kws...)
x = get_prototype(dom)
proto = get_prototype(f, x, p)
y = similar(proto)
ytmp = similar(proto)
I = y * prod(x)
Itmp = similar(I)
return (; buffer=nothing, I, Itmp, y, ytmp)
end
function init_autosymptr_cache(f::InplaceBatchIntegralFunction, dom, p, bufsize; kws...)
x0 = get_prototype(dom)
proto=get_prototype(f, x0, p)
return (; buffer=similar(proto, bufsize), y=similar(proto, bufsize), x=Vector{typeof(x0)}(undef, bufsize))
end
function init_autosymptr_cache(f::CommonSolveIntegralFunction, dom, p, bufsize; kws...)
cache, integrand, = _init_commonsolvefunction(f, dom, p)
return (; buffer=nothing, cache, integrand)
end
function init_cacheval(f, dom, p, alg::QuadratureFunction; kws...)
rule = init_rule(dom, alg)
cache = init_autosymptr_cache(f, dom, p, alg.npt; kws...)
return (; rule, cache...)
end
function do_integral(f, dom, p, alg::QuadratureFunction, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
rule = cacheval.rule; buffer = cacheval.buffer
segs = segments(dom)
g = autosymptr_integrand(f, p, segs, cacheval)
A = sum(1:length(segs)-1) do i
a, b = segs[i], segs[i+1]
s = (b-a)/2
arule = AutoSymPTR.AffineQuad(rule, s, a, 1, s)
return AutoSymPTR.quadsum(arule, g, s, buffer)
end
return IntegralSolution(A, Success, (; numevals = length(cacheval.rule)*(length(segs)-1)))
end
function autosymptr_integrand(f::IntegralFunction, p, segs, cacheval)
x -> f.f(x, p)
end
function autosymptr_integrand(f::InplaceIntegralFunction, p, segs, cacheval)
AutoSymPTR.InplaceIntegrand((y,x) -> f.f!(y,x,p), cacheval.I, cacheval.Itmp, cacheval.y, cacheval.ytmp)
end
function autosymptr_integrand(f::InplaceBatchIntegralFunction, p, segs, cacheval)
AutoSymPTR.BatchIntegrand((y,x) -> f.f!(y,x,p), cacheval.y, cacheval.x, max_batch=f.max_batch)
end
function autosymptr_integrand(f::CommonSolveIntegralFunction, p, segs, cacheval)
integrand = cacheval.integrand
return x -> integrand(x, p)
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 3900 | # We could move these into an extension, although QuadratureFunction also uses AutoSymPTR.jl
# for evaluation
"""
MonkhorstPack(; npt=50, syms=nothing, nthreads=1)
Periodic trapezoidal rule with a fixed number of k-points per dimension, `npt`,
using the `PTR` rule from [AutoSymPTR.jl](https://github.com/lxvm/AutoSymPTR.jl).
`nthreads` sets the number of threads used to parallelize the quadrature only when the
integrand is a batch integrand (e.g. [`InplaceBatchIntegralFunction`](@ref)), in which case
the user must parallelize the integrand evaluations. For no threading set `nthreads=1`.
**The caller should check that the integral is converged w.r.t. `npt`**.
"""
struct MonkhorstPack{S} <: IntegralAlgorithm
npt::Int
syms::S
nthreads::Int
end
MonkhorstPack(; npt=50, syms=nothing, nthreads=1) = MonkhorstPack(npt, syms, nthreads)
function init_rule(dom, alg::MonkhorstPack)
# rule = AutoSymPTR.MonkhorstPackRule(alg.syms, alg.a, alg.nmin, alg.nmax, alg.n₀, alg.Δn)
# return rule(eltype(dom), Val(ndims(dom)))
if alg.syms === nothing
return AutoSymPTR.PTR(eltype(dom), Val(ndims(dom)), alg.npt)
else
return AutoSymPTR.MonkhorstPack(eltype(dom), Val(ndims(dom)), alg.npt, alg.syms)
end
end
function init_cacheval(f, dom, p, alg::MonkhorstPack; kws...)
b = get_basis(dom)
rule = init_rule(b, alg)
cache = init_autosymptr_cache(f, b, p, alg.nthreads; kws...)
return (; rule, cache...)
end
function do_integral(f, dom, p, alg::MonkhorstPack, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
b = get_basis(dom)
g = autosymptr_integrand(f, p, b, cacheval)
value = cacheval.rule(g, b, cacheval.buffer)
retcode = Success
stats = (; numevals=length(cacheval.rule))
return IntegralSolution(value, retcode, stats)
end
"""
AutoSymPTRJL(; norm=norm, a=1.0, nmin=50, nmax=1000, n₀=6.0, Δn=log(10), keepmost=2, syms=nothing, nthreads=1)
Periodic trapezoidal rule with automatic convergence to tolerances passed to the
solver with respect to `norm` using the routine `autosymptr` from
[AutoSymPTR.jl](https://github.com/lxvm/AutoSymPTR.jl).
`nthreads` sets the number of threads used to parallelize the quadrature only when the
integrand is a batch integrand (e.g. [`InplaceBatchIntegralFunction`](@ref)), in which case
the user must parallelize the integrand evaluations. For no threading set `nthreads=1`.
**This algorithm is the most efficient for smooth integrands**.
"""
struct AutoSymPTRJL{F,S} <: IntegralAlgorithm
norm::F
a::Float64
nmin::Int
nmax::Int
n₀::Float64
Δn::Float64
keepmost::Int
syms::S
nthreads::Int
end
function AutoSymPTRJL(; norm=norm, a=1.0, nmin=50, nmax=1000, n₀=6.0, Δn=log(10), keepmost=2, syms=nothing, nthreads=1)
return AutoSymPTRJL(norm, a, nmin, nmax, n₀, Δn, keepmost, syms, nthreads)
end
function init_rule(dom, alg::AutoSymPTRJL)
return AutoSymPTR.MonkhorstPackRule(alg.syms, alg.a, alg.nmin, alg.nmax, alg.n₀, alg.Δn)
end
function init_cacheval(f, dom, p, alg::AutoSymPTRJL; kws...)
b = get_basis(dom)
rule = init_rule(dom, alg)
rule_cache = AutoSymPTR.alloc_cache(eltype(dom), Val(ndims(dom)), rule)
cache = init_autosymptr_cache(f, b, p, alg.nthreads; kws...)
return (; rule, rule_cache, cache...)
end
function do_integral(f, dom, p, alg::AutoSymPTRJL, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
g = autosymptr_integrand(f, p, dom, cacheval)
bas = get_basis(dom)
value, error = autosymptr(g, bas; syms = alg.syms, rule = cacheval.rule, cache = cacheval.rule_cache, keepmost = alg.keepmost,
abstol = abstol, reltol = reltol, maxevals = maxiters, norm=alg.norm, buffer=cacheval.buffer)
retcode = error < max(something(abstol, zero(error)), alg.norm(value)*something(reltol, isnothing(abstol) ? sqrt(eps(eltype(bas))) : 0)) ? Success : Failure # reltol falls back to 0 when an abstol is supplied, matching the other algorithms
stats = (; error)
return IntegralSolution(value, retcode, stats)
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 9001 | """
AuxQuadGKJL(; order = 7, norm = norm)
Generalization of the QuadGKJL provided by Integrals.jl that allows for `AuxValue`d
integrands for auxiliary integration and multi-threaded evaluation with the `batch` argument
to `IntegralProblem`.
"""
struct AuxQuadGKJL{F} <: IntegralAlgorithm
order::Int
norm::F
end
function AuxQuadGKJL(; order = 7, norm = norm)
return AuxQuadGKJL(order, norm)
end
function init_cacheval(f::IntegralFunction, dom, p, alg::AuxQuadGKJL; kws...)
segs = PuncturedInterval(dom)
prototype = get_prototype(f, get_prototype(segs), p)
return init_segbuf(prototype, segs, alg)
end
function init_cacheval(f::InplaceIntegralFunction, dom, p, alg::AuxQuadGKJL; kws...)
segs = PuncturedInterval(dom)
prototype = get_prototype(f, get_prototype(segs), p)
return init_segbuf(prototype, segs, alg), similar(prototype)
end
function init_cacheval(f::InplaceBatchIntegralFunction, dom, p, alg::AuxQuadGKJL; kws...)
segs = PuncturedInterval(dom)
pt = get_prototype(segs)
prototype = get_prototype(f, pt, p)
prototype isa AbstractVector || throw(ArgumentError("AuxQuadGKJL only supports batch integrands with vector outputs"))
pts = zeros(typeof(pt), 2*alg.order+1)
upts = pts / pt
return init_segbuf(first(prototype), segs, alg), similar(prototype), pts, upts
end
function init_cacheval(f::CommonSolveIntegralFunction, dom, p, alg::AuxQuadGKJL; kws...)
segs = PuncturedInterval(dom)
x = get_prototype(segs)
cache, integrand, prototype = _init_commonsolvefunction(f, dom, p; x)
return init_segbuf(prototype, segs, alg), cache, integrand
end
function do_integral(f, dom, p, alg::AuxQuadGKJL, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
# we need to strip units from the limits since infinity transformations change the units
# of the limits, which can break the segbuf
u = oneunit(eltype(dom))
usegs = map(x -> x/u, dom)
atol = isnothing(abstol) ? abstol : abstol/u
val, err = call_auxquadgk(f, p, u, usegs, cacheval; maxevals = maxiters, rtol = reltol, atol, order = alg.order, norm = alg.norm)
value = u*val
retcode = err < max(something(atol, zero(err)), alg.norm(val)*something(reltol, isnothing(atol) ? sqrt(eps(one(eltype(usegs)))) : 0)) ? Success : Failure
stats = (; error=u*err)
return IntegralSolution(value, retcode, stats)
end
function call_auxquadgk(f::IntegralFunction, p, u, usegs, cacheval; kws...)
auxquadgk(x -> f.f(u*x, p), usegs...; kws..., segbuf=cacheval)
end
function call_auxquadgk(f::InplaceIntegralFunction, p, u, usegs, cacheval; kws...)
# TODO allocate everything in the AuxQuadGK.InplaceIntegrand in the cacheval
auxquadgk!((y, x) -> f.f!(y, u*x, p), cacheval[2], usegs...; kws..., segbuf=cacheval[1])
end
function call_auxquadgk(f::InplaceBatchIntegralFunction, p, u, usegs, cacheval; kws...)
pts = cacheval[3]
g = IteratedIntegration.AuxQuadGK.BatchIntegrand((y, x) -> f.f!(y, resize!(pts, length(x)) .= u .* x, p), cacheval[2], cacheval[4]; max_batch=f.max_batch)
auxquadgk(g, usegs...; kws..., segbuf=cacheval[1])
end
function call_auxquadgk(f::CommonSolveIntegralFunction, p, u, usegs, cacheval; kws...)
# cache = cacheval[2] could call do_solve!(cache, f, x, p) to fully specialize
integrand = cacheval[3]
auxquadgk(x -> integrand(u*x, p), usegs...; kws..., segbuf=cacheval[1])
end
"""
ContQuadGKJL(; order = 7, norm = norm, rho = 1.0, rootmeth = IteratedIntegration.ContQuadGK.NewtonDeflation())
A 1d contour deformation quadrature scheme for scalar, complex-valued integrands. It
defaults to regular `quadgk` behavior on the real axis, but if it finds a root of 1/f
nearby, in the sense of Bernstein ellipse for the standard segment `[-1,1]` with semiaxes
`cosh(rho)` and `sinh(rho)`, on either the upper/lower half planes, then it dents the
contour away from the presumed pole.
"""
struct ContQuadGKJL{F,M} <: IntegralAlgorithm
order::Int
norm::F
rho::Float64
rootmeth::M
end
function ContQuadGKJL(; order = 7, norm = norm, rho = 1.0, rootmeth = IteratedIntegration.ContQuadGK.NewtonDeflation())
return ContQuadGKJL(order, norm, rho, rootmeth)
end
function init_csegbuf(prototype, dom, alg::ContQuadGKJL)
segs = PuncturedInterval(dom)
a, b = endpoints(segs)
x, s = (a+b)/2, (b-a)/2
TX = typeof(x)
convert(ComplexF64, prototype) # check that the prototype is convertible to the ComplexF64 buffer element type (result discarded)
fx_s = one(ComplexF64) * s # currently the integrand is forcibly written to a ComplexF64 buffer
TI = typeof(fx_s)
TE = typeof(alg.norm(fx_s))
r_segbuf = IteratedIntegration.ContQuadGK.PoleSegment{TX,TI,TE}[]
fc_s = prototype * complex(s) # the regular evalrule is used on complex segments
TCX = typeof(complex(x))
TCI = typeof(fc_s)
TCE = typeof(alg.norm(fc_s))
c_segbuf = IteratedIntegration.ContQuadGK.Segment{TCX,TCI,TCE}[]
return (r=r_segbuf, c=c_segbuf)
end
function init_cacheval(f::IntegralFunction, dom, p, alg::ContQuadGKJL; kws...)
segs = PuncturedInterval(dom)
prototype = get_prototype(f, get_prototype(segs), p)
init_csegbuf(prototype, dom, alg)
end
function init_cacheval(f::CommonSolveIntegralFunction, dom, p, alg::ContQuadGKJL; kws...)
segs = PuncturedInterval(dom)
cache, integrand, prototype = _init_commonsolvefunction(f, dom, p; x=get_prototype(segs))
segbufs = init_csegbuf(prototype, dom, alg)
return (; segbufs..., cache, integrand)
end
function do_integral(f, dom, p, alg::ContQuadGKJL, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
value, err = call_contquadgk(f, p, dom, cacheval; maxevals = maxiters, rho = alg.rho, rootmeth = alg.rootmeth,
rtol = reltol, atol = abstol, order = alg.order, norm = alg.norm, r_segbuf=cacheval.r, c_segbuf=cacheval.c)
retcode = err < max(something(abstol, zero(err)), alg.norm(value)*something(reltol, isnothing(abstol) ? sqrt(eps(one(eltype(dom)))) : 0)) ? Success : Failure
stats = (; error=err)
return IntegralSolution(value, retcode, stats)
end
function call_contquadgk(f::IntegralFunction, p, segs, cacheval; kws...)
contquadgk(x -> f.f(x, p), segs; kws...)
end
function call_contquadgk(f::CommonSolveIntegralFunction, p, segs, cacheval; kws...)
integrand = cacheval.integrand
contquadgk(x -> integrand(x, p), segs...; kws...)
end
"""
MeroQuadGKJL(; order = 7, norm = norm, rho = 1.0, rootmeth = IteratedIntegration.MeroQuadGK.NewtonDeflation())
A 1d pole subtraction quadrature scheme for scalar, complex-valued integrands that are
meromorphic. It defaults to regular `quadgk` behavior on the real axis, but if it finds
nearby roots of 1/f, in the sense of Bernstein ellipse for the standard segment `[-1,1]`
with semiaxes `cosh(rho)` and `sinh(rho)`, it attempts pole subtraction on that segment.
"""
struct MeroQuadGKJL{F,M} <: IntegralAlgorithm
order::Int
norm::F
rho::Float64
rootmeth::M
end
function MeroQuadGKJL(; order = 7, norm = norm, rho = 1.0, rootmeth = IteratedIntegration.MeroQuadGK.NewtonDeflation())
return MeroQuadGKJL(order, norm, rho, rootmeth)
end
function init_msegbuf(prototype, dom, alg::MeroQuadGKJL; kws...)
segs = PuncturedInterval(dom)
a, b = endpoints(segs)
x, s = (a + b)/2, (b-a)/2
convert(ComplexF64, prototype) # check that the prototype is convertible to ComplexF64 (result discarded)
fx_s = one(ComplexF64) * s # ignore the actual integrand since it is written to CF64 array
err = alg.norm(fx_s)
return IteratedIntegration.alloc_segbuf(typeof(x), typeof(fx_s), typeof(err))
end
function init_cacheval(f::IntegralFunction, dom, p, alg::MeroQuadGKJL; kws...)
segs = PuncturedInterval(dom)
prototype = get_prototype(f, get_prototype(segs), p)
segbuf = init_msegbuf(prototype, dom, alg)
return (; segbuf)
end
function init_cacheval(f::CommonSolveIntegralFunction, dom, p, alg::MeroQuadGKJL; kws...)
segs = PuncturedInterval(dom)
cache, integrand, prototype = _init_commonsolvefunction(f, dom, p; x=get_prototype(segs))
segbuf = init_msegbuf(prototype, dom, alg)
return (; segbuf, cache, integrand)
end
function do_integral(f, dom, p, alg::MeroQuadGKJL, cacheval;
reltol = nothing, abstol = nothing, maxiters = typemax(Int))
value, err = call_meroquadgk(f, p, dom, cacheval; maxevals = maxiters, rho = alg.rho, rootmeth = alg.rootmeth,
rtol = reltol, atol = abstol, order = alg.order, norm = alg.norm, segbuf=cacheval.segbuf)
retcode = err < max(something(abstol, zero(err)), alg.norm(value)*something(reltol, isnothing(abstol) ? sqrt(eps(one(eltype(dom)))) : 0)) ? Success : Failure
stats = (; error=err)
return IntegralSolution(value, retcode, stats)
end
function call_meroquadgk(f::IntegralFunction, p, segs, cacheval; kws...)
meroquadgk(x -> f.f(x, p), segs; kws...)
end
function call_meroquadgk(f::CommonSolveIntegralFunction, p, segs, cacheval; kws...)
integrand = cacheval.integrand
meroquadgk(x -> integrand(x, p), segs...; kws...)
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 7369 | """
NestedQuad(alg::IntegralAlgorithm)
NestedQuad(algs::IntegralAlgorithm...)
Nested integration by repeating one quadrature algorithm or composing a list of algorithms.
The domain of integration must be an `AbstractIteratedLimits` from the
IteratedIntegration.jl package. Analogous to `nested_quad` from IteratedIntegration.jl.
The integrand should expect `SVector` inputs. Do not use this for very high-dimensional
integrals, since the compilation time scales very poorly with respect to dimensionality.
In order to improve the compilation time, FunctionWrappers.jl is used to enforce type
stability of the integrand, so you should always pick the widest integration limit type so
that inference works properly. For example, if [`ContQuadGKJL`](@ref) is used as an
algorithm in the nested scheme, then the limits of integration should be made complex.
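## Example
A sketch of nesting 1d Gauss-Kronrod panels over a square domain, assuming `CubicLimits`
from IteratedIntegration.jl to describe the limits (the exact result is
`2cos(1) - cos(2) - 1 ≈ 0.4967`):
```
using StaticArrays
using IteratedIntegration: CubicLimits
dom = CubicLimits(SVector(0.0, 0.0), SVector(1.0, 1.0))
prob = IntegralProblem((x, p) -> cos(p*sum(x)), dom, 1.0)
solve(prob, NestedQuad(QuadGKJL())).value
```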
"""
struct NestedQuad{T,S} <: IntegralAlgorithm
algs::T
specialize::S
NestedQuad(alg::IntegralAlgorithm, specialize::AbstractSpecialization=FunctionWrapperSpecialize()) = new{typeof(alg),typeof(specialize)}(alg, specialize)
NestedQuad(algs::Tuple{Vararg{IntegralAlgorithm}}, specialize::Tuple{Vararg{AbstractSpecialization}}=ntuple(_->FunctionWrapperSpecialize(), length(algs))) = new{typeof(algs),typeof(specialize)}(algs, specialize)
end
NestedQuad(algs::IntegralAlgorithm...) = NestedQuad(algs)
# TODO add a parallelization option for use when it is safe to do so
function _update!(cache, x, (; p, lims_state))
segs, lims, state = limit_iterate(lims_state..., x)
len = segs[end] - segs[begin]
kws = cache.kwargs
cache.p = p
cache.cacheval.dom = segs
cache.cacheval.kwargs = haskey(kws, :abstol) ? merge(kws, (abstol=kws.abstol/len,)) : kws
cache.cacheval.p = (; cache.cacheval.p..., lims_state=(lims, state))
return
end
_postsolve(sol, x, p) = sol.value
function init_cacheval(f, nextdom, p, alg::NestedQuad; kws...)
x0, (segs, lims, state) = if nextdom isa AbstractIteratedLimits
interior_point(nextdom), limit_iterate(nextdom)
else
nextdom
end
algs = alg.algs isa IntegralAlgorithm ? ntuple(i -> alg.algs, Val(ndims(lims))) : alg.algs
spec = alg.specialize isa AbstractSpecialization ? ntuple(i -> alg.specialize, Val(ndims(lims))) : alg.specialize
if ndims(lims) == 1
func, ws = inner_integralfunction(f, x0, p)
else
integrand, ws, update!, postsolve = outer_integralfunction(f, x0, p)
proto = get_prototype(integrand, x0, p)
a, b, = segs
x = (a+b)/2
next = (x0[begin:end-1], limit_iterate(lims, state, x))
kws = NamedTuple(kws)
len = segs[end] - segs[begin]
kwargs = haskey(kws, :abstol) ? merge(kws, (abstol=kws.abstol/len,)) : kws
subprob = IntegralProblem(integrand, next, p; kwargs...)
func = CommonSolveIntegralFunction(subprob, NestedQuad(algs[1:ndims(lims)-1], spec[1:ndims(lims)-1]), update!, postsolve, proto*x^(ndims(lims)-1), spec[ndims(lims)])
end
prob = IntegralProblem(func, segs, (; p, lims_state=(lims, state), ws); kws...)
return init(prob, algs[ndims(lims)])
# the order of updates is somewhat tricky. I think some could be simplified if instead
# we use an IntegralProblem modified to contain lims_state, instead of passing the
# parameter as well
end
function do_integral(f, dom, p, alg::NestedQuad, cacheval; kws...)
cacheval.p = (; cacheval.p..., p)
cacheval.kwargs = (; cacheval.kwargs..., kws...)
return solve!(cacheval)
end
function inner_integralfunction(f::IntegralFunction, x0, p)
proto = get_prototype(f, x0, p)
func = IntegralFunction(proto) do x, (; p, lims_state)
f.f(limit_iterate(lims_state..., x), p)
end
ws = nothing
return func, ws
end
function outer_integralfunction(f::IntegralFunction, x0, p)
proto = get_prototype(f, x0, p)
func = IntegralFunction(f.f, proto)
ws = nothing
return func, ws, _update!, _postsolve
end
#=
"""
AbsoluteEstimate(est_alg, abs_alg; kws...)
Most algorithms are efficient when using absolute error tolerances, but how do you know the
size of the integral? One option is to estimate it using a second algorithm.
A multi-algorithm to estimate an integral using an `est_alg` to generate a rough estimate of
the integral that is combined with a user's relative tolerance to re-calculate the integral
to higher accuracy using the `abs_alg`. The keywords passed to the algorithm may include
`reltol`, `abstol` and `maxiters` and are given to the `est_alg` solver. They should limit
the amount of work of `est_alg` so as to only generate an order-of-magnitude estimate of the
integral. The tolerances passed to `abs_alg` are `abstol=max(abstol,reltol*norm(I))` and
`reltol=0`.
"""
struct AbsoluteEstimate{E<:IntegralAlgorithm,A<:IntegralAlgorithm,F,K<:NamedTuple} <: IntegralAlgorithm
est_alg::E
abs_alg::A
norm::F
kws::K
end
function AbsoluteEstimate(est_alg, abs_alg; norm=norm, kwargs...)
kws = NamedTuple(kwargs)
checkkwargs(kws)
return AbsoluteEstimate(est_alg, abs_alg, norm, kws)
end
function init_cacheval(f, dom, p, alg::AbsoluteEstimate)
return (est=init_cacheval(f, dom, p, alg.est_alg),
abs=init_cacheval(f, dom, p, alg.abs_alg))
end
function do_solve(f, dom, p, alg::AbsoluteEstimate, cacheval;
abstol=nothing, reltol=nothing, maxiters=typemax(Int))
sol = do_solve(f, dom, p, alg.est_alg, cacheval.est; alg.kws...)
val = alg.norm(sol.u) # has same units as sol
rtol = reltol === nothing ? sqrt(eps(one(val))) : reltol # use the precision of the solution to set the default relative tolerance
atol = max(abstol === nothing ? zero(val) : abstol, rtol*val)
return do_solve(f, dom, p, alg.abs_alg, cacheval.abs;
abstol=atol, reltol=zero(rtol), maxiters=maxiters)
end
"""
EvalCounter(::IntegralAlgorithm)
An algorithm which counts the evaluations used by another algorithm.
The count is stored in the `sol.numevals` field.
"""
struct EvalCounter{T<:IntegralAlgorithm} <: IntegralAlgorithm
alg::T
end
function init_cacheval(f, dom, p, alg::EvalCounter)
return init_cacheval(f, dom, p, alg.alg)
end
function do_solve(f, dom, p, alg::EvalCounter, cacheval; kws...)
if f isa InplaceIntegrand
ni::Int = 0
gi = (y, x, p) -> (ni += 1; f.f!(y, x, p))
soli = do_solve(InplaceIntegrand(gi, f.I), dom, p, alg.alg, cacheval; kws...)
return IntegralSolution(soli.u, soli.resid, soli.retcode, ni)
elseif f isa BatchIntegrand
nb::Int = 0
gb = (y, x, p) -> (nb += length(x); f.f!(y, x, p))
solb = do_solve(BatchIntegrand(gb, f.y, f.x, max_batch=f.max_batch), dom, p, alg.alg, cacheval; kws...)
return IntegralSolution(solb.u, solb.resid, solb.retcode, nb)
elseif f isa NestedBatchIntegrand
# TODO allocate a bunch of accumulators associated with the leaves of the nested
# integrand or rewrap the algorithms in NestedQuad
error("NestedBatchIntegrand not yet supported with EvalCounter")
else
n::Int = 0
g = (x, p) -> (n += 1; f(x, p)) # we need let to prevent Core.Box around the captured variable
sol = do_solve(g, dom, p, alg.alg, cacheval; kws...)
return IntegralSolution(sol.u, sol.resid, sol.retcode, n)
end
end
=#
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 20266 | # utilities
function lattice_bz_limits(B::AbstractMatrix)
T = SVector{checksquare(B),typeof(one(eltype(B)))}
CubicLimits(zero(T), ones(T)) # unitless canonical bz
end
function check_bases_canonical(A::AbstractMatrix, B::AbstractMatrix, atol)
norm(A'B - 2pi*I) < atol || throw(ArgumentError("Real and reciprocal Bravais lattice bases non-orthogonal to tolerance $atol"))
end
canonical_reciprocal_basis(A::AbstractMatrix) = A' \ (pi*(one(A)+one(A)))
canonical_ptr_basis(B) = Basis(one(B))
# main data type
"""
SymmetricBZ(A, B, lims::AbstractIteratedLimits, syms)
Data type representing a Brillouin zone reduced by a set of symmetries, `syms`
with iterated integration limits `lims`, both of which are assumed to be in the
lattice basis (since the Fourier series is). `A` and `B` should be
identically-sized square matrices containing the real and reciprocal basis
vectors in their columns.
!!! note "Convention"
This type assumes all integration limit data is in the reciprocal lattice
basis with fractional coordinates, where the FBZ is just the hypercube
spanned by the vertices (0,…,0) & (1,…,1). If necessary, use `A` or `B` to
rotate these quantities into the convention.
`lims` should be limits compatible with
[IteratedIntegration.jl](https://github.com/lxvm/IteratedIntegration.jl).
`syms` should be an iterable collection of point group symmetries compatible
with [AutoSymPTR.jl](https://github.com/lxvm/AutoSymPTR.jl).
"""
struct SymmetricBZ{S,L,d,TA,TB,d2}
A::SMatrix{d,d,TA,d2}
B::SMatrix{d,d,TB,d2}
lims::L
syms::S
function SymmetricBZ(A::MA, B::MB, lims::L, syms::S) where {d,TA,TB,d2,MA<:SMatrix{d,d,TA,d2},MB<:SMatrix{d,d,TB,d2},L,S}
return new{S,L,d,TA,TB,d2}(A, B, lims, syms)
end
end
nsyms(bz::SymmetricBZ) = length(bz.syms)
const FullBZ = SymmetricBZ{Nothing}
nsyms(::FullBZ) = 1
Base.summary(bz::SymmetricBZ) = string(checksquare(bz.A), "-dimensional Brillouin zone with ", bz isa FullBZ ? "trivial" : nsyms(bz), " symmetries")
Base.show(io::IO, bz::SymmetricBZ) = print(io, summary(bz))
Base.ndims(::SymmetricBZ{S,L,d}) where {S,L,d} = d
Base.eltype(::Type{<:SymmetricBZ{S,L,d,TA,TB}}) where {S,L,d,TA,TB} = TB
get_prototype(bz::SymmetricBZ) = interior_point(bz.lims)
# Define traits for symmetrization based on symmetry representations
"""
AbstractSymRep
Abstract supertype of symmetry representation traits.
"""
abstract type AbstractSymRep end
"""
UnknownRep()
Fallback symmetry representation for array types without a user-defined `SymRep`.
Will perform FBZ integration regardless of available BZ symmetries.
"""
struct UnknownRep <: AbstractSymRep end
"""
TrivialRep()
Symmetry representation of objects with trivial transformation under the group.
"""
struct TrivialRep <: AbstractSymRep end
"""
symmetrize(rep::AbstractSymRep, ::SymmetricBZ, x)
Transform `x` by the representation of the symmetries of the point group used to reduce the
domain, thus mapping the value of `x` on to the full Brillouin zone.
"""
symmetrize(rep, bz::SymmetricBZ, x) = symmetrize_(rep, bz, x)
symmetrize(_, ::FullBZ, x) = x
symmetrize_(rep, bz, x) = symmetrize__(rep, bz, x)
symmetrize_(rep, bz, x::AuxValue) = AuxValue(symmetrize__(rep, bz, x.val), symmetrize__(rep, bz, x.aux))
symmetrize__(::TrivialRep, bz, x) = nsyms(bz)*x
symmetrize__(::UnknownRep, bz, x) = error("unknown representation cannot be symmetrized")
struct SymmetricRule{R,U,B}
rule::R
rep::U
bz::B
end
Base.getindex(r::SymmetricRule, i) = getindex(r.rule, i)
Base.eltype(::Type{SymmetricRule{R,U,B}}) where {R,U,B} = eltype(R)
Base.length(r::SymmetricRule) = length(r.rule)
Base.iterate(r::SymmetricRule, args...) = iterate(r.rule, args...)
function (r::SymmetricRule)(f::F, args...) where {F}
out = r.rule(f, args...)
val = symmetrize(r.rep, r.bz, out)
return val
end
struct SymmetricRuleDef{R,U,B}
rule::R
rep::U
bz::B
end
AutoSymPTR.nsyms(r::SymmetricRuleDef) = AutoSymPTR.nsyms(r.rule)
function (r::SymmetricRuleDef)(::Type{T}, v::Val{d}) where {T,d}
return SymmetricRule(r.rule(T, v), r.rep, r.bz)
end
function AutoSymPTR.nextrule(r::SymmetricRule, ruledef::SymmetricRuleDef)
return SymmetricRule(AutoSymPTR.nextrule(r.rule, ruledef.rule), ruledef.rep, ruledef.bz)
end
# Here we provide utilities to build BZs
"""
AbstractBZ{d}
Abstract supertype for all Brillouin zone data types parametrized by dimension.
"""
abstract type AbstractBZ{d} end
"""
load_bz(bz::AbstractBZ, [T::Type=Float64])
load_bz(bz::AbstractBZ, A::AbstractMatrix, [B::AbstractMatrix])
Interface to loading Brillouin zones.
## Arguments
- `bz::AbstractBZ`: a kind of Brillouin zone to construct, e.g. [`FBZ`](@ref) or
[`IBZ`](@ref)
- `T::Type`: a numeric type to set the precision of the domain (default: `Float64`)
- `A::AbstractMatrix`: a ``d \\times d`` matrix whose columns are the real-space lattice
vectors of a ``d``-dimensional crystal
- `B::AbstractMatrix`: a ``d \\times d`` matrix whose columns are the reciprocal-space
lattice vectors of a ``d``-dimensional Brillouin zone (default: `A' \\ 2πI`)
!!! note "Assumptions"
`AutoBZCore` assumes that all calculations occur in the reciprocal
lattice basis, since that is the basis in which Wannier interpolants are most
efficiently described. See [`SymmetricBZ`](@ref) for details. We also assume that the
integrands are cheap to evaluate, which is why we provide adaptive methods in the first
place, so that return types can be determined at runtime (and mechanisms are in place
for compile time as well)
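## Example
A sketch of loading the full BZ of a 3d unit lattice and an inversion-symmetric BZ of a 2d
square lattice (lengths here are unitless for simplicity):
```
bz = load_bz(FBZ(3)) # full BZ of a 3d unit lattice
A = 2pi*[1.0 0.0; 0.0 1.0] # square-lattice Bravais vectors
ibz = load_bz(InversionSymIBZ(), A) # B defaults to A' \\ 2πI
```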
"""
function load_bz end
function load_bz(bz::AbstractBZ{N}, A::AbstractMatrix{T}, B::AbstractMatrix{S}=canonical_reciprocal_basis(A); atol=nothing) where {N,T,S}
(d = checksquare(A)) == checksquare(B) ||
throw(DimensionMismatch("Bravais lattices $A and $B must have the same shape"))
bz_ = if N isa Integer
@assert d == N
bz
else
convert(AbstractBZ{d}, bz)
end
check_bases_canonical(A, B, something(atol, sqrt(eps(oneunit(T)*oneunit(S)))))
MA = SMatrix{d,d,T,d^2}; MB = SMatrix{d,d,S,d^2}
return load_bz(bz_, convert(MA, A), convert(MB, B))
end
function load_bz(bz::AbstractBZ{d}, ::Type{T}=Float64) where {d,T}
d isa Integer || throw(ArgumentError("BZ dimension must be integer"))
A = oneunit(SMatrix{d,d,T,d^2})
return load_bz(bz, A)
end
"""
FBZ{N} <: AbstractBZ
Singleton type representing first/full Brillouin zones of `N` dimensions.
By default, `N` is nothing and the dimension is obtained from input files.
"""
struct FBZ{N} <: AbstractBZ{N} end
FBZ(n=nothing) = FBZ{n}()
Base.convert(::Type{AbstractBZ{d}}, ::FBZ) where {d} = FBZ{d}()
function load_bz(::FBZ{N}, A::SMatrix{N,N}, B::SMatrix{N,N}) where {N}
lims = lattice_bz_limits(B)
return SymmetricBZ(A, B, lims, nothing)
end
"""
IBZ <: AbstractBZ
Singleton type representing irreducible Brillouin zones. Load
[SymmetryReduceBZ.jl](https://github.com/jerjorg/SymmetryReduceBZ.jl) to use this.
"""
struct IBZ{d,P} <: AbstractBZ{d} end
struct DefaultPolyhedron end
IBZ(n=nothing,) = IBZ{n,DefaultPolyhedron}()
Base.convert(::Type{AbstractBZ{d}}, ::IBZ{N,P}) where {d,N,P} = IBZ{d,P}()
"""
load_bz(::IBZ, A, B, species::AbstractVector, positions::AbstractMatrix; kws...)
`species` must have distinct labels for each atom type (e.g. can be any string or integer)
and `positions` must be a matrix whose columns give the coordinates of the atom of the
corresponding species.
"""
function load_bz(bz::IBZ, A, B, species, positions; kws...)
ext = Base.get_extension(@__MODULE__(), :SymmetryReduceBZExt)
if ext !== nothing
return ext.load_ibz(bz, A, B, species, positions; kws...)
else
error("SymmetryReduceBZ extension not loaded")
end
end
function load_bz(bz::IBZ{N}, A::SMatrix{N,N}, B::SMatrix{N,N}) where {N}
return load_bz(bz, A, B, nothing, nothing)
end
checkorthog(A::AbstractMatrix) = isdiag(transpose(A)*A)
sign_flip_tuples(n::Val{d}) where {d} = Iterators.product(ntuple(_ -> (1,-1), n)...)
sign_flip_matrices(n::Val{d}) where {d} = (Diagonal(SVector{d,Int}(A)) for A in sign_flip_tuples(n))
n_sign_flips(d::Integer) = 2^d
"""
InversionSymIBZ{N} <: AbstractBZ
Singleton type representing Brillouin zones with full inversion symmetry
!!! warning "Assumptions"
Only expect this to work for systems with orthogonal lattice vectors
"""
struct InversionSymIBZ{N} <: AbstractBZ{N} end
InversionSymIBZ(n=nothing) = InversionSymIBZ{n}()
Base.convert(::Type{AbstractBZ{d}}, ::InversionSymIBZ) where {d} = InversionSymIBZ{d}()
function load_bz(::InversionSymIBZ{N}, A::SMatrix{N,N}, B::SMatrix{N,N,TB}) where {N,TB}
checkorthog(A) || @warn "Non-orthogonal lattice vectors detected with InversionSymIBZ. Unexpected behavior may occur"
t = one(TB); V = SVector{N,typeof(t)}
lims = CubicLimits(zero(V), fill(1//2, V))
syms = map(S -> t*S, sign_flip_matrices(Val(N)))
return SymmetricBZ(A, B, lims, syms)
end
function permutation_matrices(t::Val{n}) where {n}
permutations = permutation_tuples(ntuple(identity, t))
(sacollect(SMatrix{n,n,Int,n^2}, ifelse(j == p[i], 1, 0) for i in 1:n, j in 1:n) for p in permutations)
end
permutation_tuples(C::NTuple{N}) where {N} = @inbounds((C[i], p...)::typeof(C) for i in eachindex(C) for p in permutation_tuples(C[[j for j in eachindex(C) if j != i]]))
permutation_tuples(C::NTuple{1}) = C
n_permutations(n::Integer) = factorial(n)
"""
cube_automorphisms(::Val{d}) where d
Return a generator of the symmetries of the cube in `d` dimensions, including the
identity.
"""
cube_automorphisms(n::Val{d}) where {d} = (S*P for S in sign_flip_matrices(n), P in permutation_matrices(n))
n_cube_automorphisms(d) = n_sign_flips(d) * n_permutations(d)
"""
CubicSymIBZ{N} <: AbstractBZ
Singleton type representing Brillouin zones with full cubic symmetry
!!! warning "Assumptions"
Only expect this to work for systems with orthogonal lattice vectors
"""
struct CubicSymIBZ{N} <: AbstractBZ{N} end
CubicSymIBZ(n=nothing) = CubicSymIBZ{n}()
Base.convert(::Type{AbstractBZ{d}}, ::CubicSymIBZ) where {d} = CubicSymIBZ{d}()
function load_bz(::CubicSymIBZ{N}, A::SMatrix{N,N}, B::SMatrix{N,N,TB}) where {N,TB}
checkorthog(A) || @warn "Non-orthogonal lattice vectors detected with CubicSymIBZ. Unexpected behavior may occur"
t = one(TB)
lims = TetrahedralLimits(fill(1//2, SVector{N,typeof(t)}))
syms = map(S -> t*S, cube_automorphisms(Val{N}()))
return SymmetricBZ(A, B, lims, syms)
end
# Now we provide the BZ integration algorithms effectively as aliases to the libraries
"""
AutoBZAlgorithm
Abstract supertype for Brillouin zone integration algorithms.
All integration problems on the BZ get rescaled to fractional coordinates so that the
Brillouin zone becomes `[0,1]^d`, and integrands should have this periodicity. If the
integrand depends on the Brillouin zone basis, then it may have to be transformed to the
Cartesian coordinates as a post-processing step.
These algorithms also use the symmetries of the Brillouin zone and the integrand.
"""
abstract type AutoBZAlgorithm <: IntegralAlgorithm end
"""
AutoBZProblem([rep], f, bz, [p]; kwargs...)
Construct a BZ integration problem.
## Arguments
- `rep::AbstractSymRep`: The symmetry representation of `f` (default: `UnknownRep()`)
- `f::AbstractIntegralFunction`: The integrand
- `bz::SymmetricBZ`: The Brillouin zone to integrate over
- `p`: parameters for the integrand (default: `NullParameters()`)
## Keywords
Additional keywords are passed directly to the solver
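## Example
A sketch of integrating a trivially-symmetric integrand over the full BZ of a square
lattice with [`PTR`](@ref); since `|det(B)| = 1` here, the result is the average
`∫₀¹ cos²(2πk₁) dk₁ = 1/2`:
```
using LinearAlgebra: I
bz = load_bz(FBZ(), Matrix(2pi*I, 2, 2))
prob = AutoBZProblem(TrivialRep(), IntegralFunction((k, p) -> cos(2pi*k[1])^2), bz)
solve(prob, PTR(npt=50)).value # ≈ 0.5
```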
"""
struct AutoBZProblem{R<:AbstractSymRep,F<:AbstractIntegralFunction,BZ<:SymmetricBZ,P,K<:NamedTuple}
rep::R
f::F
bz::BZ
p::P
kwargs::K
end
const WARN_UNKNOWN_SYMMETRY = """
A symmetric BZ was used with an integrand whose symmetry representation is unknown.
For correctness, the calculation will proceed on the full BZ, i.e. without symmetry.
To integrate with symmetry, define an AbstractSymRep for your integrand.
"""
function AutoBZProblem(rep::AbstractSymRep, f::AbstractIntegralFunction, bz::SymmetricBZ, p=NullParameters(); kws...)
proto = get_prototype(f, get_prototype(bz), p)
if rep isa UnknownRep && !(bz isa FullBZ)
@warn WARN_UNKNOWN_SYMMETRY
fbz = SymmetricBZ(bz.A, bz.B, lattice_bz_limits(bz.B), nothing)
return AutoBZProblem(rep, f, fbz, p, NamedTuple(kws))
else
return AutoBZProblem(rep, f, bz, p, NamedTuple(kws))
end
end
function AutoBZProblem(f::AbstractIntegralFunction, bz::SymmetricBZ, p=NullParameters(); kws...)
return AutoBZProblem(UnknownRep(), f, bz, p; kws...)
end
function AutoBZProblem(f, bz::SymmetricBZ, p=NullParameters(); kws...)
return AutoBZProblem(IntegralFunction(f), bz, p; kws...)
end
mutable struct AutoBZCache{R,F,BZ,P,A,C,K}
rep::R
f::F
bz::BZ
p::P
alg::A
cacheval::C
kwargs::K
end
function init(prob::AutoBZProblem, alg::AutoBZAlgorithm; kwargs...)
rep = prob.rep; f = prob.f; bz = prob.bz; p = prob.p
kws = (; prob.kwargs..., kwargs...)
checkkwargs(kws)
cacheval = init_cacheval(rep, f, bz, p, alg; kws...)
return AutoBZCache(rep, f, bz, p, alg, cacheval, kws)
end
"""
solve(::AutoBZProblem, ::AutoBZAlgorithm; kws...)::IntegralSolution
"""
solve(prob::AutoBZProblem, alg::AutoBZAlgorithm; kwargs...)
"""
solve!(::AutoBZCache)::IntegralSolution
Compute the solution to an [`AutoBZProblem`](@ref) constructed from [`init`](@ref).
"""
function solve!(c::AutoBZCache)
return do_solve_autobz(c.rep, c.f, c.bz, c.p, c.alg, c.cacheval; c.kwargs...)
end
function init_cacheval(rep, f, bz::SymmetricBZ, p, bzalg::AutoBZAlgorithm; kws...)
prob, alg = bz_to_standard(f, bz, p, bzalg; kws...)
return init(prob, alg)
end
function do_solve_autobz(rep, f, bz, p, bzalg::AutoBZAlgorithm, cacheval; _kws...)
j = abs(det(bz.B)) # rescale tolerance to (I)BZ coordinate and get the right number of digits
kws = NamedTuple(_kws)
cacheval.f = f
cacheval.p = p
cacheval.kwargs = haskey(kws, :abstol) ? merge(kws, (abstol=kws.abstol / (j * nsyms(bz)),)) : kws
sol = solve!(cacheval)
value = j*symmetrize(rep, bz, sol.value)
stats = (; sol.stats...)
# err = sol.resid === nothing ? nothing : j*symmetrize(f, bz_, sol.resid)
return IntegralSolution(value, sol.retcode, stats)
end
# AutoBZAlgorithms must implement:
# - bz_to_standard: (transformed) bz, unitless domain, standard algorithm
"""
IAI(alg::IntegralAlgorithm=AuxQuadGKJL())
IAI(algs::IntegralAlgorithm...)
Iterated-adaptive integration using `nested_quad` from
[IteratedIntegration.jl](https://github.com/lxvm/IteratedIntegration.jl).
**This algorithm is the most efficient for localized integrands**.
"""
struct IAI{T,S} <: AutoBZAlgorithm
algs::T
specialize::S
IAI(alg::IntegralAlgorithm=AuxQuadGKJL(), specialize::AbstractSpecialization=FunctionWrapperSpecialize()) = new{typeof(alg),typeof(specialize)}(alg, specialize)
IAI(algs::Tuple{Vararg{IntegralAlgorithm}}, specialize::Tuple{Vararg{AbstractSpecialization}}=ntuple(_->FunctionWrapperSpecialize(),length(algs))) = new{typeof(algs),typeof(specialize)}(algs, specialize)
end
IAI(algs::IntegralAlgorithm...) = IAI(algs)
function bz_to_standard(f, bz, p, bzalg::IAI; kws...)
return IntegralProblem(f, bz.lims, p; kws...), NestedQuad(bzalg.algs, bzalg.specialize)
end
"""
PTR(; npt=50, nthreads=1)
Periodic trapezoidal rule with a fixed number of k-points per dimension, `npt`,
using the routine `ptr` from [AutoSymPTR.jl](https://github.com/lxvm/AutoSymPTR.jl).
**The caller should check that the integral is converged w.r.t. `npt`**.
"""
struct PTR <: AutoBZAlgorithm
npt::Int
nthreads::Int
end
PTR(; npt=50, nthreads=1) = PTR(npt, nthreads)
function bz_to_standard(f, bz, p, alg::PTR; kws...)
return IntegralProblem(f, canonical_ptr_basis(bz.B), p; kws...), MonkhorstPack(npt=alg.npt, syms=bz.syms, nthreads=alg.nthreads)
end
"""
AutoPTR(; norm=norm, a=1.0, nmin=50, nmax=1000, n₀=6.0, Δn=log(10), keepmost=2, nthreads=1)
Periodic trapezoidal rule with automatic convergence to tolerances passed to the
solver with respect to `norm` using the routine `autosymptr` from
[AutoSymPTR.jl](https://github.com/lxvm/AutoSymPTR.jl).
**This algorithm is the most efficient for smooth integrands**.
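## Example
A sketch reusing `bz` and the integrand from the [`AutoBZProblem`](@ref) docstring; the
tolerance controls how far the number of k-points is refined:
```
prob = AutoBZProblem(TrivialRep(), IntegralFunction((k, p) -> cos(2pi*k[1])^2), bz)
solve(prob, AutoPTR(); abstol=1e-6).value
```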
"""
struct AutoPTR{F} <: AutoBZAlgorithm
norm::F
a::Float64
nmin::Int
nmax::Int
n₀::Float64
Δn::Float64
keepmost::Int
nthreads::Int
end
function AutoPTR(; norm=norm, a=1.0, nmin=50, nmax=1000, n₀=6.0, Δn=log(10), keepmost=2, nthreads=1)
return AutoPTR(norm, a, nmin, nmax, n₀, Δn, keepmost, nthreads)
end
struct RepBZ{R,B}
rep::R
bz::B
end
Base.ndims(dom::RepBZ) = ndims(dom.bz)
Base.eltype(::Type{RepBZ{R,B}}) where {R,B} = eltype(B)
get_prototype(dom::RepBZ) = get_prototype(dom.bz)
function init_cacheval(rep, f, bz::SymmetricBZ, p, bzalg::AutoPTR; kws...)
prob = IntegralProblem(f, RepBZ(rep, bz), p; kws...)
alg = AutoSymPTRJL(norm=bzalg.norm, a=bzalg.a, nmin=bzalg.nmin, nmax=bzalg.nmax, n₀=bzalg.n₀, Δn=bzalg.Δn, keepmost=bzalg.keepmost, syms=bz.syms, nthreads=bzalg.nthreads)
return init(prob, alg)
end
get_basis(dom::RepBZ) = canonical_ptr_basis(dom.bz.B)
function init_rule(dom::RepBZ, alg::AutoSymPTRJL)
B = get_basis(dom)
rule = init_rule(B, alg)
return SymmetricRuleDef(rule, dom.rep, dom.bz)
end
# The spectral convergence of the PTR for integrands with non-trivial symmetry action
# requires symmetrizing inside the quadrature
function do_solve_autobz(rep, f, bz, p, bzalg::AutoPTR, cacheval; _kws...)
j = abs(det(bz.B)) # rescale tolerance to (I)BZ coordinate and get the right number of digits
kws = NamedTuple(_kws)
cacheval.f = f
cacheval.p = p
cacheval.kwargs = haskey(kws, :abstol) ? merge(kws, (abstol=kws.abstol / j,)) : kws
sol = solve!(cacheval)
value = j*sol.value
stats = (; sol.stats..., error=sol.stats.error*j)
return IntegralSolution(value, sol.retcode, stats)
end
"""
TAI(; norm=norm, initdiv=1)
Tree-adaptive integration using `hcubature` from
[HCubature.jl](https://github.com/JuliaMath/HCubature.jl). This routine is
limited to integration over hypercube domains and may not use all symmetries.
"""
struct TAI{N} <: AutoBZAlgorithm
norm::N
initdiv::Int
end
TAI(; norm=norm, initdiv=1) = TAI(norm, initdiv)
function bz_to_standard(f, bz, p, alg::TAI; kws...)
@assert bz.lims isa CubicLimits "TAI can only integrate rectangular regions"
return IntegralProblem(f, HyperCube(bz.lims.a, bz.lims.b), p; kws...), HCubatureJL(norm=alg.norm, initdiv = alg.initdiv)
end
#=
"""
PTR_IAI(; ptr=PTR(), iai=IAI())
Multi-algorithm that returns an `IAI` calculation with an `abstol` determined
from the given `reltol` and a `PTR` estimate, `I`, as `reltol*norm(I)`.
This addresses the issue that `IAI` does not currently use a globally-adaptive
algorithm and may not have the expected scaling with localization length unless
an `abstol` is used since computational effort may be wasted via a `reltol` with
the naive `nested_quadgk`.
"""
PTR_IAI(; ptr=PTR(), iai=IAI(), kws...) = AbsoluteEstimate(ptr, iai; kws...)
"""
AutoPTR_IAI(; reltol=1.0, ptr=AutoPTR(), iai=IAI())
Multi-algorithm that returns an `IAI` calculation with an `abstol` determined
from an `AutoPTR` estimate, `I`, computed to `reltol` precision, and the `rtol`
given to the solver as `abstol=rtol*norm(I)`.
This addresses the issue that `IAI` does not currently use a globally-adaptive
algorithm and may not have the expected scaling with localization length unless
an `abstol` is used since computational effort may be wasted via a `reltol` with
the naive `nested_quadgk`.
"""
AutoPTR_IAI(; reltol=1.0, ptr=AutoPTR(), iai=IAI(), kws...) = AbsoluteEstimate(ptr, iai; reltol=reltol, kws...)
function count_bz_to_standard(bz, alg)
_bz, dom, _alg = bz_to_standard(bz, alg)
return _bz, dom, EvalCounter(_alg)
end
function do_solve(f, bz::SymmetricBZ, p, alg::EvalCounter{<:AutoBZAlgorithm}, cacheval; kws...)
return do_solve_autobz(count_bz_to_standard, f, bz, p, alg.alg, cacheval; kws...)
end
=#
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 1771 | endpoints(dom) = (first(dom), last(dom))
breakpoints(dom) = dom[begin+1:end-1] # or Iterators.drop(Iterators.take(dom, length(dom)-1), 1)
segments(dom) = dom
function get_prototype(dom)
a, b, = dom
return (a+b)/2
end
function get_prototype(B::Basis)
return B * zero(SVector{ndims(B),float(eltype(B))})
end
get_basis(B::Basis) = B
get_basis(B::AbstractMatrix) = Basis(B)
"""
PuncturedInterval(s)
Represent an interval `(a, b)` with interior points deleted by `s = (a, c1, ..., cN, b)`, so
that the integration algorithm can avoid the points `c1, ..., cN` for e.g. discontinuities.
`s` must be a tuple or vector.
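## Example
For instance, [`QuadGKJL`](@ref) interprets a tuple domain this way, so the kink of the
integrand below lands on a panel boundary (the exact result is `1/4`):
```
prob = IntegralProblem((x, p) -> abs(x - p), (0.0, 0.5, 1.0), 0.5)
solve(prob, QuadGKJL()).value
```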
"""
struct PuncturedInterval{T,S}
s::S
PuncturedInterval(s::S) where {N,S<:NTuple{N}} = new{eltype(s),S}(s)
PuncturedInterval(s::S) where {T,S<:AbstractVector{T}} = new{T,S}(s)
end
PuncturedInterval(s::PuncturedInterval) = s
Base.eltype(::Type{PuncturedInterval{T,S}}) where {T,S} = T
segments(p::PuncturedInterval) = p.s
endpoints(p::PuncturedInterval) = (p.s[begin], p.s[end])
function get_prototype(p::PuncturedInterval)
a, b, = segments(p)
return (a + b)/2
end
"""
HyperCube(a, b)
Represents a hypercube spanned by the vertices `a, b`, which must be iterables of the same length.
"""
struct HyperCube{d,T}
a::SVector{d,T}
b::SVector{d,T}
end
function HyperCube(a::NTuple{d}, b::NTuple{d}) where {d}
F = promote_type(eltype(a), eltype(b))
return HyperCube{d,F}(SVector{d,F}(a), SVector{d,F}(b))
end
HyperCube(a, b) = HyperCube(promote(a...), promote(b...))
Base.eltype(::Type{HyperCube{d,T}}) where {d,T} = T
endpoints(c::HyperCube) = (c.a, c.b)
function get_prototype(p::HyperCube)
a, b = endpoints(p)
return (a + b)/2
end
get_prototype(l::AbstractIteratedLimits) = interior_point(l)
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 713 | # BCD
# - number of k points
# - H_R or DFT data
# LTM
# - number of k points
# - any function or H_R
"""
GGR(; npt=50)
Generalized Gilat-Raubenheimer method as in ["Generalized Gilat–Raubenheimer method for
density-of-states calculation in photonic
crystals"](https://doi.org/10.1088/2040-8986/aaae52).
This method requires the Hamiltonian and its derivatives, and performs a linear
extrapolation at each k-point in an equispace grid. The algorithm is expected to show
second-order convergence and suffer reduced error at band crossings compared to
interpolatory methods.
## Arguments
- `npt`: the number of k-points per dimension
"""
struct GGR <: DOSAlgorithm
npt::Int
end
GGR(; npt=50) = GGR(npt)
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 3869 | function init_cacheval(h, domain, p, alg::GGR)
h isa FourierSeries || throw(ArgumentError("GGR currently only supports Fourier series Hamiltonians"))
p isa SymmetricBZ || throw(ArgumentError("GGR only supports BZ parameters from load_bz"))
bz = p
j = JacobianSeries(h)
w = workspace_allocate(j, period(j))
kalg = MonkhorstPack(npt=alg.npt, syms=bz.syms)
dom = canonical_ptr_basis(bz.B)
rule = init_fourier_rule(w, dom, kalg)
return get_ggr_data(rule, period(j))
end
function get_ggr_data(rule, t)
next = iterate(rule)
isnothing(next) && throw(ArgumentError("GGR - no data in rule"))
(w0, x0), state = next
h0, V0 = x0.s
e0, U0 = eigen(Hermitian(h0))
v0 = map(*, map(real∘diag, map(v -> U0'*v*U0, V0)), t) # multiply by period to get standardized velocities
n = 1
energies = Vector{typeof(e0)}(undef, length(rule))
velocities = Vector{typeof(v0)}(undef, length(rule))
weights = Vector{typeof(w0)}(undef, length(rule))
energies[n] = e0
velocities[n] = v0
weights[n] = w0
n += 1
next = iterate(rule, state)
while !isnothing(next)
(w, x), state = next
h, V = x.s
e, U = eigen(Hermitian(h))
v = map(*, map(real∘diag, map(v -> U'*v*U, V)), t)
energies[n] = e
velocities[n] = v
weights[n] = w
n += 1
next = iterate(rule, state)
end
return weights, energies, velocities
end
function dos_solve(h, domain, p, alg::GGR, cacheval;
abstol=nothing, reltol=nothing, maxiters=nothing)
domain isa Number || throw(ArgumentError("GGR only supports domains of individual eigenvalues"))
p isa SymmetricBZ || throw(ArgumentError("GGR only supports BZ parameters from load_bz"))
E = domain
bz = p
A = sum_ggr(ndims(bz.lims), alg.npt, E, cacheval...)
return DOSSolution(A, Success, (;))
end
function sum_ggr(ndim, npt, E, weights, energies, velocities)
@assert ndim == length(first(velocities))
b = 1/2npt
formula = (args...) -> ggr_formula(b, E, args...)
mapreduce(+, weights, energies, velocities) do w, es, vs
AutoSymPTR.mymul(w, mapreduce(formula, +, es, vs...))
end
end
ggr_formula(b, E, e) = throw(ArgumentError("GGR implemented for up to 3d BZ"))
ggr_formula(b, E, e, vs...) = ggr_formula(b, E, e)
# TODO: in higher dimensions, we can compute the area of the equi-frequency surface in a
# box with polyhedral manipulations, i.e.:
# - in plane coordinates, e+t*v, compute the convex hull in t due to intersection with E and
# bounds by sides of box
# - use any method to compute area of the convex hull in t, such as iterated integration.
function ggr_formula(b, E, e, v1)
v1 = abs(v1)
Δω = abs(E - e)
ω₁ = b * v1
return zero(E) <= Δω <= ω₁ ? 1/v1 : zero(1/v1)
end
function ggr_formula(b, E, e, v1, v2)
v2, v1 = extrema((abs(v1), abs(v2)))
Δω = abs(E - e)
ω₁ = b * abs(v1 - v2)
ω₃ = b * (v1 + v2)
# 2b is the line element
return zero(E) <= Δω <= ω₁ ? 2b/v1 :
ω₁ <= Δω <= ω₃ ? (b*(v1 + v2) - Δω)/(v1*v2) : zero(4b/v1)
end
function ggr_formula(b, E, e, v1, v2, v3)
v3, v2, v1 = sort(SVector(abs(v1), abs(v2), abs(v3)))
Δω = abs(E - e)
ω₁ = b * abs(v1 - v2 - v3)
ω₂ = b * (v1 - v2 + v3)
ω₃ = b * (v1 + v2 - v3)
ω₄ = b * (v1 + v2 + v3)
v = hypot(v1, v2, v3)
# 4b^2 is the area element
(v1 >= v2 + v3 && zero(E) <= Δω <= ω₁) ? 4b^2/v1 :
(v1 <= v2 + v3 && zero(E) <= Δω <= ω₁) ? (2b^2*(v1*v2 + v2*v3 + v3*v1) - (Δω^2 + (v*b)^2))/(v1*v2*v3) :
ω₁ <= Δω <= ω₂ ? (b^2*(v1*v2 + 3*v2*v3 + v3*v1) - b*Δω*(-v1 + v2 + v3) - (Δω^2 + (v*b)^2)/2)/(v1*v2*v3) :
ω₂ <= Δω <= ω₃ ? 2b*(b*(v1+v2) - Δω)/(v1*v2) : # this formula was incorrect in the Liu et al paper, correct in the Gilat paper
ω₃ <= Δω <= ω₄ ? (b*(v1+v2+v3) - Δω)^2/(2*v1*v2*v3) : zero(4b^2/v1)
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 3307 | """
DOSAlgorithm
Abstract supertype for algorithms for computing density of states
"""
abstract type DOSAlgorithm end
"""
DOSProblem(H, domain, [p=NullParameters()])
Define a problem for the density of states of a Hermitian or self-adjoint
operator depending on a parameter, H(p), on a given `domain` in its spectrum.
The mathematical definition we use is
```math
D(E) = \\sum_{k \\in p} \\sum_{\\lambda \\in \\text{spectrum}(H(k))} \\delta(E - \\lambda)
```
where ``E \\in \\text{domain}`` and ``\\delta`` is the Dirac Delta distribution.
## Arguments
- `H`: a linear operator depending on a parameter, H(p), that is finite
dimensional (e.g: tight binding model) or infinite dimensional (e.g. DFT data)
- `domain`: a set in the spectrum for which an approximation of the
density-of-states is desired. Can be a single point, in which case the
solution will return the estimated density of states at that eigenvalue, or an
interval, in which case the solution will return a function approximation to
the density of states on that interval in the spectrum that should be
understood as a distribution or measure.
- `p`: optional parameters on which `H` depends for which the density of states
should sum over. Can be discrete (e.g. for `H` a Hamiltonian with spin degrees
of freedom) or continuous (e.g. for `H` a Hamiltonian parameterized by crystal
momentum).
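## Example
A sketch for the 1d cosine band ``H(k) = \\cos(2\\pi k)`` solved with [`GGR`](@ref), which
requires matrix-valued Fourier coefficients; this assumes the
`FourierSeries(coeffs; period, offset)` constructor from FourierSeriesEvaluators.jl:
```
using StaticArrays, FourierSeriesEvaluators
h = FourierSeries(SMatrix{1,1,Float64,1}.([0.5, 0.0, 0.5]); period=1.0, offset=-2)
bz = load_bz(FBZ(), fill(2pi, 1, 1))
prob = DOSProblem(h, 0.1, bz) # density of states at E = 0.1
solve(prob, GGR(npt=1000)).value
```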
"""
struct DOSProblem{H,D,P,K<:NamedTuple}
H::H
domain::D
p::P
kwargs::K
end
function DOSProblem(H, domain, p=NullParameters(); kws...)
return DOSProblem(H, domain, p, NamedTuple(kws))
end
struct DOSSolution{V,S}
value::V
retcode::ReturnCode
stats::S
end
# store the data in a mutable cache so that the user can update the cache and
# compute the result again without setting up new problems.
mutable struct DOSCache{H,D,P,A,C,K}
H::H
domain::D
p::P
alg::A
cacheval::C
isfresh::Bool # true if H has been replaced/modified, otherwise false
kwargs::K
end
function Base.setproperty!(cache::DOSCache, name::Symbol, item)
if name === :H
setfield!(cache, :isfresh, true)
end
return setfield!(cache, name, item)
end
# by default, algorithms won't have anything in the cache
init_cacheval(h, dom, p, ::DOSAlgorithm) = nothing
# check same keywords as for integral problems: abstol, reltol, maxiters
checkkwargs_dos(kws) = checkkwargs(kws)
"""
init(::DOSProblem, ::DOSAlgorithm; kwargs...)::DOSCache
Create a cache of the data used by an algorithm to solve the given problem.
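## Example
A sketch of cache reuse, with `prob` as in [`DOSProblem`](@ref) and `h2` a hypothetical
replacement Hamiltonian; assigning to `cache.H` marks the cache stale so the next solve
rebuilds the algorithm data:
```
cache = init(prob, GGR())
sol1 = solve!(cache)
cache.H = h2 # sets `isfresh`, triggering a cacheval rebuild on the next solve!
sol2 = solve!(cache)
```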
"""
function init(prob::DOSProblem, alg::DOSAlgorithm; kwargs...)
h = prob.H; dom = prob.domain; p = prob.p
kws = (; prob.kwargs..., kwargs...)
checkkwargs_dos(kws)
cacheval = init_cacheval(h, dom, p, alg)
return DOSCache(h, dom, p, alg, cacheval, false, kws)
end
"""
solve!(::DOSCache)::DOSSolution
Compute the solution of a problem from the initialized cache
"""
function solve!(c::DOSCache)
if c.isfresh
c.cacheval = init_cacheval(c.H, c.domain, c.p, c.alg)
c.isfresh = false
end
return dos_solve(c.H, c.domain, c.p, c.alg, c.cacheval; c.kwargs...)
end
function dos_solve end
"""
solve(::DOSProblem, ::DOSAlgorithm; kws...)::DOSSolution
"""
solve(prob::DOSProblem, alg::DOSAlgorithm; kwargs...)
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 23526 | # Here we provide optimizations of multidimensional Fourier series evaluation for the
# various algorithms. It could be a package extension, but we keep it in the main library
# because it provides the infrastructure for the main application of the library.
# In multiple dimensions, these specialized rules can provide a benefit over batch
# integrands since the multidimensional structure of the quadrature rule can be lost when
# batching many points together and passing them to an integrand to solve simultaneously. In
# some cases we can also cache the rule with series evaluations and apply it to different
# integrands, which again could only be achieved with a batched and vector-valued integrand.
# The ethos of this package is to let the user provide the kernel for the integrand and to
# have the library take care of the details of fast evaluation and such. Automating batched
# and vector-valued integrands is another worthwhile approach, but it is not well
# established in existing Julia libraries or Integrals.jl, so in the meantime I strive to
# provide these efficient rules for Wannier interpolation. In the long term, the batched and
# vector-valued approach will allow distributed computing and other benefits that are beyond
# the scope of what this package aims to provide.
# We use the pattern of allowing the user to pass a container with the integrand, Fourier
# series and workspace, and use dispatch to enable the optimizations
# the nested batched integrand is optional, but when included it allows for thread-safe
# parallelization
abstract type AbstractFourierIntegralFunction <: AbstractIntegralFunction end
"""
FourierIntegralFunction(f, s, [prototype=nothing]; alias=false)
Constructor for an out-of-place integrand of the form `f(x, s(x), p)`, where the
Fourier series `s` is evaluated efficiently by the library.
## Arguments
- `f`: The integrand, accepting inputs `f(x, s(x), p)`
- `s::AbstractFourierSeries`: The Fourier series to evaluate
- `prototype`: optionally, a value of the same type as the output of `f`, used to
allocate buffers
- `alias::Bool`: whether to `deepcopy` the series (false) or use the series as-is (true)
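## Example
A minimal sketch, following the package's test suite:
```julia
using AutoBZCore, FourierSeriesEvaluators
s = FourierSeries([1, 0, 1]/2; period=1.0, offset=-2)   # s(x) == cos(2pi*x)
int(x, s, p) = x * s + p
prob = IntegralProblem(FourierIntegralFunction(int, s), (0, 1), 0.0)
sol = solve(prob, QuadGKJL())
```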
"""
struct FourierIntegralFunction{F,S,P} <: AbstractFourierIntegralFunction
f::F
s::S
prototype::P
alias::Bool
end
FourierIntegralFunction(f, s, p=nothing; alias=false) = FourierIntegralFunction(f, s, p, alias)
function get_prototype(f::FourierIntegralFunction, x, ws, p)
f.prototype === nothing ? f.f(x, ws(x), p) : f.prototype
end
get_prototype(f::FourierIntegralFunction, x, p) = get_prototype(f, x, f.s, p)
function get_fourierworkspace(f::AbstractFourierIntegralFunction)
f.s isa FourierWorkspace ? f.s : FourierSeriesEvaluators.workspace_allocate(f.alias ? f.s : deepcopy(f.s), FourierSeriesEvaluators.period(f.s))
end
# TODO implement FourierInplaceIntegrand FourierInplaceBatchIntegrand
"""
CommonSolveFourierIntegralFunction(prob, alg, update!, postsolve, s, [prototype, specialize]; alias=false, kws...)
Constructor for an integrand that solves a problem defined with the CommonSolve.jl
interface, `prob`, which is instantiated using `init(prob, alg; kws...)`. The helper
function `update!(cache, x, s(x), p)` is called before `solve!(cache)`, followed by
`postsolve(sol, x, s(x), p)`, which should return the value of the solution.
The `prototype` argument can help control how much to `specialize` on the type of the
problem, which defaults to `FullSpecialize()` so that run times are improved. However
`FunctionWrapperSpecialize()` may help reduce compile times.
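## Example
A sketch of an integrand defined by a nested quadrature, adapted from the package's
test suite:
```julia
using AutoBZCore, FourierSeriesEvaluators
s = FourierSeries([1, 0, 1]/2; period=1.0, offset=-2)
f = (x, (y, s, p)) -> x * s + p + y
subprob = IntegralProblem(f, (0, 1), (0.5, s(0.5), 1.0))
update! = (cache, x, s, p) -> cache.p = (x, s, p)
postsolve = (sol, x, s, p) -> sol.value
integrand = CommonSolveFourierIntegralFunction(subprob, QuadGKJL(), update!, postsolve, s)
prob = IntegralProblem(integrand, (0, 1), 3.0)
sol = solve(prob, QuadGKJL())
```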
"""
struct CommonSolveFourierIntegralFunction{P,A,S,K,U,PS,T,M<:AbstractSpecialization} <: AbstractFourierIntegralFunction
prob::P
alg::A
s::S
kwargs::K
update!::U
postsolve::PS
prototype::T
specialize::M
alias::Bool
end
function CommonSolveFourierIntegralFunction(prob, alg, update!, postsolve, s, prototype=nothing, specialize=FullSpecialize(); alias=false, kws...)
return CommonSolveFourierIntegralFunction(prob, alg, s, NamedTuple(kws), update!, postsolve, prototype, specialize, alias)
end
function do_solve!(cache, f::CommonSolveFourierIntegralFunction, x, s, p)
f.update!(cache, x, s, p)
sol = solve!(cache)
return f.postsolve(sol, x, s, p)
end
function get_prototype(f::CommonSolveFourierIntegralFunction, x, ws, p)
if isnothing(f.prototype)
cache = init(f.prob, f.alg; f.kwargs...)
do_solve!(cache, f, x, ws(x), p)
else
f.prototype
end
end
get_prototype(f::CommonSolveFourierIntegralFunction, x, p) = get_prototype(f, x, f.s, p)
function init_specialized_fourierintegrand(cache, f, dom, p; x=get_prototype(dom), ws=f.s, s = ws(x), prototype=f.prototype)
proto = prototype === nothing ? do_solve!(cache, f, x, s, p) : prototype
func = (x, s, p) -> do_solve!(cache, f, x, s, p)
integrand = if f.specialize isa FullSpecialize
func
elseif f.specialize isa FunctionWrapperSpecialize
FunctionWrapper{typeof(proto), typeof((x, s, p))}(func)
else
throw(ArgumentError("$(f.specialize) is not implemented"))
end
return integrand, proto
end
function _init_commonsolvefourierfunction(f, dom, p; kws...)
cache = init(f.prob, f.alg; f.kwargs...)
integrand, prototype = init_specialized_fourierintegrand(cache, f, dom, p; kws...)
return cache, integrand, prototype
end
# TODO implement CommonSolveFourierInplaceIntegrand CommonSolveFourierInplaceBatchIntegrand
# similar to workspace_allocate, but more type-stable because of loop unrolling and vector types
function workspace_allocate_vec(s::AbstractFourierSeries{N}, x::NTuple{N,Any}, len::NTuple{N,Integer}=ntuple(one,Val(N))) where {N}
# Only the top-level workspace has an AbstractFourierSeries in the series field
# In the lower level workspaces the series field has a cache that can be contract!-ed
# into a series
dim = Val(N)
if N == 1
c = FourierSeriesEvaluators.allocate(s, x[N], dim)
ws = Vector{typeof(c)}(undef, len[N])
ws[1] = c
for n in 2:len[N]
ws[n] = FourierSeriesEvaluators.allocate(s, x[N], dim)
end
else
c = FourierSeriesEvaluators.allocate(s, x[N], dim)
t = FourierSeriesEvaluators.contract!(c, s, x[N], dim)
c_ = FourierWorkspace(c, FourierSeriesEvaluators.workspace_allocate(t, x[1:N-1], len[1:N-1]).cache)
ws = Vector{typeof(c_)}(undef, len[N])
ws[1] = c_
for n in 2:len[N]
_c = FourierSeriesEvaluators.allocate(s, x[N], dim)
_t = FourierSeriesEvaluators.contract!(_c, s, x[N], dim)
ws[n] = FourierWorkspace(_c, FourierSeriesEvaluators.workspace_allocate(_t, x[1:N-1], len[1:N-1]).cache)
end
end
return FourierWorkspace(s, ws)
end
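# A quadrature node `x` bundled with the value of a Fourier series at that node, `s`,
# so that rules can precompute and reuse series evaluations across integrands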
struct FourierValue{X,S}
x::X
s::S
end
@inline AutoSymPTR.mymul(w, x::FourierValue) = FourierValue(AutoSymPTR.mymul(w, x.x), x.s)
@inline AutoSymPTR.mymul(::AutoSymPTR.One, x::FourierValue) = x
function init_cacheval(f::FourierIntegralFunction, dom, p, alg::QuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
prototype = get_prototype(f, get_prototype(segs), ws, p)
return init_segbuf(prototype, segs, alg), ws
end
function init_cacheval(f::CommonSolveFourierIntegralFunction, dom, p, alg::QuadGKJL; kws...)
segs = PuncturedInterval(dom)
x = get_prototype(segs)
ws = get_fourierworkspace(f)
cache, integrand, prototype = _init_commonsolvefourierfunction(f, dom, p; x, ws)
return init_segbuf(prototype, segs, alg), ws, cache, integrand
end
function call_quadgk(f::FourierIntegralFunction, p, u, usegs, cacheval; kws...)
segbuf, ws = cacheval
quadgk(x -> (ux = u*x; f.f(ux, ws(ux), p)), usegs...; kws..., segbuf)
end
function call_quadgk(f::CommonSolveFourierIntegralFunction, p, u, usegs, cacheval; kws...)
segbuf, ws, _, integrand = cacheval
quadgk(x -> (ux = u*x; integrand(ux, ws(ux), p)), usegs...; kws..., segbuf)
end
function init_cacheval(f::FourierIntegralFunction, dom, p, ::HCubatureJL; kws...)
# TODO utilize hcubature_buffer
ws = get_fourierworkspace(f)
return ws
end
function init_cacheval(f::CommonSolveFourierIntegralFunction, dom, p, ::HCubatureJL; kws...)
# TODO utilize hcubature_buffer
ws = get_fourierworkspace(f)
cache, integrand, = _init_commonsolvefourierfunction(f, dom, p; ws)
return (; ws, cache, integrand)
end
function hcubature_integrand(f::FourierIntegralFunction, p, a, b, ws)
x -> f.f(x, ws(x), p)
end
function hcubature_integrand(f::CommonSolveFourierIntegralFunction, p, a, b, cacheval)
integrand = cacheval.integrand
ws = cacheval.ws
x -> integrand(x, ws(x), p)
end
function init_autosymptr_cache(f::FourierIntegralFunction, dom, p, bufsize; kws...)
ws = get_fourierworkspace(f)
return (; buffer=nothing, ws)
end
function init_autosymptr_cache(f::CommonSolveFourierIntegralFunction, dom, p, bufsize; kws...)
ws = get_fourierworkspace(f)
cache, integrand, = _init_commonsolvefourierfunction(f, dom, p; ws)
return (; buffer=nothing, ws, cache, integrand)
end
function autosymptr_integrand(f::FourierIntegralFunction, p, segs, cacheval)
ws = cacheval.ws
x -> x isa FourierValue ? f.f(x.x, x.s, p) : f.f(x, ws(x), p)
end
function autosymptr_integrand(f::CommonSolveFourierIntegralFunction, p, segs, cacheval)
integrand = cacheval.integrand
ws = cacheval.ws
return x -> x isa FourierValue ? integrand(x.x, x.s, p) : integrand(x, ws(x), p)
end
function init_cacheval(f::FourierIntegralFunction, dom, p, alg::AuxQuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
prototype = get_prototype(f, get_prototype(segs), ws, p)
return init_segbuf(prototype, segs, alg), ws
end
function init_cacheval(f::CommonSolveFourierIntegralFunction, dom, p, alg::AuxQuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
x = get_prototype(segs)
cache, integrand, prototype = _init_commonsolvefourierfunction(f, dom, p; x, ws)
return init_segbuf(prototype, segs, alg), ws, cache, integrand
end
function call_auxquadgk(f::FourierIntegralFunction, p, u, usegs, cacheval; kws...)
segbuf, ws = cacheval
auxquadgk(x -> (ux=u*x; f.f(ux, ws(ux), p)), usegs...; kws..., segbuf)
end
function call_auxquadgk(f::CommonSolveFourierIntegralFunction, p, u, usegs, cacheval; kws...)
# cache = cacheval[2] could call do_solve!(cache, f, x, p) to fully specialize
segbuf, ws, _, integrand = cacheval
auxquadgk(x -> (ux=u*x; integrand(ux, ws(ux), p)), usegs...; kws..., segbuf)
end
function init_cacheval(f::FourierIntegralFunction, dom, p, alg::ContQuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
prototype = get_prototype(f, get_prototype(segs), ws, p)
segbufs = init_csegbuf(prototype, dom, alg)
return (; segbufs..., ws)
end
function init_cacheval(f::CommonSolveFourierIntegralFunction, dom, p, alg::ContQuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
cache, integrand, prototype = _init_commonsolvefourierfunction(f, dom, p; ws, x=get_prototype(segs))
segbufs = init_csegbuf(prototype, dom, alg)
return (; segbufs..., ws, cache, integrand)
end
function call_contquadgk(f::FourierIntegralFunction, p, segs, cacheval; kws...)
ws = cacheval.ws
contquadgk(x -> f.f(x, ws(x), p), segs; kws...)
end
function call_contquadgk(f::CommonSolveFourierIntegralFunction, p, segs, cacheval; kws...)
integrand = cacheval.integrand
ws = cacheval.ws
contquadgk(x -> integrand(x, ws(x), p), segs...; kws...)
end
function init_cacheval(f::FourierIntegralFunction, dom, p, alg::MeroQuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
prototype = get_prototype(f, get_prototype(segs), ws, p)
segbuf = init_msegbuf(prototype, dom, alg)
return (; segbuf, ws)
end
function init_cacheval(f::CommonSolveFourierIntegralFunction, dom, p, alg::MeroQuadGKJL; kws...)
segs = PuncturedInterval(dom)
ws = get_fourierworkspace(f)
cache, integrand, prototype = _init_commonsolvefourierfunction(f, dom, p; ws, x=get_prototype(segs))
segbuf = init_msegbuf(prototype, dom, alg)
return (; segbuf, ws, cache, integrand)
end
function call_meroquadgk(f::FourierIntegralFunction, p, segs, cacheval; kws...)
ws = cacheval.ws
meroquadgk(x -> f.f(x, ws(x), p), segs; kws...)
end
function call_meroquadgk(f::CommonSolveFourierIntegralFunction, p, segs, cacheval; kws...)
integrand = cacheval.integrand
ws = cacheval.ws
meroquadgk(x -> integrand(x, ws(x), p), segs...; kws...)
end
function _fourier_update!(cache, x, p)
_update!(cache, x, p)
s = workspace_contract!(p.ws, x)
cache.cacheval.p = (; cache.cacheval.p..., ws=s)
return
end
function inner_integralfunction(f::FourierIntegralFunction, x0, p)
ws = get_fourierworkspace(f)
proto = get_prototype(f, x0, ws, p)
func = IntegralFunction(proto) do x, (; p, ws, lims_state)
f.f(limit_iterate(lims_state..., x), workspace_evaluate!(ws, x), p)
end
return func, ws
end
function outer_integralfunction(f::FourierIntegralFunction, x0, p)
ws = get_fourierworkspace(f)
proto = get_prototype(f, x0, ws, p)
s = workspace_contract!(ws, x0[end])
func = FourierIntegralFunction(f.f, s, proto; alias=true)
return func, ws, _fourier_update!, _postsolve
end
# TODO it would be desirable to allow the inner integralfunction to be of the
# same type as f, which requires moving workspace out of the parameters into
# some kind of mutable storage
function inner_integralfunction(f::CommonSolveFourierIntegralFunction, x0, p)
ws = get_fourierworkspace(f)
proto = get_prototype(f, x0, ws, p)
cache = init(f.prob, f.alg; f.kwargs...)
func = IntegralFunction(proto) do x, (; p, ws, lims_state)
y = limit_iterate(lims_state..., x)
s = workspace_evaluate!(ws, x)
do_solve!(cache, f, y, s, p)
end
return func, ws
end
function outer_integralfunction(f::CommonSolveFourierIntegralFunction, x0, p)
ws = get_fourierworkspace(f)
proto = get_prototype(f, x0, ws, p)
s = workspace_contract!(ws, x0[end])
func = CommonSolveFourierIntegralFunction(f.prob, f.alg, f.update!, f.postsolve, s, proto, f.specialize; alias=true, f.kwargs...)
return func, ws, _fourier_update!, _postsolve
end
# PTR rules
# no symmetries
struct FourierPTR{N,T,S,X} <: AbstractArray{Tuple{AutoSymPTR.One,FourierValue{SVector{N,T},S}},N}
s::Array{S,N}
p::AutoSymPTR.PTR{N,T,X}
end
function fourier_ptr!(vals::AbstractArray{T,1}, w::FourierWorkspace, x::AbstractVector) where {T}
t = period(w.series, 1)
if length(w.cache) === 1
for (i, y) in zip(eachindex(vals), x)
@inbounds vals[i] = workspace_evaluate!(w, t*y)
end
else
# we batch for memory locality in vals array on each thread
Threads.@threads for (vrange, ichunk) in chunks(axes(vals, 1), length(w.cache), :batch)
for i in vrange
@inbounds vals[i] = workspace_evaluate!(w, t*x[i], ichunk)
end
end
end
return vals
end
function fourier_ptr!(vals::AbstractArray{T,d}, w::FourierWorkspace, x::AbstractVector) where {T,d}
t = period(w.series, d)
if length(w.cache) === 1
for (y, v) in zip(x, eachslice(vals, dims=d))
fourier_ptr!(v, workspace_contract!(w, t*y), x)
end
else
# we batch for memory locality in vals array on each thread
Threads.@threads for (vrange, ichunk) in chunks(axes(vals, d), length(w.cache), :batch)
for i in vrange
ws = workspace_contract!(w, t*x[i], ichunk)
fourier_ptr!(view(vals, ntuple(_->(:),Val(d-1))..., i), ws, x)
end
end
end
return vals
end
function FourierPTR(w::FourierWorkspace, ::Type{T}, ndim, npt) where {T}
FourierSeriesEvaluators.isinplace(w.series) && throw(ArgumentError("inplace series not supported for PTR - please file a bug report"))
# unitless quadrature weight/node, but unitful value to Fourier series
p = AutoSymPTR.PTR(typeof(float(real(one(T)))), ndim, npt)
s = workspace_evaluate(w, ntuple(_->zero(T), ndim))
vals = similar(p, typeof(s))
fourier_ptr!(vals, w, p.x)
return FourierPTR(vals, p)
end
# Array interface
Base.size(r::FourierPTR) = size(r.s)
function Base.getindex(r::FourierPTR{N}, i::Vararg{Int,N}) where {N}
w, x = r.p[i...]
return w, FourierValue(x, r.s[i...])
end
# iteration
function Base.iterate(p::FourierPTR)
next1 = iterate(p.s)
next1 === nothing && return nothing
next2 = iterate(p.p)
next2 === nothing && return nothing
s, state1 = next1
(w, x), state2 = next2
return (w, FourierValue(x, s)), (state1, state2)
end
Base.isdone(::FourierPTR, state) = any(isnothing, state)
function Base.iterate(p::FourierPTR, state)
next1 = iterate(p.s, state[1])
next1 === nothing && return nothing
next2 = iterate(p.p, state[2])
next2 === nothing && return nothing
s, state1 = next1
(w, x), state2 = next2
return (w, FourierValue(x, s)), (state1, state2)
end
function (rule::FourierPTR)(f::F, B::Basis, buffer=nothing) where {F}
arule = AutoSymPTR.AffineQuad(rule, B)
return AutoSymPTR.quadsum(arule, f, arule.vol / length(rule), buffer)
end
# SymPTR rules
struct FourierMonkhorstPack{d,W,T,S}
npt::Int64
nsyms::Int64
wxs::Vector{Tuple{W,FourierValue{SVector{d,T},S}}}
end
function _fourier_symptr!(vals::AbstractVector, w::FourierWorkspace, x::AbstractVector, npt, wsym, ::Tuple{}, idx, coord, offset)
t = period(w.series, 1)
o = offset-1
# we can't parallelize the inner loop without knowing the offsets of each contiguous
# chunk, which would require a ragged array to store. We would be better off with
# changing the symptr algorithm to compute a convex ibz
# but for 3D grids this inner loop should be a large enough base case to make
# parallelizing worth it, although the workloads will vary piecewise linearly as a
# function of the slice, so we should distribute points using :scatter
n = 0
for i in 1:npt
@inbounds wi = wsym[i, idx...]
iszero(wi) && continue
@inbounds xi = x[i]
vals[o+(n+=1)] = (wi, FourierValue(SVector(xi, coord...), workspace_evaluate!(w, t*xi)))
end
return vals
end
function _fourier_symptr!(vals::AbstractVector, w::FourierWorkspace, x::AbstractVector, npt, wsym, flags, idx, coord, offset)
d = ndims(w.series)
t = period(w.series, d)
flag, f = flags[begin:end-1], flags[end]
if (len = length(w.cache)) === 1 # || len <= w.basecasesize[d]
for i in 1:npt
@inbounds(fi = f[i, idx...]) == 0 && continue
@inbounds xi = x[i]
ws = workspace_contract!(w, t*xi)
_fourier_symptr!(vals, ws, x, npt, wsym, flag, (i, idx...), (xi, coord...), fi)
end
else
# since we don't know the distribution of ibz nodes, other than that it will be
# piecewise linear, our best chance for a speedup from parallelizing is to scatter
Threads.@threads for (vrange, ichunk) in chunks(1:npt, len, :scatter)
for i in vrange
@inbounds(fi = f[i, idx...]) == 0 && continue
@inbounds xi = x[i]
ws = workspace_contract!(w, t*xi, ichunk)
_fourier_symptr!(vals, ws, x, npt, wsym, flag, (i, idx...), (xi, coord...), fi)
end
end
end
return vals
end
function fourier_symptr!(wxs, w, u, npt, wsym, flags)
flag, f = flags[begin:end-1], flags[end]
return _fourier_symptr!(wxs, w, u, npt, wsym, flag, (), (), f[])
end
function FourierMonkhorstPack(w::FourierWorkspace, ::Type{T}, ndim::Val{d}, npt, syms) where {d,T}
# unitless quadrature weight/node, but unitful value to Fourier series
FourierSeriesEvaluators.isinplace(w.series) && throw(ArgumentError("inplace series not supported for PTR - please file a bug report"))
u = AutoSymPTR.ptrpoints(typeof(float(real(one(T)))), npt)
s = w(map(*, period(w.series), ntuple(_->zero(T), ndim)))
# the bottleneck is likely to be symptr_rule, which is not a fast or parallel algorithm
wsym, flags, nsym = AutoSymPTR.symptr_rule(npt, ndim, syms)
wxs = Vector{Tuple{eltype(wsym),FourierValue{SVector{d,eltype(u)},typeof(s)}}}(undef, nsym)
# fourier_symptr! may be worth parallelizing for expensive Fourier series, but may not
# be the bottleneck
fourier_symptr!(wxs, w, u, npt, wsym, flags)
return FourierMonkhorstPack(npt, length(syms), wxs)
end
# indexing
Base.getindex(rule::FourierMonkhorstPack, i::Int) = rule.wxs[i]
# iteration
Base.eltype(::Type{FourierMonkhorstPack{d,W,T,S}}) where {d,W,T,S} = Tuple{W,FourierValue{SVector{d,T},S}}
Base.length(r::FourierMonkhorstPack) = length(r.wxs)
Base.iterate(rule::FourierMonkhorstPack, args...) = iterate(rule.wxs, args...)
function (rule::FourierMonkhorstPack{d})(f::F, B::Basis, buffer=nothing) where {d,F}
arule = AutoSymPTR.AffineQuad(rule, B)
return AutoSymPTR.quadsum(arule, f, arule.vol / (rule.npt^d * rule.nsyms), buffer)
end
# rule definition
struct FourierMonkhorstPackRule{S,M}
s::S
m::M
end
function FourierMonkhorstPackRule(s, syms, a, nmin, nmax, n₀, Δn)
mp = AutoSymPTR.MonkhorstPackRule(syms, a, nmin, nmax, n₀, Δn)
return FourierMonkhorstPackRule(s, mp)
end
AutoSymPTR.nsyms(r::FourierMonkhorstPackRule) = AutoSymPTR.nsyms(r.m)
function (r::FourierMonkhorstPackRule)(::Type{T}, v::Val{d}) where {T,d}
if r.m.syms isa Nothing
FourierPTR(r.s, T, v, r.m.n₀)
else
FourierMonkhorstPack(r.s, T, v, r.m.n₀, r.m.syms)
end
end
function AutoSymPTR.nextrule(p::FourierPTR{d,T}, r::FourierMonkhorstPackRule) where {d,T}
return FourierPTR(r.s, T, Val(d), length(p.p.x)+r.m.Δn)
end
function AutoSymPTR.nextrule(p::FourierMonkhorstPack{d,W,T}, r::FourierMonkhorstPackRule) where {d,W,T}
return FourierMonkhorstPack(r.s, T, Val(d), p.npt+r.m.Δn, r.m.syms)
end
# dispatch on PTR algorithms
# function init_buffer(f::FourierIntegrand, len)
# return f.nest isa NestedBatchIntegrand ? Vector{eltype(f.nest.y)}(undef, len) : nothing
# end
function init_fourier_rule(w::FourierWorkspace, dom, alg::MonkhorstPack)
@assert ndims(w.series) == ndims(dom)
if alg.syms === nothing
return FourierPTR(w, eltype(dom), Val(ndims(dom)), alg.npt)
else
return FourierMonkhorstPack(w, eltype(dom), Val(ndims(dom)), alg.npt, alg.syms)
end
end
function init_cacheval(f::AbstractFourierIntegralFunction, dom, p, alg::MonkhorstPack; kws...)
cache = init_autosymptr_cache(f, dom, p, alg.nthreads; kws...)
ws = cache.ws
rule = init_fourier_rule(ws, dom, alg)
return (; rule, buffer=nothing, ws, cache...)
end
function init_fourier_rule(w::FourierWorkspace, dom, alg::AutoSymPTRJL)
@assert ndims(w.series) == ndims(dom)
return FourierMonkhorstPackRule(w, alg.syms, alg.a, alg.nmin, alg.nmax, alg.n₀, alg.Δn)
end
function init_fourier_rule(w::FourierWorkspace, dom::RepBZ, alg::AutoSymPTRJL)
B = get_basis(dom)
rule = init_fourier_rule(w, B, alg)
return SymmetricRuleDef(rule, dom.rep, dom.bz)
end
function init_cacheval(f::AbstractFourierIntegralFunction, dom, p, alg::AutoSymPTRJL; kws...)
cache = init_autosymptr_cache(f, dom, p, alg.nthreads; kws...)
ws = cache.ws
rule = init_fourier_rule(ws, dom, alg)
rule_cache = AutoSymPTR.alloc_cache(eltype(dom), Val(ndims(dom)), rule)
return (; rule, rule_cache, cache...)
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 8128 | abstract type AbstractIntegralFunction end
# should have at least two fields:
# - f
# - prototype
"""
IntegralFunction(f, [prototype=nothing])
Constructor for an out-of-place integrand of the form `f(x, p)`.
Optionally, a `prototype` can be provided for the output of the function.
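## Example
A minimal sketch:
```julia
f = IntegralFunction((x, p) -> p * sin(x))       # prototype computed on first use
g = IntegralFunction((x, p) -> p * sin(x), 0.0)  # prototype supplied explicitly
```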
"""
struct IntegralFunction{F,P} <: AbstractIntegralFunction
f::F
prototype::P
end
IntegralFunction(f) = IntegralFunction(f, nothing)
function get_prototype(f::IntegralFunction, x, p)
f.prototype === nothing ? f.f(x, p) : f.prototype
end
"""
InplaceIntegralFunction(f!, prototype::AbstractArray)
Constructor for an inplace integrand of the form `f!(y, x, p)`.
A `prototype` array is required to store the same type and size as the result, `y`.
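## Example
A sketch from the package's test suite:
```julia
integrand = InplaceIntegralFunction((y, x, p) -> y .= p * sin(only(x)), [0.0])
prob = IntegralProblem(integrand, (0.0, 2pi), 3.0)
sol = solve(prob, QuadGKJL())
```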
"""
struct InplaceIntegralFunction{F,P<:AbstractArray} <: AbstractIntegralFunction
# in-place function f!(y, x, p) that takes one x value and outputs an array of results in-place
f!::F
prototype::P
end
function get_prototype(f::InplaceIntegralFunction, x, p)
# iip is required to have a prototype array
f.prototype
end
"""
InplaceBatchIntegralFunction(f!, prototype; max_batch::Integer=typemax(Int))
Constructor for an inplace, batched integrand of the form `f!(y, x, p)` that accepts an
array `x` containing a batch of evaluation points stored along the last axis of the array.
A `prototype` array is required to store the same type and size as the result, `y`;
however, the last axis, which is reserved for batching, should contain at least one
element. The `max_batch` keyword sets a soft limit on the number of points batched simultaneously.
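## Example
A sketch from the package's test suite, where `x` holds a batch of quadrature nodes
and `y` the corresponding outputs:
```julia
integrand = InplaceBatchIntegralFunction((y, x, p) -> y .= p .* sin.(only.(x)), zeros(1))
prob = IntegralProblem(integrand, (0.0, 2pi), 3.0)
sol = solve(prob, QuadGKJL())
```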
"""
struct InplaceBatchIntegralFunction{F,P<:AbstractArray} <: AbstractIntegralFunction
f!::F
prototype::P
max_batch::Int
end
function InplaceBatchIntegralFunction(f!, p::AbstractArray; max_batch::Integer=typemax(Int))
return InplaceBatchIntegralFunction(f!, p, max_batch)
end
function get_prototype(f::InplaceBatchIntegralFunction, x, p)
# iip is required to have a prototype array
f.prototype
end
abstract type AbstractSpecialization end
struct NoSpecialize <: AbstractSpecialization end
struct FunctionWrapperSpecialize <: AbstractSpecialization end
struct FullSpecialize <: AbstractSpecialization end
"""
CommonSolveIntegralFunction(prob, alg, update!, postsolve, [prototype, specialize]; kws...)
Constructor for an integrand that solves a problem defined with the CommonSolve.jl
interface, `prob`, which is instantiated using `init(prob, alg; kws...)`. The helper
function `update!(cache, x, p)` is called before `solve!(cache)`, followed by
`postsolve(sol, x, p)`, which should return the value of the solution.
The `prototype` argument can help control how much to `specialize` on the type of the
problem, which defaults to `FullSpecialize()` so that run times are improved. However
`FunctionWrapperSpecialize()` may help reduce compile times.
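## Example
A sketch of an integrand defined by a nested solve, adapted from the package's test
suite:
```julia
f = (x, (y, p)) -> p * (y + x)
subprob = IntegralProblem(f, (0.0, 2pi), (pi, 3.0))
update! = (cache, x, p) -> cache.p = (x, p)
postsolve = (sol, x, p) -> sol.value
integrand = CommonSolveIntegralFunction(subprob, QuadGKJL(), update!, postsolve)
prob = IntegralProblem(integrand, (0.0, 2pi), 3.0)
sol = solve(prob, QuadGKJL())
```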
"""
struct CommonSolveIntegralFunction{P,A,K,U,S,T,M<:AbstractSpecialization} <: AbstractIntegralFunction
prob::P
alg::A
kwargs::K
update!::U
postsolve::S
prototype::T
specialize::M
end
function CommonSolveIntegralFunction(prob, alg, update!, postsolve, prototype=nothing, specialize=FullSpecialize(); kws...)
return CommonSolveIntegralFunction(prob, alg, NamedTuple(kws), update!, postsolve, prototype, specialize)
end
function do_solve!(cache, f::CommonSolveIntegralFunction, x, p)
f.update!(cache, x, p)
sol = solve!(cache)
return f.postsolve(sol, x, p)
end
function get_prototype(f::CommonSolveIntegralFunction, x, p)
if isnothing(f.prototype)
cache = init(f.prob, f.alg; f.kwargs...)
do_solve!(cache, f, x, p)
else
f.prototype
end
end
function init_specialized_integrand(cache, f, dom, p; x=get_prototype(dom), prototype=f.prototype)
proto = prototype === nothing ? do_solve!(cache, f, x, p) : prototype
func = (x, p) -> do_solve!(cache, f, x, p)
integrand = if f.specialize isa FullSpecialize
func
elseif f.specialize isa FunctionWrapperSpecialize
FunctionWrapper{typeof(proto), typeof((x, p))}(func)
else
throw(ArgumentError("$(f.specialize) is not implemented"))
end
return integrand, proto
end
function _init_commonsolvefunction(f, dom, p; kws...)
cache = init(f.prob, f.alg; f.kwargs...)
integrand, prototype = init_specialized_integrand(cache, f, dom, p; kws...)
return cache, integrand, prototype
end
# TODO add InplaceCommonSolveIntegralFunction and InplaceBatchCommonSolveIntegralFunction
# TODO add ThreadedCommonSolveIntegralFunction and DistributedCommonSolveIntegralFunction
"""
IntegralAlgorithm
Abstract supertype for integration algorithms.
"""
abstract type IntegralAlgorithm end
"""
NullParameters()
A singleton type representing absent parameters
"""
struct NullParameters end
"""
IntegralProblem(f, domain, [p=NullParameters()]; kwargs...)
## Arguments
- `f::AbstractIntegralFunction`: The function to integrate
- `domain`: The domain to integrate over, e.g. `(lb, ub)`
- `p`: Parameters to pass to the integrand
## Keywords
Additional keywords are passed directly to the solver
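## Example
A minimal sketch:
```julia
using AutoBZCore
prob = IntegralProblem((x, p) -> p * sin(x), (0.0, 2pi), 1.0)
sol = solve(prob, QuadGKJL())
```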
"""
struct IntegralProblem{F<:AbstractIntegralFunction,D,P,K<:NamedTuple}
f::F
dom::D
p::P
kwargs::K
end
function IntegralProblem(f::AbstractIntegralFunction, dom, p=NullParameters(); kws...)
return IntegralProblem(f, dom, p, NamedTuple(kws))
end
function IntegralProblem(f, dom, p=NullParameters(); kws...)
return IntegralProblem(IntegralFunction(f), dom, p; kws...)
end
mutable struct IntegralSolver{F,D,P,A,C,K}
f::F
dom::D
p::P
alg::A
cacheval::C
kwargs::K
end
function checkkwargs(kwargs)
for key in keys(kwargs)
key in (:abstol, :reltol, :maxiters) || throw(ArgumentError("keyword $key unrecognized"))
end
return nothing
end
"""
init(::IntegralProblem, ::IntegralAlgorithm; kws...)::IntegralSolver
Construct a cache for an [`IntegralProblem`](@ref), [`IntegralAlgorithm`](@ref), and the
keyword arguments to the solver (i.e. `abstol`, `reltol`, or `maxiters`) that can be reused
for solving the problem for multiple different parameters of the same type.
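## Example
A sketch of solving the same problem for several parameters with one cache:
```julia
cache = init(prob, QuadGKJL())
for p in (3.0, 4.0)
    cache.p = p
    sol = solve!(cache)
end
```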
"""
function init(prob::IntegralProblem, alg::IntegralAlgorithm; kwargs...)
f = prob.f; dom = prob.dom; p = prob.p
kws = (; prob.kwargs..., kwargs...)
checkkwargs(kws)
cacheval = init_cacheval(f, dom, p, alg; kws...)
return IntegralSolver(f, dom, p, alg, cacheval, kws)
end
"""
solve(::IntegralProblem, ::IntegralAlgorithm; kws...)::IntegralSolution
Compute the solution to the given [`IntegralProblem`](@ref) using the given
[`IntegralAlgorithm`](@ref) for the given keyword arguments to the solver (i.e. `abstol`,
`reltol`, or `maxiters`).
## Keywords
- `abstol`: an absolute error tolerance to get the solution to a specified number of
absolute digits, e.g. 1e-3 requests accuracy to 3 decimal places. Note that this number
must have the same units as the integral. (default: nothing)
- `reltol`: a relative error tolerance equivalent to specifying a number of significant
digits of accuracy, e.g. 1e-4 requests accuracy to roughly 4 significant digits. (default:
nothing)
- `maxiters`: a soft upper limit on the number of integrand evaluations (default:
`typemax(Int)`)
Solvers typically converge only to the weakest error condition. For example, a relative
tolerance can be used in combination with a smaller-than-necessary absolute tolerance so
that the solution is resolved up to the requested significant digits, unless the integral is
smaller than the absolute tolerance.
"""
solve(prob::IntegralProblem, alg::IntegralAlgorithm; kwargs...)
"""
solve!(::IntegralSolver)::IntegralSolution
Compute the solution to an [`IntegralProblem`](@ref) constructed from [`init`](@ref).
"""
function solve!(c::IntegralSolver)
return do_integral(c.f, c.dom, c.p, c.alg, c.cacheval; c.kwargs...)
end
@enum ReturnCode begin
Success
Failure
MaxIters
end
struct IntegralSolution{T,S}
value::T
retcode::ReturnCode
stats::S
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 55 | using Aqua
using AutoBZCore
Aqua.test_all(AutoBZCore)
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 884 | using Test
using Unitful
using UnitfulAtomic
using AtomsBase
using AutoBZCore
using SymmetryReduceBZ
using LinearAlgebra: det
# do the example of getting the volume of the bz of silicon
bounding_box = 10.26 / 2 * [[0, 0, 1], [1, 0, 1], [1, 1, 0]]u"bohr"
silicon = periodic_system([:Si => ones(3)/8,
:Si => -ones(3)/8],
bounding_box, fractional=true)
A = reinterpret(reshape,eltype(eltype(bounding_box)),AtomsBase.bounding_box(silicon))
recip_vol = det(AutoBZCore.canonical_reciprocal_basis(A))
fbz = load_bz(FBZ(), silicon)
fprob = AutoBZCore.AutoBZProblem((x,p) -> 1.0, fbz)
ibz = load_bz(IBZ(), silicon)
iprob = AutoBZCore.AutoBZProblem(TrivialRep(), IntegralFunction((x,p) -> 1.0), ibz)
for alg in (IAI(), PTR())
@test recip_vol ≈ AutoBZCore.solve(fprob, alg).value
@test recip_vol ≈ AutoBZCore.solve(iprob, alg).value
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 1281 | using Test
using LinearAlgebra
using AutoBZCore
using AutoBZCore: PuncturedInterval, HyperCube, segments, endpoints
@testset "domains" begin
@testset "SymmetricBZ" begin
dims = 3
A = I(dims)
B = AutoBZCore.canonical_reciprocal_basis(A)
fbz = load_bz(FBZ(), A)
@test fbz.A ≈ A
@test fbz.B ≈ B
@test nsyms(fbz) == 1
@test fbz.lims == AutoBZCore.CubicLimits(zeros(3), ones(3))
ibz = load_bz(InversionSymIBZ(), A)
@test ibz.A ≈ A
@test ibz.B ≈ B
@test nsyms(ibz) == 2^dims
@test all(isdiag, ibz.syms)
@test ibz.lims == AutoBZCore.CubicLimits(zeros(3), 0.5*ones(3))
cbz = load_bz(CubicSymIBZ(), A)
@test cbz.A ≈ A
@test cbz.B ≈ B
@test nsyms(cbz) == factorial(dims)*2^dims
@test cbz.lims == AutoBZCore.TetrahedralLimits(ntuple(n -> 0.5, dims))
end
end
@testset "algorithms" begin
dims = 3
A = I(dims)
vol = (2π)^dims
for bz in (load_bz(FBZ(), A), load_bz(InversionSymIBZ(), A))
ip = AutoBZProblem((x,p) -> 1.0, bz) # unit measure
for alg in (IAI(), TAI(), PTR(), AutoPTR())
solver = init(ip, alg)
@test @inferred(solve!(solver)).value ≈ vol
end
end
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 4517 | using Test, AutoBZCore, LinearAlgebra, StaticArrays, OffsetArrays, Elliptic
using GeneralizedGaussianQuadrature: generalizedquadrature
using FourierSeriesEvaluators, QuadGK
# test set of known DOS examples
# TODO : check that the exact formulas correctly integrate to unity
tb_graphene = let t=1.0
ax = CartesianIndices((-2:2, -2:2))
hm = OffsetMatrix([zero(MMatrix{2,2,typeof(t),4}) for i in ax], ax)
hm[1,1][1,2] = hm[1,-2][1,2] = hm[-2,1][1,2] = t
hm[-1,-1][2,1] = hm[-1,2][2,1] = hm[2,-1][2,1] = t
FourierSeries(SMatrix{2,2,typeof(t),4}.(hm), period=1.0)
end
# https://arxiv.org/abs/1311.2514v1
function dos_graphene_exact(E::Real, t=oneunit(E))
E = abs(E)
x = abs(E/t)
if x <= 1
f = (1+x)^2 - (x^2-1)^2/4
2E/((pi*t)^2*sqrt(f))*Elliptic.K(4x/f)
elseif 1 < x < 3
f = (1+x)^2 - (x^2-1)^2/4
2E/((pi*t)^2*sqrt(4x))*Elliptic.K(f/4x)
else
zero(inv(oneunit(t)))
end
end
# The following three examples are taken from sec 5.3 of
# https://link.springer.com/book/10.1007/3-540-28841-4
function tb_integer(n, t=1.0)
ax = CartesianIndices(ntuple(_ -> -1:1, n))
C = OffsetArray([zero(MMatrix{1,1,typeof(t),1}) for i in ax], ax)
for i in 1:n, j in (-1, 1)
C[CartesianIndex(ntuple(k -> k ≈ i ? j : 0, n))][1,1] = t
end
return FourierSeries(SMatrix{1,1,typeof(t),1}.(C), period=1.0)
end
tb_integer_1d = tb_integer(1)
function dos_integer_1d_exact(E::Real, t=oneunit(E))
x = abs(E/2t)
if x <= 1
1/sqrt(1 - x^2)/(pi*2t)
else
zero(inv(oneunit(t)))
end
end
tb_integer_2d = tb_integer(2)
function dos_integer_2d_exact(E::Real, t=oneunit(E))
x = abs(E/4t)
if x <= 1
# note Elliptic and SpecialFunctions accept the elliptic modulus m = k^2
1/(pi^2*2t)*Elliptic.K(1 - x^2)
else
zero(inv(oneunit(t)))
end
end
tb_integer_3d = tb_integer(3)
# https://doi.org/10.1143/JPSJ.30.957 (also includes FCC and BCC lattices)
function dos_integer_3d_exact(E::Real, t=oneunit(E))
x = abs(E/6t)
# note Elliptic and SpecialFunctions accept the elliptic modulus m = k^2
f = u -> Elliptic.K(1 - ((3x-cos(u))/2)^2)
if 3x < 1
n, w = generalizedquadrature(30) # quadrature designed for logarithmic singularity
u′ = acos(3x) # breakpoint for logarithmic singularity in the interval (0, pi)
I1 = sum(w .* f.(u′ .+ n .* -u′)) * u′ # (0, u′)
I2 = sum(w .* f.(u′ .+ n .* (pi - u′))) * (pi - u′) # (u′, pi)
oftype(zero(inv(oneunit(t))), 1/(pi^3*2t) * (I1+I2))
# since we may lose precision when the breakpoint is near the boundary, consider
# oftype(zero(inv(oneunit(t))), 1/(pi^3*2t) * ((isfinite(I1) ? I1 : zero(I1)) + (isfinite(I2) ? I2 : zero(I2))))
elseif x < 1
1/(pi^3*2t)*quadgk(f, 0, acos(3x-2))[1]
else
zero(inv(oneunit(t)))
end
end
for (model, solution, bandwidth, bzkind) in (
(tb_graphene, dos_graphene_exact, 4, FBZ()),
(tb_integer_1d, dos_integer_1d_exact, 2, FBZ()),
(tb_integer_2d, dos_integer_2d_exact, 4, FBZ()),
(tb_integer_3d, dos_integer_3d_exact, 6, FBZ()),
(tb_integer_1d, dos_integer_1d_exact, 2, InversionSymIBZ()),
(tb_integer_2d, dos_integer_2d_exact, 4, InversionSymIBZ()),
(tb_integer_3d, dos_integer_3d_exact, 6, InversionSymIBZ()),
(tb_integer_1d, dos_integer_1d_exact, 2, CubicSymIBZ()),
(tb_integer_2d, dos_integer_2d_exact, 4, CubicSymIBZ()),
(tb_integer_3d, dos_integer_3d_exact, 6, CubicSymIBZ()),
)
B = bandwidth
bz = load_bz(bzkind, I(ndims(model)))
prob = DOSProblem(model, float(zero(B)), bz)
E = Float64[-B - 1, -0.8B, -0.6B, -0.2B, 0.1B, 0.3B, 0.5B, 0.7B, 0.9B, B + 2]
for alg in (GGR(; npt=200),)
cache = AutoBZCore.init(prob, alg)
for e in E
cache.domain = e
@test AutoBZCore.solve!(cache).value ≈ solution(e) atol=1e-2
end
end
end
# test caching behavior
let h = FourierSeries(SMatrix{1,1,Float64,1}.([0.5, 0.0, 0.5]), period=1.0, offset=-2), E = 0.1
bz = load_bz(FBZ(), [2pi;;])
prob = DOSProblem(h, E, bz)
alg = GGR()
cache = AutoBZCore.init(prob, alg)
sol1 = AutoBZCore.solve!(cache)
h.c .*= 2
cache.isfresh = true
cache.domain = 2E
sol2 = AutoBZCore.solve!(cache)
@test sol1.value ≈ 2sol2.value
cache.H = FourierSeries(2*h.c, period=h.t, offset=h.o)
cache.domain = 4E
sol3 = AutoBZCore.solve!(cache)
@test sol2.value ≈ 2sol3.value
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 6898 | using Test
using LinearAlgebra
using StaticArrays
using FourierSeriesEvaluators
using AutoBZCore
using AutoBZCore: CubicLimits
using AutoBZCore: PuncturedInterval, HyperCube, segments, endpoints
@testset "FourierIntegralFunction" begin
@testset "quadrature" begin
a = 0
b = 1
p = 0.0
t = 1.0
s = FourierSeries([1, 0, 1]/2; period=t, offset=-2)
int(x, s, p) = x * s + p
ref = (b-a)*p + t*(b*sin(b/t*2pi) + t*cos(b/t*2pi) - (a*sin(a/t*2pi) + t*cos(a/t*2pi)))
abstol = 1e-5
prob = IntegralProblem(FourierIntegralFunction(int, s), (a, b), p; abstol)
for alg in (QuadGKJL(), QuadratureFunction(), AuxQuadGKJL(), ContQuadGKJL(), MeroQuadGKJL())
@test solve(prob, alg).value ≈ ref atol=abstol
end
end
@testset "commonproblem" begin
a = 0
b = 1
p = 0.0
t = 1.0
update! = (cache, x, s, p) -> cache.p = (x, s, p)
postsolve = (sol, x, s, p) -> sol.value
s = FourierSeries([1, 0, 1]/2; period=t, offset=-2)
f = (x, (y, s, p)) -> x * s + p + y
subprob = IntegralProblem(f, (a, b), ((a+b)/2, s((a+b)/2), 1.0))
abstol = 1e-5
prob = IntegralProblem(CommonSolveFourierIntegralFunction(subprob, QuadGKJL(), update!, postsolve, s), (a, b), p; abstol)
for alg in (QuadGKJL(), HCubatureJL(), QuadratureFunction(), AuxQuadGKJL(), ContQuadGKJL(), MeroQuadGKJL())
cache = init(prob, alg)
for p in [3.0, 4.0]
ref = (b-a)*(t*p + (b-a)^2/2) + (b-a)^2/2*t*(b*sin(b/t*2pi) + t*cos(b/t*2pi) - (a*sin(a/t*2pi) + t*cos(a/t*2pi)))
cache.p = p
sol = solve!(cache)
@test ref ≈ sol.value atol=abstol
end
end
f = (x, (y, s, p)) -> x * s + p
subprob = IntegralProblem(f, (a, b), ([(a+b)/2], s((a+b)/2), 1.0))
abstol = 1e-5
prob = IntegralProblem(CommonSolveFourierIntegralFunction(subprob, QuadGKJL(), update!, postsolve, s), AutoBZCore.Basis(t*I(1)), p; abstol)
for alg in (MonkhorstPack(), AutoSymPTRJL(),)
cache = init(prob, alg)
for p in [3.0, 4.0]
ref = (b-a)*(t*p) + (b-a)^2/2*t*(b*sin(b/t*2pi) + t*cos(b/t*2pi) - (a*sin(a/t*2pi) + t*cos(a/t*2pi)))
cache.p = p
sol = solve!(cache)
@test ref ≈ sol.value atol=abstol
end
end
end
@testset "cubature" for dim in 2:3
a = zeros(dim)
b = ones(dim)
p = 0.0
t = 1.0
s = FourierSeries([prod(x) for x in Iterators.product([(0.1, 0.5, 0.3) for i in 1:dim]...)]; period=t, offset=-2)
f = (x, s, p) -> prod(x) * s + p
abstol = 1e-4
prob = IntegralProblem(FourierIntegralFunction(f, s), (a, b), p; abstol)
refprob = IntegralProblem(IntegralFunction(let f=f; (x, p) -> f(x, s(x), p); end), (a, b), p; abstol)
for alg in (HCubatureJL(),)
@test solve(prob, alg).value ≈ solve(refprob, alg).value atol=abstol
end
p=1.3
f = (x, s, p) -> inv(s + im*p)
prob = IntegralProblem(FourierIntegralFunction(f, s), AutoBZCore.Basis(t*I(dim)), p; abstol)
refprob = IntegralProblem(IntegralFunction(let f=f; (x, p) -> f(x, s(x), p); end), AutoBZCore.Basis(t*I(dim)), p; abstol)
for alg in (MonkhorstPack(), AutoSymPTRJL(),)
@test solve(prob, alg).value ≈ solve(refprob, alg).value atol=abstol
end
end
@testset "meta-algorithms" for dim in 2:3
# NestedQuad
a = zeros(dim)
b = ones(dim)
p0 = 0.0
t = 1.0
s = FourierSeries([prod(x) for x in Iterators.product([(0.1, 0.5, 0.3) for i in 1:dim]...)]; period=t, offset=-2)
int(x, s, p) = prod(x) * s + p
abstol = 1e-4
prob = IntegralProblem(FourierIntegralFunction(int, s), CubicLimits(a, b), p0; abstol)
refprob = IntegralProblem(FourierIntegralFunction(int, s), (a, b), p0; abstol)
for alg in (QuadGKJL(), AuxQuadGKJL())
cache = init(prob, NestedQuad(alg))
refcache = init(refprob, HCubatureJL())
for p in [5.0, 6.0]
cache.p = p
refcache.p = p
@test solve!(cache).value ≈ solve!(refcache).value atol=abstol
end
end
# TODO implement CommonSolveFourierInplaceIntegralFunction
# TODO implement CommonSolveFourierInplaceBatchIntegralFunction
end
end
#=
@testset "FourierIntegrand" begin
for dims in 1:3
s = FourierSeries(integer_lattice(dims), period=1)
# AutoBZ interface user function: f(x, args...; kwargs...) where args & kwargs
# stored in MixedParameters
# a FourierIntegrand should expect a FourierValue in the first argument
# a FourierIntegrand is just a wrapper around an integrand
f(x::FourierValue, a; b) = a*x.s*x.x .+ b
# IntegralSolver will accept args & kwargs for a FourierIntegrand
prob = IntegralProblem(FourierIntegrand(f, s, 1.3, b=4.2), zeros(dims), ones(dims))
u = IntegralSolver(prob, HCubatureJL())()
v = IntegralSolver(FourierIntegrand(f, s), zeros(dims), ones(dims), HCubatureJL())(1.3, b=4.2)
w = IntegralSolver(FourierIntegrand(f, s, b=4.2), zeros(dims), ones(dims), HCubatureJL())(1.3)
@test u == v == w
# tests for the nested integrand
nouter = 3
ws = FourierSeriesEvaluators.workspace_allocate(s, FourierSeriesEvaluators.period(s), ntuple(n -> n == dims ? nouter : 1,dims))
p = ParameterIntegrand(f, 1.3, b=4.2)
nest = NestedBatchIntegrand(ntuple(n -> deepcopy(p), nouter), SVector{dims,ComplexF64})
for (alg, dom) in (
(HCubatureJL(), HyperCube(zeros(dims), ones(dims))),
(NestedQuad(AuxQuadGKJL()), CubicLimits(zeros(dims), ones(dims))),
(MonkhorstPack(), Basis(one(SMatrix{dims,dims}))),
)
prob1 = IntegralProblem(FourierIntegrand(p, s), dom)
prob2 = IntegralProblem(FourierIntegrand(p, ws, nest), dom)
@test solve(prob1, alg).u ≈ solve(prob2, alg).u
end
end
end
@testset "algorithms" begin
f(x::FourierValue, a; b) = a*x.s+b
for dims in 1:3
vol = (2pi)^dims
A = I(dims)
s = FourierSeries(integer_lattice(dims), period=1)
for bz in (load_bz(FBZ(), A), load_bz(InversionSymIBZ(), A))
integrand = FourierIntegrand(f, s, 1.3, b=1.0)
prob = IntegralProblem(integrand, bz)
for alg in (IAI(), PTR(), AutoPTR(), TAI()), counter in (false, true)
new_alg = counter ? EvalCounter(alg) : alg
solver = IntegralSolver(prob, new_alg, reltol=0, abstol=1e-6)
@test solver() ≈ vol atol=1e-6
end
end
end
end
=#
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 7327 | using Test
using LinearAlgebra
using AutoBZCore
using AutoBZCore: PuncturedInterval, HyperCube, segments, endpoints
using AutoBZCore: CubicLimits
@testset "domains" begin
# PuncturedInterval
a = (0.0, 1.0, 2.0)
b = collect(a)
sa = PuncturedInterval(a)
sb = PuncturedInterval(b)
@test all(segments(sa) .== segments(sb))
@test (0.0, 2.0) == endpoints(sa) == endpoints(sb)
@test Float64 == eltype(sa) == eltype(sb)
# HyperCube
for d = 1:3
c = HyperCube(zeros(d), ones(d))
@test eltype(c) == Float64
a, b = endpoints(c)
@test all(a .== zeros(d))
@test all(b .== ones(d))
end
end
@testset "quadrature" begin
a = 0.0
b = 2pi
abstol=1e-5
p=3.0
# QuadratureFunction QuadGKJL AuxQuadGKJL ContQuadGKJL MeroQuadGKJL
for (f, ref) in (
((x,p) -> p*sin(x), 0.0),
((x,p) -> p*one(x), p*(b-a)),
((x,p) -> inv(p-cos(x)), (b-a)/sqrt(p^2-1)),
)
prob = IntegralProblem(f, (a, b), p; abstol)
for alg in (QuadratureFunction(), QuadGKJL(), AuxQuadGKJL(), ContQuadGKJL(), MeroQuadGKJL())
sol = solve(prob, alg)
@test ref ≈ sol.value atol=abstol
end
end
@test @inferred(solve(IntegralProblem((x, p) -> exp(-x^2), (-Inf, Inf)), QuadGKJL())).value ≈ sqrt(pi)
end
@testset "commonproblem" begin
a = 1.0
b = 2pi
abstol=1e-5
p0=3.0
# QuadratureFunction QuadGKJL AuxQuadGKJL ContQuadGKJL MeroQuadGKJL
update! = (cache, x, p) -> cache.p = (x, p)
postsolve = (sol, x, p) -> sol.value
f = (x, (y, p)) -> p*(y + x)
subprob = IntegralProblem(f, (a, b), ((a+b)/2, p0); abstol)
integrand = CommonSolveIntegralFunction(subprob, QuadGKJL(), update!, postsolve)
prob = IntegralProblem(integrand, (a, b), p0; abstol)
for alg in (QuadratureFunction(), QuadGKJL(), HCubatureJL(), AuxQuadGKJL(), ContQuadGKJL(), MeroQuadGKJL())
cache = init(prob, alg)
for p in [3.0, 4.0]
ref = p*(b-a)*(b^2-a^2)
cache.p = p
sol = solve!(cache)
@test ref ≈ sol.value atol=abstol
end
end
f = (x, (y, p)) -> p*(sin(only(y))^2 + x)
subprob = IntegralProblem(f, (a, b), ([b/2], p0); abstol)
integrand = CommonSolveIntegralFunction(subprob, QuadGKJL(), update!, postsolve)
prob = IntegralProblem(integrand, AutoBZCore.Basis(b*I(1)), p0; abstol)
for alg in (MonkhorstPack(), AutoSymPTRJL(),)
cache = init(prob, alg)
for p in [3.0, 4.0]
ref = p*((b-a)+(b^2-a^2))*b/2
cache.p = p
sol = solve!(cache)
@test ref ≈ sol.value atol=abstol
end
end
end
@testset "cubature" begin
# HCubatureJL MonkhorstPack AutoSymPTRJL NestedQuad
a = 0.0
b = 2pi
abstol=1e-5
p = 3.0
for dim in 1:3, (f, ref) in (
((x,p) -> p*sum(sin, x), 0.0),
((x,p) -> p*one(eltype(x)), p*(b-a)^dim),
((x,p) -> prod(y -> inv(p-cos(y)), x), ((b-a)/sqrt(p^2-1))^dim),
)
prob = IntegralProblem(f, (fill(a, dim), fill(b, dim)), p; abstol)
for alg in (HCubatureJL(),)
@test ref ≈ solve(prob, alg).value atol=abstol
end
prob = IntegralProblem(f, AutoBZCore.Basis(b*I(dim)), p; abstol)
for alg in (MonkhorstPack(), AutoSymPTRJL(),)
@test ref ≈ solve(prob, alg).value atol=abstol
end
end
end
@testset "inplace" begin
# QuadratureFunction QuadGKJL AuxQuadGKJL HCubatureJL MonkhorstPack AutoSymPTRJL
a = 0.0
b = 2pi
abstol=1e-5
p = 3.0
for (f, ref) in (
((y,x,p) -> y .= p*sin(only(x)), [0.0]),
((y,x,p) -> y .= p*one(only(x)), [p*(b-a)]),
((y,x,p) -> y .= inv(p-cos(only(x))), [(b-a)/sqrt(p^2-1)]),
)
integrand = InplaceIntegralFunction(f, [0.0])
inplaceprob = IntegralProblem(integrand, (a, b), p; abstol)
for alg in (QuadGKJL(), QuadratureFunction(), QuadGKJL(), AuxQuadGKJL())
@test ref ≈ solve(inplaceprob, alg).value atol=abstol
end
inplaceprob = IntegralProblem(integrand, AutoBZCore.Basis([b;;]), p; abstol)
for alg in (MonkhorstPack(), AutoSymPTRJL())
@test ref ≈ solve(inplaceprob, alg).value atol=abstol
end
end
end
@testset "batch" begin
# QuadratureFunction AuxQuadGKJL MonkhorstPack AutoSymPTRJL
a = 0.0
b = 2pi
abstol=1e-5
p = 3.0
for (f, ref) in (
((y,x,p) -> y .= p .* sin.(only.(x)), 0.0),
((y,x,p) -> y .= p .* one.(only.(x)), p*(b-a)),
((y,x,p) -> y .= inv.(p .- cos.(only.(x))), (b-a)/sqrt(p^2-1)),
)
integrand = InplaceBatchIntegralFunction(f, zeros(1))
batchprob = IntegralProblem(integrand, (a, b), p; abstol)
for alg in (QuadGKJL(), QuadratureFunction(), AuxQuadGKJL())
@test ref ≈ solve(batchprob, alg).value atol=abstol
end
batchprob = IntegralProblem(integrand, AutoBZCore.Basis([b;;]), p; abstol)
for alg in (MonkhorstPack(), AutoSymPTRJL())
@test ref ≈ solve(batchprob, alg).value atol=abstol
end
end
end
@testset "multi-algorithms" begin
# NestedQuad
f(x, p) = 1.0 + p*sum(abs2 ∘ cos, x)
abstol=1e-3
p0 = 0.0
for dim in 1:3, alg in (QuadratureFunction(), QuadGKJL(), AuxQuadGKJL())
dom = CubicLimits(zeros(dim), 2pi*ones(dim))
prob = IntegralProblem(f, dom, p0; abstol)
ndalg = NestedQuad(alg)
cache = init(prob, ndalg)
for p in [5.0, 7.0]
cache.p = p
ref = (2pi)^dim + dim*p*pi*(2pi)^(dim-1)
@test ref ≈ solve!(cache).value atol=abstol
# TODO implement CommonSolveInplaceIntegralFunction
inplaceprob = IntegralProblem(InplaceIntegralFunction((y,x,p) -> y .= f(x,p), [0.0]), dom, p)
@test_broken [ref] ≈ solve(inplaceprob, ndalg, abstol=abstol).value atol=abstol
# TODO implement CommonSolveInplaceBatchIntegralFunction
batchprob = IntegralProblem(InplaceBatchIntegralFunction((y,x,p) -> y .= f.(x,Ref(p)), zeros(Float64, 1)), dom, p)
@test_broken ref ≈ solve(batchprob, ndalg, abstol=abstol).value atol=abstol
end
end
#=
# AbsoluteEstimate
est_alg = QuadratureFunction()
abs_alg = QuadGKJL()
alg = AbsoluteEstimate(est_alg, abs_alg)
ref_alg = MeroQuadGKJL()
f2(x, p) = inv(complex(p...) - cos(x))
prob = IntegralProblem(f2, 0.0, 2pi, (0.5, 1e-3))
abstol = 1e-5; reltol=1e-5
@test solve(prob, alg, reltol=reltol).value ≈ solve(prob, ref_alg, abstol=abstol).value atol=abstol
# EvalCounter
for prob in (
IntegralProblem((x, p) -> 1.0, 0, 1),
IntegralProblem(InplaceIntegrand((y, x, p) -> y .= 1.0, fill(0.0)), 0, 1),
IntegralProblem(BatchIntegrand((y, x, p) -> y .= 1.0, Float64), 0, 1)
)
# constant integrand should always use the same number of evaluations as the
# base quadrature rule
for (alg, numevals) in (
(QuadratureFunction(npt=10), 10),
(QuadGKJL(order=7), 15),
(QuadGKJL(order=9), 19),
)
prob.f isa BatchIntegrand && alg isa QuadGKJL && continue
@test solve(prob, EvalCounter(alg)).numevals == numevals
end
end
=#
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 440 | using Test
include("utils.jl")
@testset "aqua" include("aqua.jl")
@testset "interface" include("interface_tests.jl")
@testset "brillouin" include("brillouin.jl")
@testset "fourier" include("fourier.jl")
@testset "SymmetryReduceBZExt" include("test_ibz.jl")
@testset "UnitfulExt" include("unitfulext.jl")
@testset "AtomsBaseExt" include("atomsbaseext.jl")
@testset "WannierIOExt" include("wannierioext.jl")
@testset "DOS" include("dos.jl")
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 5817 | import SymmetryReduceBZ.Lattices: genlat_CUB, genlat_FCC, genlat_BCC,
genlat_TET, genlat_BCT, genlat_ORC, genlat_ORCF, genlat_ORCI, genlat_ORCC,
genlat_HEX, genlat_RHL, genlat_MCL, genlat_MCLC, genlat_TRI
import SymmetryReduceBZ.Symmetry: calc_bz, calc_ibz
import SymmetryReduceBZ.Utilities: volume, vertices, get_uniquefacets
using Polyhedra: Polyhedron
using AutoBZCore
using IteratedIntegration: nested_quad
using LinearAlgebra
using Test
"""
ph_vol(n, face_coord, ph_vert)
Estimate volume of polyhedron by equispaced integration.
# Arguments
- `n::Int64`: Number of integration points in z dimension
- `face_coord`: Vector of matrices, one per face of the polyhedron, each row of
which gives the coordinates of one of that face's ordered vertices
- `ph_vert::Matrix{Float64}`: nv x 3 array of coordinates of the nv polyhedron
vertices
# Returns
- `vol::Float64`: Estimated volume of polyhedron
"""
function ph_vol(n::Int64, face_coord, ph_vert::Matrix{Float64})
# function ph_vol(n::Int64, tri_idx::Matrix{Int32}, ph_vert::Matrix{Float64})
SymmetryReduceBZExt = Base.get_extension(AutoBZCore, :SymmetryReduceBZExt)
# Vertices of faces of polyhedron, given by their indices
# face_idx = SymmetryReduceBZExt.faces_from_triangles(tri_idx, ph_vert)
# Vertices of faces of polyhedron, given by their ordered coordinates
# face_coord = SymmetryReduceBZExt.face_coord_from_idx(face_idx, ph_vert)
# Estimate volume of polyhedron by equispaced integration
nz = n # Number of integration points in z dimension
vol = 0.0
zlims = SymmetryReduceBZExt.get_lim(ph_vert) # z limits of integration
dz = (zlims[2] - zlims[1]) / (nz + 1) # Integration step in z dimension
for iz = 1:nz
z = zlims[1] + iz * (zlims[2] - zlims[1]) / (nz + 1) # z coordinate
# Vertices of polygon formed by intersection of polyhedron with z plane,
# given by their ordered coordinates
pg_vert = SymmetryReduceBZExt.pg_vert_from_zslice(z, face_coord)
ylims = SymmetryReduceBZExt.get_lim(pg_vert) # y limits of integration
# Number of integration points in y dimension is scaled by ratio of y and z interval lengths
ny = round(nz * (ylims[2] - ylims[1]) / (zlims[2] - zlims[1]))
dy = (ylims[2] - ylims[1]) / (ny + 1) # Integration step in y dimension
for iy = 1:ny
y = ylims[1] + iy * (ylims[2] - ylims[1]) / (ny + 1) # y coordinate
xlims = SymmetryReduceBZExt.xlim_from_yslice(y, pg_vert) # x limits of integration
# Add volume of dy x dz rectangular prism of length x2-x1
vol += (xlims[2] - xlims[1]) * dy * dz
end
end
return vol
end
function test_vol(latvec::Matrix{Float64}, n::Int64)
# Generate IBZ
atom_types = [0]
atom_pos = Array([0 0 0]')
coordinates = "Cartesian"
makeprim = false
convention = "ordinary"
ibz = calc_ibz(latvec, atom_types, atom_pos, coordinates,
makeprim, convention)
# Get triangles and vertices of IBZ
# tri_idx = ibz.simplices
# ph_vert = ibz.points
ph_vert = permutedims(reduce(hcat, vertices(ibz)))
face_coord = map(x -> permutedims(reduce(hcat, x)), get_uniquefacets(ibz))
vol = ph_vol(n, face_coord, ph_vert) # Estimate volume of IBZ
# vol = ph_vol(n, tri_idx, ph_vert) # Estimate volume of IBZ
# println("Estimated volume: ", vol)
# println("Actual volume: ", ibz.volume)
return abs(vol - volume(ibz)) / volume(ibz) # Return relative error
end
function test_vol2(latvec::Matrix{Float64}, n::Int64)
atom_types = [0]
atom_pos = Array([0 0 0]')
coordinates = "Cartesian"
makeprim = false
convention = "ordinary"
ibz = calc_ibz(latvec, atom_types, atom_pos, coordinates,
makeprim, convention)
fbz = load_bz(FBZ(), latvec)
(dims = size(fbz.A, 1)) == size(fbz.A, 2) || error("lattice basis matrix not square")
ibz_hull = load_bz(IBZ(dims), fbz.A, fbz.B, atom_types, atom_pos, coordinates=coordinates)
vol_hull = nested_quad(x -> 1.0, ibz_hull.lims)[1]*det(fbz.B)/(2pi)^dims
# ibz_poly = load_bz(IBZ{dims,Polyhedron}(), fbz.A, fbz.B, atom_types, atom_pos, coordinates=coordinates)
# vol_poly = nested_quad(x -> 1.0, ibz_poly.lims)[1]*det(fbz.B)/(2pi)^dims
# the loaded ibz.lims is in fractional lattice coordinates, but needs rescaling to cartesian
# println("Reference volume: ", vol_poly)
# println("Estimated volume: ", vol_hull)
# println("Actual volume: ", ibz.volume)
return abs(volume(ibz) - vol_hull) / volume(ibz) # Return relative error
# return max(abs(ibz.volume - vol_hull) / ibz.volume, abs(ibz.volume - vol_poly) / ibz.volume) # Return relative error
end
@testset "IBZ volumes" begin
a = 1.0 # Lattice constant
b = 1.4 # Lattice constant
c = 1.2 # Lattice constant
alpha = pi / 6 # Lattice angle
beta = pi / 3 # Lattice angle
gamma = pi / 4 # Lattice angle
n = 1000 # Number of integration points in z dimension
# tol = 1e-2 # Relative error tolerance
for (routine, tol) in ((test_vol, 1e-2), (test_vol2, 1e-6))
# Estimate volumes of different lattices
@test routine(genlat_CUB(a), n) < tol
@test routine(genlat_FCC(a), n) < tol
@test routine(genlat_BCC(a), n) < tol
@test routine(genlat_TET(a, c), n) < tol
@test routine(genlat_BCT(a, c), n) < tol
@test routine(genlat_ORC(a, b, c), n) < tol
@test routine(genlat_ORCF(a, b, c), n) < tol
@test routine(genlat_ORCI(a, b, c), n) < tol
@test routine(genlat_ORCC(a, b, c), n) < tol
@test routine(genlat_HEX(a, c), n) < tol
@test routine(genlat_RHL(a, alpha), n) < tol
@test routine(genlat_MCL(a, b, c, alpha), n) < tol
@test routine(genlat_MCLC(a, b, c, alpha), n) < tol
@test routine(genlat_TRI(a, b, c, alpha, beta, gamma), n) < tol
end
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 363 | using Test
using Unitful
using AutoBZCore
using AutoBZCore: canonical_reciprocal_basis, canonical_ptr_basis
using LinearAlgebra: I
using StaticArrays
for A in [rand(3, 3) * u"m", rand(SMatrix{3,3,Float64,9})*u"m"]
B = canonical_reciprocal_basis(A)
@test B'A ≈ 2pi*I
pB = canonical_ptr_basis(B)
@test pB isa AutoBZCore.Basis
@test pB.B ≈ I
end | AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 230 | using OffsetArrays
function integer_lattice(n)
C = OffsetArray(zeros(ntuple(_ -> 3, n)), repeat([-1:1], n)...)
for i in 1:n, j in (-1, 1)
C[CartesianIndex(ntuple(k -> k == i ? j : 0, n))] = 1/2n
end
C
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | code | 588 | using Test
using WannierIO
using AutoBZCore
using SymmetryReduceBZ
using LinearAlgebra: det
# TODO use artefacts to provide an input wout file
path = joinpath(dirname(dirname(pathof(AutoBZCore))), "aps_example/svo.wout")
fbz = load_bz(FBZ(), path)
ibz = load_bz(IBZ(), path)
@test det(fbz.B) ≈ det(ibz.B)
fprob = AutoBZCore.AutoBZProblem((x,p) -> 1.0, fbz)
iprob = AutoBZCore.AutoBZProblem(TrivialRep(), IntegralFunction((x,p) -> 1.0), ibz)
for alg in (IAI(), PTR())
@test det(fbz.B) ≈ AutoBZCore.solve(fprob, alg).value
@test det(ibz.B) ≈ AutoBZCore.solve(iprob, alg).value
end
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 3256 | # AutoBZCore.jl
| Documentation | Build Status | Coverage | Version |
| :-: | :-: | :-: | :-: |
| [![][docs-stable-img]][docs-stable-url] | [![][action-img]][action-url] | [![][codecov-img]][codecov-url] | [![ver-img]][ver-url] |
| [![][docs-dev-img]][docs-dev-url] | [![][pkgeval-img]][pkgeval-url] | [![][aqua-img]][aqua-url] | [![deps-img]][deps-url] |
This package provides a common interface to integration algorithms that are
efficient and high-order accurate for computational tasks including
Brillouin-zone integration and Wannier interpolation. For further information on
integrand interfaces, including optimizations for Wannier interpolation, please see [the
documentation](https://lxvm.github.io/AutoBZCore.jl/stable/).
## Research and citation
If you use AutoBZCore.jl in your software or published research works, please
cite one or all of the following. Citations help to encourage the development
and maintenance of open-source scientific software.
- This repository: https://github.com/lxvm/AutoBZCore.jl
- Our paper on BZ integration: [Automatic, high-order, and adaptive algorithms
for Brillouin zone integration. Jason Kaye, Sophie Beck, Alex Barnett, Lorenzo
Van Muñoz, Olivier Parcollet. SciPost Phys. 15, 062
(2023)](https://scipost.org/SciPostPhys.15.2.062)
## Author and Copyright
AutoBZCore.jl was written by [Lorenzo Van Muñoz](https://web.mit.edu/lxvm/www/),
and is free/open-source software under the MIT license.
## Related packages
- [AutoBZ.jl](https://github.com/lxvm/AutoBZ.jl)
- [`wannier-berri`](https://github.com/wannier-berri/wannier-berri)
- [FourierSeriesEvaluators.jl](https://github.com/lxvm/FourierSeriesEvaluators.jl)
- [SymmetryReduceBZ.jl](https://github.com/jerjorg/SymmetryReduceBZ.jl)
- [AtomsBase.jl](https://github.com/JuliaMolSim/AtomsBase.jl)
- [HDF5.jl](https://github.com/JuliaIO/HDF5.jl)
- [WannierIO.jl](https://github.com/qiaojunfeng/WannierIO.jl)
- [Integrals.jl](https://github.com/SciML/Integrals.jl)
- [Brillouin.jl](https://github.com/thchr/Brillouin.jl)
- [TightBinding.jl](https://github.com/cometscome/TightBinding.jl)
<!-- badges -->
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://lxvm.github.io/AutoBZCore.jl/stable/
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://lxvm.github.io/AutoBZCore.jl/dev/
[action-img]: https://github.com/lxvm/AutoBZCore.jl/actions/workflows/CI.yml/badge.svg?branch=main
[action-url]: https://github.com/lxvm/AutoBZCore.jl/actions/?query=workflow:CI
[pkgeval-img]: https://juliahub.com/docs/General/AutoBZCore/stable/pkgeval.svg
[pkgeval-url]: https://juliahub.com/ui/Packages/General/AutoBZCore
[codecov-img]: https://codecov.io/github/lxvm/AutoBZCore.jl/branch/main/graph/badge.svg
[codecov-url]: https://app.codecov.io/github/lxvm/AutoBZCore.jl
[aqua-img]: https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg
[aqua-url]: https://github.com/JuliaTesting/Aqua.jl
[ver-img]: https://juliahub.com/docs/AutoBZCore/version.svg
[ver-url]: https://juliahub.com/ui/Packages/AutoBZCore/UDEDl
[deps-img]: https://juliahub.com/docs/General/AutoBZCore/stable/deps.svg
[deps-url]: https://juliahub.com/ui/Packages/General/AutoBZCore?t=2
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 499 | [Scripts updated for AutoBZCore v0.4 and Julia v1.10]
Hello, to reproduce the code example in these
[slides](https://web.mit.edu/lxvm/www/slides/autobz_aps.pdf) follow these steps:
1. Install [Julia](https://julialang.org/), e.g. using [`juliaup`](https://github.com/JuliaLang/juliaup)
2. Check that the `julia` binary is on your path
3. `git clone https://github.com/lxvm/AutoBZCore.jl.git` and `cd AutoBZCore.jl/aps_example`
4. Run `julia aps_example.jl`, which takes about 5 minutes on a laptop | AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 1021 | # Algorithms
## `IntegralProblem` algorithms
```@docs
AutoBZCore.IntegralAlgorithm
```
### Quadrature
```@docs
AutoBZCore.QuadratureFunction
AutoBZCore.QuadGKJL
AutoBZCore.AuxQuadGKJL
AutoBZCore.ContQuadGKJL
AutoBZCore.MeroQuadGKJL
```
### Cubature
```@docs
AutoBZCore.HCubatureJL
AutoBZCore.MonkhorstPack
AutoBZCore.AutoSymPTRJL
```
### Meta-algorithms
```@docs
AutoBZCore.NestedQuad
```
## `AutoBZProblem` algorithms
In order to make algorithms domain-agnostic, problems defined on a BZ loaded
from [`load_bz`](@ref) can be solved with the algorithms below, which are
aliases for the algorithms above.
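For example (a minimal sketch mirroring the package's test suite):
```julia
using AutoBZCore, LinearAlgebra
bz = load_bz(FBZ(2), 2pi * I(2))
prob = AutoBZProblem(TrivialRep(), IntegralFunction((x, p) -> 1.0), bz)
solve(prob, IAI()).value  # ≈ det(bz.B), the volume of the BZ
```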
```@docs
AutoBZCore.AutoBZAlgorithm
AutoBZCore.IAI
AutoBZCore.TAI
AutoBZCore.PTR
AutoBZCore.AutoPTR
```
## `DOSProblem` algorithms
The currently available algorithms are an initial release. In a future release, we
would like to include the following reference algorithms, which are also common in
the literature:
- (Linear) Tetrahedron Method
- Adaptive Gaussian broadening
```@docs
AutoBZCore.DOSAlgorithm
AutoBZCore.GGR
```
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 6797 | # Examples
The following are several examples of how to use the algorithms and integrands
provided by AutoBZCore.jl.
For background on the essential interface, see the [Problem definitions](@ref) page.
## Green's function integration
A common integral appearing in [Dynamical mean-field
theory](https://en.wikipedia.org/wiki/Dynamical_mean-field_theory) is that of
the local Green's function:
```math
G(\omega) = \int d^d \vec{k}\ \operatorname{Tr} \left[ \left( \omega - H \left( \vec{k} \right) - \Sigma(\omega) \right)^{-1} \right].
```
For simplicity, we take ``\Sigma(\omega) = -i\eta``. We can define the integrand
as a function of ``\vec{k}`` and ``H`` and parameters ``\eta, \omega``.
```@example gloc
using LinearAlgebra
gloc_integrand(k, (; h, η, ω)) = tr(inv(complex(ω,η)*I-h(k)))
```
Here we use named tuple destructuring syntax to unpack a named tuple of
parameters in the function definition.
Commonly, ``H(\vec{k})`` is evaluated using Wannier interpolation, i.e. as a
Fourier series. For a simple tight-binding model, the integer lattice, the
Hamiltonian is given by
```math
H(k) = \cos(2\pi k) = \frac{1}{2} \left( e^{2\pi ik} + e^{-2\pi ik} \right)
```
We can use the built-in function `cos` to evaluate this, however, for more
complex Fourier series it becomes easier to use the representation in terms of
Fourier coefficients. Using the package
[FourierSeriesEvaluators.jl](https://github.com/lxvm/FourierSeriesEvaluators.jl),
we can define ``H(k) = \cos(2\pi k)`` by the following:
```@example gloc
using FourierSeriesEvaluators
h = FourierSeries([0.5, 0.0, 0.5]; period=1, offset=-2)
```
The coefficient values of ``1/2`` can be determined from Euler's formula, as
used in the expansion of ``\cos`` above. The value of `offset` shifts the
coefficient array indices (`1:3`, since Julia uses 1-based indexing) so that
they correspond to the values of ``n`` in the phase factors ``e^{2\pi i n k}``
of the Fourier series above, i.e. `-1:1`. Now we proceed to define the
[`IntegralProblem`](@ref) and solve it with a generic adaptive
integration scheme, [`QuadGKJL`](@ref):
```@example gloc
using AutoBZCore
dom = (0, 1)
p = (; h, η=0.1, ω=0.0)
prob = IntegralProblem(gloc_integrand, dom, p)
alg = QuadGKJL()
solve(prob, alg; abstol=1e-3).value
```
## BZ integration
To perform integration over a Brillouin zone, we can load one using the
[`load_bz`](@ref) function and then construct an [`AutoBZProblem`](@ref) to
solve. Since the Brillouin zone may be reduced using point group symmetries, a
common optimization, it is also required to specify the symmetry representation
of the integrand. Continuing the previous example, the trace of the Green's
function has no band/orbital degrees of freedom and transforms trivially under
the point group, so it is a [`TrivialRep`](@ref). The previous calculation can
be replicated as:
```@example gloc
using AutoBZCore
bz = load_bz(FBZ(), 2pi*I(1))
p = (; h, η=0.1, ω=0.0)
prob = AutoBZProblem(TrivialRep(), IntegralFunction(gloc_integrand), bz, p)
alg = TAI()
solve(prob, alg; abstol=1e-3).value
```
Now we proceed to multi-dimensional integrals. In this case, Wannier
interpolation is much more efficient when Fourier series are evaluated one
variable at a time. To understand this, suppose we have a series defined by ``M
\times M`` coefficients (i.e. a 2d series) that we want to evaluate on an ``N
\times N`` grid. Naively evaluating the series at each grid
point requires ``\mathcal{O}(M^{2} N^{2})`` operations; however, we can
reduce the complexity by pre-evaluating certain coefficients as follows:
```math
f(x, y) = \sum_{m,n=1}^{M} f_{nm} e^{i(nx + my)} = \sum_{n=1}^{M} e^{inx} \left( \sum_{m=1}^{M} f_{nm} e^{imy} \right) = \sum_{n=1}^{M} e^{inx} \tilde{f}_{n}(y)
```
This means we can evaluate the series on the grid in ``\mathcal{O}(M N^2 + M^2
N)`` operations. When ``N \gg M``, this is ``\mathcal{O}(M N^{2})`` operations,
which is comparable to the computational complexity of a [multi-dimensional
FFT](https://en.wikipedia.org/wiki/Fast_Fourier_transform#Multidimensional_FFTs).
Since the constant factors of an FFT may not be trivial, this scheme is competitive.
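In code, the nested evaluation looks like the following sketch (for illustration
only; FourierSeriesEvaluators.jl implements this generically and efficiently):
```julia
# evaluate f(x, y) = ∑_{n,m} f[n,m] exp(i(n*x + m*y)) on a grid, one variable at a time
function eval_series_nested(f::Matrix, xs, ys)
    M = size(f, 1)
    vals = zeros(ComplexF64, length(xs), length(ys))
    for (jy, y) in enumerate(ys)
        # pre-contract the inner index once per y: O(M^2) work
        f̃ = [sum(f[n, m] * cis(m * y) for m in 1:M) for n in 1:M]
        for (jx, x) in enumerate(xs)
            # reuse f̃ for every x: O(M) work per grid point
            vals[jx, jy] = sum(f̃[n] * cis(n * x) for n in 1:M)
        end
    end
    return vals
end
```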
Let's use this with a Fourier series corresponding to
``H(\vec{k}) = \cos(2\pi k_{x}) + \cos(2\pi k_{y})``
and define a new method of `gloc_integrand` that accepts the (efficiently)
evaluated Fourier series in the second argument
```@example gloc
h = FourierSeries([0.0; 0.5; 0.0;; 0.5; 0.0; 0.5;; 0.0; 0.5; 0.0]; period=1, offset=-2)
gloc_integrand(k, h_k, (; η, ω)) = tr(inv(complex(ω,η)*I-h_k))
```
Similar to before, we construct an [`AutoBZProblem`](@ref) and this time we
take the integration domain to correspond to the full Brillouin zone of a square
lattice with lattice vectors `2pi*I(2)`.
```@example gloc
integrand = FourierIntegralFunction(gloc_integrand, h)
bz = load_bz(FBZ(2), 2pi*I(2))
p = (; η=0.1, ω=0.0)
prob = AutoBZProblem(TrivialRep(), integrand, bz, p)
alg = IAI()
solve(prob, alg).value
```
This package provides several [`AutoBZProblem` algorithms](@ref) that we
can use to solve the multidimensional integral.
The [repo's demo](https://github.com/lxvm/AutoBZCore.jl/tree/main/aps_example)
on density of states provides a complete example of how to compute and
interpolate an integral as a function of its parameters using the [`init` and
`solve!`](@ref) interface.
## Density of States
Computing the density of states (DOS) of a self-adjoint, or Hermitian, operator is a
related, but distinct, problem from the integrals presented elsewhere in this package.
In fact, many DOS algorithms will compute integrals to approximate the DOS of an
operator by introducing an artificial broadening.
To handle the ``T=0^{+}`` limit of the broadening, we implement the well-known
[Gilat-Raubenheimer method](https://arxiv.org/abs/1711.07993) as an algorithm
for the [`AutoBZCore.DOSProblem`](@ref).
Using the [`AutoBZCore.init`](@ref) and [`AutoBZCore.solve!`](@ref) functions, it is possible to
construct a cache to solve a [`DOSProblem`](@ref) for several energies or
several Hamiltonians. As an example of solving for several energies,
```@example dos
using AutoBZCore, FourierSeriesEvaluators, StaticArrays
h = FourierSeries(SMatrix{1,1,Float64,1}.([0.5, 0.0, 0.5]), period=1.0, offset=-2)
E = 0.3
bz = load_bz(FBZ(), [2pi;;])
prob = DOSProblem(h, E, bz)
alg = GGR(; npt=100)
cache = init(prob, alg)
Es = range(-1, 1, length=10) * 1.1
data = map(Es) do E
cache.domain = E
solve!(cache).value
end
```
As an example of interchanging Hamiltonians, which must remain the same type, we
can double the Hamiltonian coefficients, which halves the DOS at the
correspondingly doubled energy:
```@example dos
cache.domain = E
sol1 = AutoBZCore.solve!(cache)
h.c .*= 2
cache.isfresh = true
cache.domain = 2E
sol2 = AutoBZCore.solve!(cache)
sol1.value ≈ 2sol2.value
``` | AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 626 | # Package extensions
## SymmetryReduceBZ.jl
Loading [SymmetryReduceBZ.jl](https://github.com/jerjorg/SymmetryReduceBZ.jl)
provides a specialized method of [`load_bz`](@ref) that, when provided atom
species and positions, can compute the [`IBZ`](@ref).
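For instance (a hypothetical sketch, assuming a one-atom cubic cell; `A` and `B`
are the real- and reciprocal-space basis matrices, and see the [`load_bz`](@ref)
reference for the argument conventions):
```julia
using AutoBZCore, SymmetryReduceBZ
using LinearAlgebra: I
A = Matrix(1.0 * I, 3, 3)        # lattice basis
B = 2pi * Matrix(1.0 * I, 3, 3)  # reciprocal basis
ibz = load_bz(IBZ(), A, B, ["Si"], [[0.0, 0.0, 0.0]])  # hypothetical cell
```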
## WannierIO.jl
Loading [WannierIO.jl](https://github.com/qiaojunfeng/WannierIO.jl) provides a
specialized method of [`load_bz`](@ref) that loads the BZ defined in a
`seedname.wout` file.
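For example (assuming a Wannier90 output file such as the `svo.wout` used in the
package's tests):
```julia
using AutoBZCore, WannierIO
fbz = load_bz(FBZ(), "svo.wout")
```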
## AtomsBase.jl
Loading [AtomsBase.jl](https://github.com/JuliaMolSim/AtomsBase.jl) provides a
specialized method of [`load_bz`](@ref) to load the BZ of an `AtomsBase.AbstractSystem`.
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 40 | # AutoBZCore.jl
```@docs
AutoBZCore
``` | AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 439 | # Integrands
The design of AutoBZCore.jl uses multiple dispatch to provide multiple
interfaces for user integrands that allow various optimizations to be compatible
with a common interface for solvers.
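For example, the basic interface wraps a plain function of the integration
variable and parameters, while the Fourier interface also receives the
pre-evaluated Fourier series (a sketch based on the Examples page):
```julia
using AutoBZCore, FourierSeriesEvaluators
f = IntegralFunction((x, p) -> sin(p * x))  # basic out-of-place integrand
h = FourierSeries([0.5, 0.0, 0.5]; period=1, offset=-2)
g = FourierIntegralFunction((x, h_x, p) -> inv(complex(p, 0.1) - h_x), h)
```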
```@docs
AutoBZCore.IntegralFunction
AutoBZCore.InplaceIntegralFunction
AutoBZCore.InplaceBatchIntegralFunction
AutoBZCore.CommonSolveIntegralFunction
AutoBZCore.FourierIntegralFunction
AutoBZCore.CommonSolveFourierIntegralFunction
``` | AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 1395 | # Problem definitions
The design of AutoBZCore.jl is heavily influenced by
[SciML](https://sciml.ai/) packages and uses the
[CommonSolve.jl](https://github.com/SciML/CommonSolve.jl)
interface. Eventually, this package may contribute to
[Integrals.jl](https://github.com/SciML/Integrals.jl).
## Problem interface
AutoBZCore.jl replicates the Integrals.jl interface, using an
[`IntegralProblem`](@ref) type to setup an integral from an
integrand, a domain, and parameters.
```@example prob
using AutoBZCore
f = (x,p) -> sin(p*x)
dom = (0, 1)
p = 0.3
prob = IntegralProblem(f, dom, p)
```
```@docs
AutoBZCore.IntegralProblem
AutoBZCore.NullParameters
```
## `solve`
To solve an integral problem, pick an algorithm and call [`solve`](@ref)
```@example prob
alg = QuadGKJL()
solve(prob, alg)
```
```@docs
AutoBZCore.solve
```
## `init` and `solve!`
To solve many problems with the same integrand but different domains or
parameters, use [`init`](@ref) to allocate a solver and
[`solve!`](@ref) to get the solution
```@example prob
solver = init(prob, alg)
solve!(solver).value
```
To solve again, update the parameters of the solver in place and `solve!` again
```@example prob
# solve again at a new parameter
solver.p = 0.4
solve!(solver).value
```
```@docs
AutoBZCore.init
AutoBZCore.solve!
```
## Additional problems
```@docs
AutoBZCore.AutoBZProblem
AutoBZCore.DOSProblem
```
| AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.4.1 | 2139fc2ca15c6e37970925821d9171cc070e970e | docs | 680 | # Function reference
The following symbols are exported by AutoBZCore.jl
## Brillouin-zone kinds
```@docs
AutoBZCore.load_bz
AutoBZCore.load_bz(::IBZ, A, B, species, positions)
AutoBZCore.AbstractBZ
AutoBZCore.FBZ
AutoBZCore.IBZ
AutoBZCore.InversionSymIBZ
AutoBZCore.CubicSymIBZ
```
## Symmetry representations
```@docs
AutoBZCore.AbstractSymRep
AutoBZCore.TrivialRep
AutoBZCore.UnknownRep
AutoBZCore.symmetrize
```
## Internal
The following docstrings belong to internal types and functions that may change between
versions of AutoBZCore.
```@docs
AutoBZCore.PuncturedInterval
AutoBZCore.HyperCube
AutoBZCore.SymmetricBZ
AutoBZCore.trapz
AutoBZCore.cube_automorphisms
``` | AutoBZCore | https://github.com/lxvm/AutoBZCore.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 1555 | module RheaReactions
using HTTP, JSON, DocStringExtensions, Term, Scratch, Serialization
# cache data using Scratch.jl
CACHE_LOCATION::String = ""
#=
These are the cache subdirectories, one per cached object type. They are
written to by calls such as `_cache("reaction", rid, rr)` in utils.jl.
=#
const CACHE_DIRS = ["reaction", "reaction_metabolites", "uniprot_reactions", "ec_reactions", "quartet"]
function __init__()
global CACHE_LOCATION = @get_scratch!("rhea_data")
for dir in CACHE_DIRS
!isdir(joinpath(CACHE_LOCATION, dir)) && mkdir(joinpath(CACHE_LOCATION, dir))
end
if isfile(joinpath(CACHE_LOCATION, "version.txt"))
vnum = read(joinpath(CACHE_LOCATION, "version.txt"))
if String(vnum) != string(Base.VERSION)
Term.tprint("""
{red} Caching uses Julia's serializer, which is incompatible
between different versions of Julia. Please clear the cache with
`clear_cache!()` before proceeding. {/red}
""")
end
else
write(joinpath(CACHE_LOCATION, "version.txt"), string(Base.VERSION))
end
end
const Maybe{T} = Union{T,Nothing}
const endpoint_url = "https://sparql.rhea-db.org/sparql"
include("types.jl")
include("cache.jl")
include("sparql.jl")
include("utils.jl")
export get_reaction,
get_reaction_metabolites,
get_reactions_with_ec,
get_reactions_with_metabolites,
get_reactions_with_uniprot_id,
get_reaction_quartet,
clear_cache!
end
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 903 | """
$(TYPEDSIGNATURES)
Clear the entire cache.
"""
clear_cache!() = begin
for dir in readdir(CACHE_LOCATION)
rm(joinpath(CACHE_LOCATION, dir), recursive = true)
dir != "version.txt" && mkdir(joinpath(CACHE_LOCATION, dir)) # add back the empty dir
end
write(joinpath(CACHE_LOCATION, "version.txt"), string(Base.VERSION))
Term.tprint("{blue} Cache cleared! {/blue}")
end
"""
$(TYPEDSIGNATURES)
Checks whether an object with identifier `id` has been cached in `database`.
"""
_is_cached(database::String, id) =
isfile(joinpath(RheaReactions.CACHE_LOCATION, database, string(id)))
"""
$(TYPEDSIGNATURES)
Return the cached object with identifier `id` from `database`.
"""
_get_cache(database::String, id) =
deserialize(joinpath(CACHE_LOCATION, database, string(id)))
"""
$(TYPEDSIGNATURES)
Serialize `item` to the cache under `database` and `id`.
"""
_cache(database::String, id, item) =
serialize(joinpath(CACHE_LOCATION, database, string(id)), item)
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 4250 | #=
SPARQL queries
Thanks Mirek for helping!
=#
_reaction_body(rid::Int64) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
WHERE {
rh:$rid rh:id ?id ;
rh:equation ?eqn ;
rh:accession ?acc ;
rh:status ?status .
OPTIONAL {rh:$rid rh:name ?name }.
OPTIONAL {rh:$rid rh:ec ?ec }.
OPTIONAL {rh:$rid rh:isTransport ?istrans}.
OPTIONAL {rh:$rid rh:isChemicallyBalanced ?isbal }.
}
"""
_metabolite_stoichiometry_body(rid::Int64) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
WHERE {
{ rh:$rid rh:bidirectionalReaction / rh:substratesOrProducts ?SoP } .
?SoP ?contains [rh:compound ?cmp] .
?contains rh:coefficient ?coef .
?cmp rh:id ?id ;
rh:accession ?acc .
OPTIONAL {?cmp rh:reactivePart ?rp .
?rp rh:formula ?rpformula ;
rh:charge ?rpcharge ;
rh:chebi ?rpchebi .} .
OPTIONAL {?cmp rh:charge ?charge}.
OPTIONAL {?cmp rh:name ?name}.
OPTIONAL {?cmp rh:formula ?formula}.
}
"""
function _reaction_metabolite_matches_body(
substrate_ids::Vector{Int64},
product_ids::Vector{Int64},
)
substrates = join(
[
"""
VALUES (?chebi_s$idx) { (CHEBI:$id) }
OPTIONAL { ?chebi_s$idx up:name ?name_s$idx} .
?rhea rh:side ?reactionSide_S .
?rhea rh:accession ?acc_s$idx .
?reactionSide_S rh:contains / rh:compound / rh:chebi ?chebi_s$idx .
""" for (idx, id) in enumerate(substrate_ids)
],
"\n",
)
products = join(
[
"""
VALUES (?chebi_p$idx) { (CHEBI:$id) }
OPTIONAL { ?chebi_p$idx up:name ?name_p$idx }.
?rhea rh:side ?reactionSide_P .
?rhea rh:accession ?acc_p$idx .
?reactionSide_P rh:contains / rh:compound / rh:chebi ?chebi_p$idx .
""" for (idx, id) in enumerate(product_ids)
],
"\n",
)
"""
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX CHEBI: <http://purl.obolibrary.org/obo/CHEBI_>
PREFIX up: <http://purl.uniprot.org/core/>
SELECT *
WHERE {
$substrates
$products
?reactionSide_S rh:transformableTo ?reactionSide_P .
?rhea rh:equation ?eqn ;
rh:status ?status ;
rh:id ?id ;
rh:accession ?acc .
OPTIONAL {?rhea rh:name ?name }.
OPTIONAL {?rhea rh:ec ?ec }.
OPTIONAL {?rhea rh:isTransport ?istrans}.
OPTIONAL {?rhea rh:isChemicallyBalanced ?isbal }.
}
"""
end
_uniprot_reviewed_rhea_mapping_body(uid::String) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX up: <http://purl.uniprot.org/core/>
SELECT *
WHERE {
SERVICE <https://sparql.uniprot.org/sparql> {
<http://purl.uniprot.org/uniprot/$(escape_string(uid))> up:annotation/up:catalyticActivity/up:catalyzedReaction ?rhea .
}
?rhea rh:accession ?accession .
}
"""
_ec_rhea_mapping_body(ec::String) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ec: <http://purl.uniprot.org/enzyme/>
SELECT ?ec ?rhea ?accession
WHERE {
?rhea rdfs:subClassOf rh:Reaction .
?rhea rh:accession ?accession .
?rhea rh:ec ?ec;
rh:ec <http://purl.uniprot.org/enzyme/$(escape_string(ec))> .
}
"""
_from_directional_reaction(rid::Int64) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
WHERE {
?rxn rh:directionalReaction rh:$rid .
?rxn rh:accession ?accession .
}
"""
_from_bidirectional_reaction(rid::Int64) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
WHERE {
?rxn rh:bidirectionalReaction rh:$rid .
?rxn rh:accession ?accession .
}
"""
_from_reference_reaction(rid::Int64) = """
PREFIX rh: <http://rdf.rhea-db.org/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
WHERE {
{
rh:$rid rh:directionalReaction ?rxn .
?rxn rh:accession ?accession .
}
UNION {
rh:$rid rh:bidirectionalReaction ?rxn .
?rxn rh:accession ?accession .
}
}
""" | RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 701 | """
$(TYPEDEF)
A struct for storing Rhea reaction information. Does not store the metabolite
information.
$(FIELDS)
"""
@with_repr mutable struct RheaReaction
id::Int64
equation::String
status::String
accession::String
name::Maybe{String}
ec::Maybe{Vector{String}} # multiple ECs can be assigned to a single reaction
istransport::Bool
isbalanced::Bool
end
RheaReaction() = RheaReaction(0, "", "", "", nothing, nothing, false, false)
"""
$(TYPEDEF)
A struct for storing Rhea metabolite information.
$(FIELDS)
"""
@with_repr struct RheaMetabolite
id::Int64
accession::String
name::Maybe{String}
charge::Maybe{Int64}
formula::Maybe{String}
end
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 9225 |
"""
$(TYPEDSIGNATURES)
Shortcut for `get(get(dict, key1, Dict()), key2, nothing)`.
"""
_double_get(dict, key1, key2; default = nothing) =
get(get(dict, key1, Dict()), key2, default)
"""
$(TYPEDSIGNATURES)
A simple SPARQL query that returns all the data matching `query` from the
Rhea endpoint. Returns `nothing` if the query errors. Can retry at most
`max_retries` before giving up.
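### Example
```julia
# fetch raw SPARQL results for Rhea reaction 11364 (requires network access)
req = RheaReactions._request_data(RheaReactions._reaction_body(11364))
```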
"""
function _request_data(query; max_retries = 5)
retry_counter = 0
req = nothing
    while isnothing(req) && retry_counter <= max_retries
retry_counter += 1
try
req = HTTP.request(
"POST",
endpoint_url,
[
"Accept" => "application/sparql-results+json",
"Content-type" => "application/x-www-form-urlencoded",
],
Dict("query" => query),
)
catch
req = nothing
end
end
return req
end
"""
$(TYPEDSIGNATURES)
Extract the result bindings from the parsed JSON of a Rhea request. Returns an
empty `Dict` when the response contains no results.
"""
function _parse_json(unparsed_json)
parsed_json = Dict{String,Vector{String}}()
!haskey(unparsed_json, "results") && return parsed_json
!haskey(unparsed_json["results"], "bindings") && return parsed_json
unparsed_json["results"]["bindings"]
end
"""
$(TYPEDSIGNATURES)
Combine [`_request_data`](@ref) with [`_parse_json`](@ref).
"""
function _parse_request(args...; kwargs...)
req = _request_data(args...; kwargs...)
isnothing(req) && return nothing
preq = _parse_json(JSON.parse(String(req.body)))
isempty(preq) ? nothing : preq
end
"""
$(TYPEDSIGNATURES)
Get reaction data for Rhea id `rid`. Returns a `RheaReaction` object. This
function is cached automatically by default; use `should_cache` to change this
behavior.
"""
function get_reaction(rid::Int64; should_cache = true)
_is_cached("reaction", rid) && return _get_cache("reaction", rid)
rxns = _parse_request(_reaction_body(rid))
isnothing(rxns) && return nothing
rr = RheaReaction()
for rxn in rxns
rr.id = parse(Int64, rxn["id"]["value"])
rr.equation = rxn["eqn"]["value"]
rr.status = rxn["status"]["value"]
rr.accession = rxn["acc"]["value"]
rr.name = _double_get(rxn, "name", "value")
ec = _double_get(rxn, "ec", "value")
!isnothing(ec) && (rr.ec = isnothing(rr.ec) ? [ec] : push!(rr.ec, ec))
rr.istransport = rxn["istrans"]["value"] == "true"
rr.isbalanced = rxn["isbal"]["value"] == "true"
end
should_cache && _cache("reaction", rid, rr)
return rr
end
"""
$(TYPEDSIGNATURES)
Return the reaction metabolite data of Rhea reaction id `rid`. This function is
cached automatically by default, use `should_cache` to change this behavior.
# Note
Charge defaults to `nothing` if an unexpected input is encountered. Likewise,
the stoichiometric coefficient defaults to `999` if a non-numeric input is
encountered. It does not return `nothing`, since the coefficient is also used to
store if the metabolite is a substrate or product. These cases crop up with
polymeric reactions.
Some compounds are GENERIC, strip the reactive part from these and report the
reactive part's ChEBI, charge, and formula.
"""
function get_reaction_metabolites(rid::Int64; should_cache = true)
_is_cached("reaction_metabolites", rid) &&
return _get_cache("reaction_metabolites", rid)
rids = get_reaction_quartet(rid)
compounds = []
for rid in rids # the sparql query only works with the reference reaction, not the directional ones
compounds = RheaReactions._parse_request(RheaReactions._metabolite_stoichiometry_body(rid))
!isnothing(compounds) && break
end
isnothing(compounds) && return nothing
compound_stoichs = Vector{Tuple{Float64, RheaMetabolite}}();
for compound in compounds
#=
If compound ID is GENERIC:xxx then assume only the reactive part "counts".
Strip out the ChEBI ID, charge, and formula from the reactive part.
=#
id = compound["acc"]["value"]
charge_id = startswith(id, "GENERIC") ? "rpcharge" : "charge"
formula_id = startswith(id, "GENERIC") ? "rpformula" : "formula"
accession = startswith(id, "GENERIC") ? last(split(replace(compound["rpchebi"]["value"], "_" => ":"),"/")) : id
_charge = RheaReactions._double_get(compound, charge_id, "value") # could be nothing
#=
Polymeric compounds return charge as a function of n, ignore these.
This implementation then assumes the charge of a compound is never higher/lower than ±9.
=#
charge = isnothing(_charge) || length(_charge) > 2 ? nothing : parse(Int64, _charge)
m = RheaMetabolite(
parse(Int64, compound["id"]["value"]),
accession,
RheaReactions._double_get(compound, "name", "value"),
charge,
RheaReactions._double_get(compound, formula_id, "value"),
)
#=
If coefficient is N or N+1, then return 999 with the sign denoting substrate or product.
=#
_coef = startswith(compound["coef"]["value"], "N") ? "999" : compound["coef"]["value"]
coef = parse(Float64, _coef) * (endswith(compound["SoP"]["value"], "_L") ? -1.0 : 1.0)
push!(compound_stoichs, (coef, m))
end
should_cache && _cache("reaction_metabolites", rid, compound_stoichs)
return compound_stoichs
end
"""
$(TYPEDSIGNATURES)
Return a dictionary of reactions where the ChEBI metabolite IDs in
`substrate_ids` and `product_ids` appear on opposite sides of the reaction.
"""
function get_reactions_with_metabolites(
substrate_ids::Vector{Int64},
product_ids::Vector{Int64},
)
rxns = _parse_request(_reaction_metabolite_matches_body(substrate_ids, product_ids))
isnothing(rxns) && return nothing
rr_rxns = Dict{Int64,RheaReaction}()
for rxn in rxns
id = parse(Int64, rxn["id"]["value"])
if !haskey(rr_rxns, id)
rr_rxns[id] = RheaReaction()
end
rr = rr_rxns[id]
rr.id = parse(Int64, rxn["id"]["value"])
rr.equation = rxn["eqn"]["value"]
rr.status = rxn["status"]["value"]
rr.accession = rxn["acc"]["value"]
rr.name = _double_get(rxn, "name", "value")
ec = _double_get(rxn, "ec", "value")
!isnothing(ec) && (rr.ec = isnothing(rr.ec) ? [ec] : push!(rr.ec, ec))
rr.istransport = rxn["istrans"]["value"] == "true"
rr.isbalanced = rxn["isbal"]["value"] == "true"
end
return rr_rxns
end
"""
$(TYPEDSIGNATURES)
Return the accession number associated with each element in `elements`.
"""
function _get_accessions(elements)
xs = Int64[]
for element in elements
x = _double_get(element, "accession", "value")
isnothing(x) && continue
push!(xs, parse(Int64, last(split(x, ":"))))
end
return xs
end
"""
$(TYPEDSIGNATURES)
Return a list of reactions that are associated with the Uniprot ID `uniprot_id`.
"""
function get_reactions_with_uniprot_id(uniprot_id::String; should_cache = true)
_is_cached("uniprot_reactions", uniprot_id) &&
return _get_cache("uniprot_reactions", uniprot_id)
elements = _parse_request(_uniprot_reviewed_rhea_mapping_body(uniprot_id))
isnothing(elements) && return nothing
uid_to_rhea = _get_accessions(elements)
should_cache && _cache("uniprot_reactions", uniprot_id, uid_to_rhea)
return uid_to_rhea
end
"""
$(TYPEDSIGNATURES)
Return a list of all Rhea reaction IDs that map to a specific EC number `ec`.
"""
function get_reactions_with_ec(ec::String; should_cache = true)
_is_cached("ec_reactions", ec) && return _get_cache("ec_reactions", ec)
elements = _parse_request(_ec_rhea_mapping_body(ec))
isnothing(elements) && return nothing
ec_to_rheas = _get_accessions(elements)
should_cache && _cache("ec_reactions", ec, ec_to_rheas)
return unique(ec_to_rheas)
end
"""
$(TYPEDSIGNATURES)
Return a list of the reference, directional (x2), and bidirectional reactions
associated with `rid`. This is useful if you want to find the reactions
catalyzing the same transformation, but with different directions.
"""
function get_reaction_quartet(rid::Int64; should_cache = true)
_is_cached("quartet", rid) && return _get_cache("quartet", rid)
ref_solution = -1
elements = RheaReactions._parse_request(RheaReactions._from_directional_reaction(rid))
if !isnothing(elements)
ref_solution = first(RheaReactions._get_accessions(elements))
end
elements = RheaReactions._parse_request(RheaReactions._from_bidirectional_reaction(rid))
if !isnothing(elements)
ref_solution = first(RheaReactions._get_accessions(elements))
end
ref_solution = ref_solution == -1 ? rid : ref_solution
elements = RheaReactions._parse_request(RheaReactions._from_reference_reaction(ref_solution))
other_rxns = RheaReactions._get_accessions(elements)
quartet = [ref_solution; other_rxns]
should_cache && _cache("quartet", rid, quartet)
return quartet
end | RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 489 | @testset "PPS metabolites" begin
#=
PPS
Phosphoenolpyruvate synthetase
rhea id 11364
ATP + H2O + pyruvate = AMP + 2 H+ + phosphate + phosphoenolpyruvate
=#
rid = 11364
coef_met = get_reaction_metabolites(rid)
@test length(coef_met) == 7
# find (H+)
idx = findfirst(x -> x[2].name == "H(+)", coef_met)
h = coef_met[idx][2]
@test h.id == 3249
@test h.accession == "CHEBI:15378"
@test h.charge == 1
@test h.formula == "H"
end
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 1158 | @testset "Reaction matches" begin
#=
Similar to the example: require CHEBI:29985 (L-glutamate) and CHEBI:58359
(L-glutamine) to be on opposite sides of the reaction.
=#
substrate_ids = [29985]
product_ids = [58359]
rxns = RheaReactions.get_reactions_with_metabolites(substrate_ids, product_ids)
@test length(rxns) == 31
@test rxns[13237].id == 13237
@test rxns[13237].status == "http://rdf.rhea-db.org/Approved"
end
@testset "Reaction EC matches" begin
rxns = get_reactions_with_ec("2.5.1.49")
@test issetequal(rxns, [10048, 27822])
end
@testset "Reaction Uniprot matches" begin
rxns = get_reactions_with_uniprot_id("P30085")
@test issetequal(rxns, [24400, 11600, 18113, 44640, 25094])
end
@testset "Reaction quartet" begin
quartet = [10736, 10737, 10738, 10739]
rxns = get_reaction_quartet(quartet[1])
@test issetequal(rxns, quartet)
rxns = get_reaction_quartet(quartet[2])
@test issetequal(rxns, quartet)
rxns = get_reaction_quartet(quartet[3])
@test issetequal(rxns, quartet)
rxns = get_reaction_quartet(quartet[4])
@test issetequal(rxns, quartet)
end
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 1147 | @testset "PPS reaction" begin
#=
PPS
Phosphoenolpyruvate synthetase
rhea id 11364
ATP + H2O + pyruvate = AMP + 2 H+ + phosphate + phosphoenolpyruvate
=#
rid = 11364
rxn = get_reaction(rid)
@test rxn.id == rid
@test rxn.equation ==
"ATP + H2O + pyruvate = AMP + 2 H(+) + phosphate + phosphoenolpyruvate"
@test rxn.accession == "RHEA:11364"
@test rxn.status == "http://rdf.rhea-db.org/Approved"
@test isnothing(rxn.name)
@test rxn.ec == ["http://purl.uniprot.org/enzyme/2.7.9.2"]
@test !rxn.istransport
@test rxn.isbalanced
end
@testset "CMP kinase reaction" begin
#=
CMP kinase
rhea id 11600
ATP + CMP = ADP + CDP
=#
rid = 11600
rxn = get_reaction(rid)
@test rxn.id == rid
@test rxn.equation == "ATP + CMP = ADP + CDP"
@test rxn.accession == "RHEA:11600"
@test rxn.status == "http://rdf.rhea-db.org/Approved"
@test isnothing(rxn.name)
@test rxn.ec == [
"http://purl.uniprot.org/enzyme/2.7.4.14",
"http://purl.uniprot.org/enzyme/2.7.4.25",
]
@test !rxn.istransport
@test rxn.isbalanced
end
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | code | 182 | using RheaReactions
using Test
@testset "RheaReactions.jl" begin
clear_cache!()
include("reactions.jl")
include("metabolites.jl")
include("reaction_matches.jl")
end
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"MIT"
] | 0.6.1 | b85bfbdf2e5391f74c6260d50bb6d0853e484da9 | docs | 1926 | # RheaReactions.jl
[repostatus-url]: https://www.repostatus.org/#active
[repostatus-img]: https://www.repostatus.org/badges/latest/active.svg
[![repostatus-img]][repostatus-url]
This is a simple package you can use to query Rhea reactions and associated
annotations. Its primary use is in reconstructing metabolic models. It caches
all requests by default, speeding up repeated calls where appropriate.
```julia
using RheaReactions # load module
get_reaction(11364) # Rhea reaction ID 11364
```
You can also get the metabolites associated with that reaction:
```julia
# [(coefficient, metabolite), ...]
coeff_mets = get_reaction_metabolites(11364) # Rhea reaction ID 11364
```
And look at each metabolite individually:
```julia
coeff_mets[1][2] # metabolite
```
You can also look for all reactions that have a certain set of metabolite
substrates and products. This function looks for all reactions that have both
CHEBI:29985 (L-glutamate) and CHEBI:58359 (L-glutamine) on opposite sides
of the reaction:
```julia
substrate_ids = [29985,]
product_ids = [58359,]
get_reactions_with_metabolites(
substrate_ids,
product_ids,
) # NB: not cached!
```
You can also look for Rhea reactions associated with a specific Uniprot ID:
```julia
get_reactions_with_uniprot_id("P30085")
```
You can look for all Rhea reaction IDs that map to a specific EC number:
```julia
get_reactions_with_ec("2.5.1.49")
```
Rhea reactions are typically broken into quartets: one reference reaction, two
directional reactions, and one bidirectional reaction. You can find all four
reactions given any single reaction:
```julia
get_reaction_quartet(11364) # Rhea reaction ID 11364
```
You can test the package with:
```julia
] test
```
### Troubleshooting
The cache can be source of subtle issues. If you get errors or unexpected behavior do:
1. `clear_cache!()`,
2. Restart the Julia session.
If you still get errors, please file an issue!
| RheaReactions | https://github.com/stelmo/RheaReactions.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 2442 | """
Pulse Input DDM
A Julia module for fitting bounded accumulator models using behavioral
and/or neural data from pulse-based evidence accumulation tasks.
"""
module PulseInputDDM
using DocStringExtensions
using StatsBase, Distributions, LineSearches
using ForwardDiff, Distributed, LinearAlgebra
using Optim, DSP, SpecialFunctions, MAT, Random
using Discretizers, ImageFiltering
using ForwardDiff: value
using PositiveFactorizations, Parameters, Flatten
using Polynomials, Missings
using HypothesisTests, TaylorSeries
using BasisFunctionExpansions
import StatsFuns: logistic, logit, softplus, xlogy
import Base.rand
import Base.Iterators: partition
import Flatten: flattenable
export choiceDDM, θchoice, θz
export neuralDDM, θneural, θy, neural_options, neuraldata
export save_model
export Sigmoid, Softplus
export noiseless_neuralDDM, θneural_noiseless, neural_options_noiseless
export neural_poly_DDM
export θneural_choice
export neural_choiceDDM, θneural_choice, neural_choice_options
export fit
export dimz
export likelihood, choice_loglikelihood, joint_loglikelihood
export choice_optimize, choice_neural_optimize, choice_likelihood
export simulate_expected_firing_rate, reload_neural_data
export loglikelihood, synthetic_data
export CIs, optimize, Hessian, gradient
export load_choice_data, reload_neural_model, save_neural_model, flatten
export save, load, reload_choice_model, save_choice_model
export reload_joint_model
export initalize
export synthetic_clicks, binLR, bin_clicks
export default_parameters_and_data, compute_LL
export mean_exp_rate_per_trial, mean_exp_rate_per_cond
export process_spike_data
export train_and_test, all_Softplus, save_choice_data
export load_neural_data
include("types.jl")
include("choice_model/types.jl")
include("neural_model/types.jl")
include("neural-choice_model/types.jl")
include("base_model.jl")
include("utils.jl")
include("optim_funcs.jl")
include("sample_model.jl")
include("choice_model/choice_model.jl")
include("choice_model/sample_model.jl")
include("choice_model/process_data.jl")
include("choice_model/IO.jl")
include("neural_model/neural_model.jl")
include("neural_model/sample_model.jl")
include("neural_model/process_data.jl")
include("neural_model/initalize.jl")
include("neural_model/RBF_model.jl")
include("neural-choice_model/neural-choice_model.jl")
include("neural-choice_model/sample_model.jl")
include("neural-choice_model/neural-choice_model-ALT.jl")
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 10544 |
const dimz = 7
"""
CIs(H)
Given a Hessian matrix `H`, compute the 2 std confidence intervals based on the Laplace approximation.
If `H` is not positive definite (which it should be, but might not be due numerical round off, etc.) compute
a close approximation to it by adding a correction term. The magnitude of this correction is reported.
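### Examples
```julia
julia> CI, HPSD = CIs([2.0 0.0; 0.0 8.0]);  # CI == 2*sqrt.(diag(inv(H)))

julia> CI
2-element Vector{Float64}:
 1.4142135623730951
 0.7071067811865476
```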
"""
function CIs(H::Array{Float64,2})
HPSD = Matrix(cholesky(Positive, H, Val{false}))
if !isapprox(HPSD,H)
norm_ϵ = norm(HPSD - H)/norm(H)
@warn "Hessian is not positive definite. Approximated by closest PSD matrix.
||ϵ||/||H|| is $norm_ϵ"
end
CI = 2*sqrt.(diag(inv(HPSD)))
return CI, HPSD
end
"""
P, M, xc, dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
Creates several variables that are required to compute the LL for each trial, but that
are identical for all trials.
## PARAMETERS:
- σ2_i initial variance
- B bound height
- λ drift
- σ2_a accumlator variance
- n number of bins
- dt temporal bin width
## RETURNS:
- P A vector. Discrete approximation to P(a).
- M A n x n matrix. The transition matrix of P(a_t | a_{t-1})
- xc A vector. Spatial bin centers
- dx Scalar. The spacing between spatial bins.
## EXAMPLE CALL:
```jldoctest
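julia> P, M, xc, dx = PulseInputDDM.initialize_latent_model(0.5, 15., -0.5, 50., 53, 1e-2);

julia> size(M)
(53, 53)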
```
"""
function initialize_latent_model(σ2_i::TT, B::TT, λ::TT, σ2_a::TT,
n::Int, dt::Float64) where {TT <: Any}
xc,dx = bins(B,n)
P = P0(σ2_i,n,dx,xc,dt)
M = transition_M(σ2_a*dt,λ,zero(TT),dx,xc,n,dt)
return P, M, xc, dx
end
function initialize_latent_model(σ2_i::TT, B::TT, λ::TT, σ2_a::TT,
dx::Float64, dt::Float64) where {TT <: Any}
xc,n = bins(B,dx)
P = P0(σ2_i,n,dx,xc,dt)
M = transition_M(σ2_a*dt,λ,zero(TT),dx,xc,n,dt)
return P, M, xc, n
end
"""
    P0(σ2_i, n, dx, xc, dt)
"""
function P0(σ2_i::TT, n::Int, dx::VV, xc::Vector{TT}, dt::Float64) where {TT,VV <: Any}
P = zeros(TT,n)
P[ceil(Int,n/2)] = one(TT)
M = transition_M(σ2_i,zero(TT),zero(TT),dx,xc,n,dt)
P = M * P
end
"""
"""
function ΣLR_ΔLR(t::Int, nL::Vector{Int}, nR::Vector{Int},
La::Vector{TT}, Ra::Vector{TT}) where {TT <: Any}
any(t .== nL) ? sL = sum(La[t .== nL]) : sL = zero(TT)
any(t .== nR) ? sR = sum(Ra[t .== nR]) : sR = zero(TT)
sL + sR, -sL + sR
end
"""
backward_one_step!(P, F, λ, σ2_a, σ2_s, t, nL, nR, La, Ra, M, dx, xc, n, dt)
"""
function backward_one_step!(P::Vector{TT}, F::Array{TT,2}, λ::TT, σ2_a::TT, σ2_s::TT,
t::Int, nL::Vector{Int}, nR::Vector{Int},
La::Vector{TT}, Ra::Vector{TT}, M::Array{TT,2},
dx::UU, xc::Vector{TT}, n::Int, dt::Float64) where {TT,UU <: Any}
Σ, μ = ΣLR_ΔLR(t, nL, nR, La, Ra)
σ2 = σ2_s * Σ
if Σ > zero(TT)
transition_M!(F,σ2+σ2_a*dt,λ, μ, dx, xc, n, dt)
P = F' * P
else
P = M' * P
end
return P, F
end
"""
latent_one_step_alt!(P, F, λ, σ2_a, σ2_s, t, nL, nR, La, Ra, M, dx, xc, n, dt)
"""
function latent_one_step_alt!(alpha::Vector{TT}, F::Array{TT,2}, λ::TT, σ2_a::TT, σ2_s::TT,
t::Int, nL::Vector{Int}, nR::Vector{Int},
La::Vector{TT}, Ra::Vector{TT}, M::Array{TT,2},
dx::UU, xc::Vector{TT}, n::Int, dt::Float64) where {TT,UU <: Any}
mm = maximum(alpha)
any(t .== nL) ? sL = sum(La[t .== nL]) : sL = zero(TT)
any(t .== nR) ? sR = sum(Ra[t .== nR]) : sR = zero(TT)
σ2 = σ2_s * (sL + sR); μ = -sL + sR
if (sL + sR) > zero(TT)
transition_M!(F,σ2+σ2_a*dt,λ, μ, dx, xc, n, dt)
alpha = log.((exp.(alpha .- mm)' * F')') .+ mm
else
alpha = log.((exp.(alpha .- mm)' * M')') .+ mm
end
return alpha, F
end
"""
latent_one_step!(P, F, λ, σ2_a, σ2_s, t, nL, nR, La, Ra, M, dx, xc, n, dt)
"""
function latent_one_step!(P::Vector{TT}, F::Array{TT,2}, λ::TT, σ2_a::TT, σ2_s::TT,
t::Int, nL::Vector{Int}, nR::Vector{Int},
La::Vector{TT}, Ra::Vector{TT}, M::Array{TT,2},
dx::UU, xc::Vector{TT}, n::Int, dt::Float64) where {TT,UU <: Any}
any(t .== nL) ? sL = sum(La[t .== nL]) : sL = zero(TT)
any(t .== nR) ? sR = sum(Ra[t .== nR]) : sR = zero(TT)
σ2 = σ2_s * (sL + sR); μ = -sL + sR
if (sL + sR) > zero(TT)
transition_M!(F,σ2+σ2_a*dt,λ, μ, dx, xc, n, dt)
P = F * P
else
P = M * P
end
return P, F
end
"""
bins(B,n)
Computes the bin center locations and bin spacing, given the boundary and number of bins.
### Examples
```jldoctest
julia> xc,dx = PulseInputDDM.bins(25.5,53)
([-26.0, -25.0, -24.0, -23.0, -22.0, -21.0, -20.0, -19.0, -18.0, -17.0 … 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0], 1.0)
```
"""
function bins(B::TT, n::Int) where {TT}
dx = 2. *B/(n-2)
xc = vcat(collect(range(-(B+dx/2.),stop=-dx,length=Int((n-1)/2.))),0.,
collect(range(dx,stop=(B+dx/2.),length=Int((n-1)/2))))
return xc, dx
end
"""
bins(B, dx)
Computes the bin center locations and number of bins, given the boundary and desired (average) bin spacing.
### Examples
```jldoctest
julia> xc,n = PulseInputDDM.bins(10.,0.25)
([-10.25, -9.75, -9.5, -9.25, -9.0, -8.75, -8.5, -8.25, -8.0, -7.75 … 7.75, 8.0, 8.25, 8.5, 8.75, 9.0, 9.25, 9.5, 9.75, 10.25], 81)
```
"""
function bins(B::TT, dx::Float64) where {TT}
xc = collect(0.:dx:floor(value(B)/dx)*dx)
if xc[end] == B
xc = vcat(xc[1:end-1], B + dx)
else
xc = vcat(xc, 2*B - xc[end])
end
xc = vcat(-xc[end:-1:2], xc)
n = length(xc)
return xc, n
end
"""
expm1_div_x(x)
"""
function expm1_div_x(x)
    # evaluate (exp(x) - 1)/x via a degree-100 Taylor expansion, which is
    # well-behaved at x ≈ 0 and compatible with automatic differentiation
    t = Taylor1(100)
    y = (exp(t) - 1)/t
    y(x)
end
"""
transition_M(σ2, λ, μ, dx, xc, n, dt)
Returns a \$n \\times n\$ Markov transition matrix. The transition matrix is discrete approximation to the Fokker-Planck equation with drift λ, diffusion σ2 and driving current (i.e. click input) μ. dx and dt define the spatial and temporal binning, respectively. xc are the bin center locations.
See also: [`transition_M!`](@ref)
### Examples
```jldoctest
julia> dt, n, B, σ2, λ, μ = 0.1, 53, 10., 10., -0.5, 1.;
julia> xc,dx = PulseInputDDM.bins(B, n);
julia> M = PulseInputDDM.transition_M(σ2, λ, μ, dx, xc, n, dt);
julia> size(M)
(53, 53)
```
"""
function transition_M(σ2::TT, λ::TT, μ::TT, dx::UU,
xc::Vector{TT}, n::Int, dt::Float64) where {TT,UU <: Any}
M = zeros(TT,n,n)
transition_M!(M,σ2,λ,μ,dx,xc,n,dt)
return M
end
"""
transition_M!(F::Array{TT,2}, σ2::TT, λ::TT, μ::TT, dx::Float64,
xc::Vector{TT}, n::Int, dt::Float64) where {TT <: Any}
"""
function transition_M!(F::Array{TT,2}, σ2::TT, λ::TT, μ::TT, dx::UU,
xc::Vector{TT}, n::Int, dt::Float64) where {TT,UU <: Any}
F[1,1] = one(TT); F[n,n] = one(TT); F[:,2:n-1] = zeros(TT,n,n-2)
ndeltas = max(70,ceil(Int, 10. *sqrt(σ2)/dx))
deltaidx = collect(-ndeltas:ndeltas)
deltas = deltaidx * (5. *sqrt(σ2))/ndeltas
ps = exp.(-0.5 * (5*deltaidx./ndeltas).^2)
ps = ps/sum(ps)
@inbounds for j = 2:n-1
#abs(λ) < 1e-150 ? mu = xc[j] + μ : mu = exp(λ*dt)*(xc[j] + μ/(λ*dt)) - μ/(λ*dt)
#abs(λ) < 1e-150 ? mu = xc[j] + h * dt : mu = exp(λ*dt)*(xc[j] + h/λ) - h/λ
#mu = exp(λ*dt)*xc[j] + μ * (exp(λ*dt) - 1.)/(λ*dt)
#mu = exp(λ*dt)*xc[j] + μ * (expm1(λ*dt)/(λ*dt)
mu = exp(λ*dt)*xc[j] + μ * expm1_div_x(λ*dt)
        #now we're going to loop over all the slices of the gaussian
for k = 1:2*ndeltas+1
s = mu + deltas[k]
if s <= xc[1]
F[1,j] += ps[k]
elseif s >= xc[n]
F[n,j] += ps[k]
else
if (xc[1] < s) && (xc[2] > s)
lp,hp = 1,2
elseif (xc[n-1] < s) && (xc[n] > s)
lp,hp = n-1,n
else
hp,lp = ceil(Int, (s-xc[2])/dx) + 2, floor(Int, (s-xc[2])/dx) + 2
end
if hp == lp
F[lp,j] += ps[k]
else
dd = xc[hp] - xc[lp]
F[hp,j] += ps[k]*(s-xc[lp])/dd
F[lp,j] += ps[k]*(xc[hp]-s)/dd
end
end
end
end
end
"""
    adapt_clicks(ϕ, τ_ϕ, L, R; cross)
Compute the adapted state of left and right clicks.
Arguments:
- `ϕ`: determines the strength of adaptation after each click.
- `τ_ϕ`: determines the timescale of adaptation.
- `L`: `array` of left click times.
- `R`: `array` of right click times.
Optional arguments:
- `cross`: `Bool` to perform or not perform cross-click adaptation (default is `false`).
Returns:
- `La`: `array` of adapted state of each left click (same length as `L`).
- `Ra`: `array` of adapted state of each right click (same length as `R`).
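### Examples
```julia
# adapted click magnitudes for a short train of clicks (made-up times)
La, Ra = PulseInputDDM.adapt_clicks(0.8, 0.05, [0.0, 0.1, 0.12], [0.0, 0.2])
```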
"""
function adapt_clicks(ϕ::TT, τ_ϕ::TT, L::Vector{Float64}, R::Vector{Float64}; cross::Bool=false) where {TT}
if cross
all = vcat(hcat(L[2:end], -1 * ones(length(L)-1)), hcat(R, ones(length(R))))
all = all[sortperm(all[:, 1]), :]
adapted = ones(TT, size(all,1))
adapted[1] = eps()
        # skip adaptation when ϕ is a constant (i.e. not being learned) equal to 1
        if !(typeof(ϕ) == Float64 && isapprox(ϕ, 1.0))
            (length(all) > 1 && ϕ != 1.) && adapt_clicks!(ϕ, τ_ϕ, adapted, all[:, 1])
        end
all = vcat([0., -1.]', all)
adapted = vcat(eps(), adapted)
La, Ra = adapted[all[:,2] .== -1.], adapted[all[:,2] .== 1.]
else
La, Ra = ones(TT,length(L)), ones(TT,length(R))
        #this if statement skips adaptation when ϕ is a constant equal to 1 (i.e. not being learned)
        if !(typeof(ϕ) == Float64 && isapprox(ϕ, 1.0))
            La[1], Ra[1] = eps(), eps()
            (length(L) > 1 && ϕ != 1.) && adapt_clicks!(ϕ, τ_ϕ, La, L)
            (length(R) > 1 && ϕ != 1.) && adapt_clicks!(ϕ, τ_ϕ, Ra, R)
        end
end
return La, Ra
end
"""
    adapt_clicks!(ϕ, τ_ϕ, Ca, C)
"""
function adapt_clicks!(ϕ::TT, τ_ϕ::TT, Ca::Vector{TT}, C::Vector{Float64}) where {TT}
ici = diff(C)
for i = 1:length(ici)
arg = (1/τ_ϕ) * (-ici[i] + xlogy(τ_ϕ, abs(1. - Ca[i]* ϕ)))
if Ca[i]* ϕ <= 1
Ca[i+1] = 1. - exp(arg)
else
Ca[i+1] = 1. + exp(arg)
end
end
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 2483 | """
optimize(x, ll, lb, ub)
Wrapper for executing a constrained optimization.
Arguments:
- `ll`: objective function.
- `x`: an `array` of initial values.
- `lb`: lower bounds. `array` the same length as `x`.
- `ub`: upper bounds. `array` the same length as `x`.
"""
function optimize(x::Vector{TT}, ll, lb, ub;
g_tol::Float64=1e-12, x_tol::Float64=1e-16, f_tol::Float64=1e-16,
iterations::Int=Int(5e3), outer_iterations::Int=Int(1e1),
show_trace::Bool=true, extended_trace::Bool=false,
scaled::Bool=false, time_limit::Float64=170000.,
show_every::Int=10) where TT <: Real
obj = OnceDifferentiable(ll, x; autodiff=:forward)
m = BFGS(alphaguess = InitialStatic(alpha=1.0,scaled=scaled), linesearch = BackTracking())
#start_time = time()
#time_to_setup = zeros(1)
#callback = x-> advanced_time_control(x, start_time, time_to_setup)
options = Optim.Options(g_tol=g_tol, x_tol=x_tol, f_tol=f_tol,
iterations= iterations, allow_f_increases=true,
store_trace = true, show_trace = show_trace, extended_trace=extended_trace,
outer_g_tol=g_tol, outer_x_tol=x_tol, outer_f_tol=f_tol,
outer_iterations= outer_iterations, allow_outer_f_increases=true,
time_limit = time_limit, show_every=show_every)
output = Optim.optimize(obj, lb, ub, x, Fminbox(m), options)
return output
end
"""
"""
function advanced_time_control(x, start_time, time_to_setup)
println(" * Iteration: ", x.iteration)
so_far = time()-start_time
println(" * Time so far: ", so_far)
if x.iteration == 0
time_to_setup[1] = time()-start_time
else
expected_next_time = so_far + (time()-start_time-time_to_setup[1])/(x.iteration)
println(" * Next iteration ≈ ", expected_next_time)
println()
return expected_next_time < 60 ? false : true
end
println()
false
end
"""
stack(x,c)
Combine two vectors into one. The first vector contains the variables being optimized; the second contains the constants.
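### Examples
```jldoctest
julia> PulseInputDDM.stack([1.0, 2.0], [0.0], [true, false, true])
3-element Vector{Float64}:
 1.0
 0.0
 2.0
```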
"""
function stack(x::Vector{TT}, c::Vector{Float64}, fit::Union{BitArray{1},Vector{Bool}}) where TT
v = Vector{TT}(undef,length(fit))
v[fit] = x
v[.!fit] = c
return v
end
"""
unstack(v)
Break one vector into two: the first contains the variables being optimized; the second contains the constants.
"""
function unstack(v::Vector{TT}, fit::Union{BitArray{1},Vector{Bool}}) where TT
x,c = v[fit], v[.!fit]
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 3864 | """
synthetic_clicks(ntrials, rng)
Computes randomly timed left and right clicks for ntrials.
rng sets the random seed so that clicks can be consistently produced.
Output is bundled into an array of 'click' types.
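### Examples
```julia
julia> click_data = synthetic_clicks(10, 1);  # 10 trials, random seed 1

julia> length(click_data)
10
```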
"""
function synthetic_clicks(ntrials::Int, rng::Int;
tmin::Float64=0.2, tmax::Float64=1.0, clicktot::Int=40)
Random.seed!(rng)
T = tmin .+ (tmax-tmin).*rand(ntrials)
T = ceil.(T, digits=2)
ratetot = clicktot./T
Rbar = ratetot.*rand(ntrials)
Lbar = ratetot .- Rbar
R = cumsum.(rand.(Exponential.(1 ./Rbar), clicktot))
L = cumsum.(rand.(Exponential.(1 ./Lbar), clicktot))
R = map((T,R)-> vcat(0,R[R .<= T]), T,R)
L = map((T,L)-> vcat(0,L[L .<= T]), T,L)
clicks.(L, R, T)
end
"""
rand(θz, inputs)
Generate a sample latent trajectory, given parameters of the latent model `θz` and `inputs` for one trial.
Returns:
- `A`: an `array` of the latent path.
"""
function rand(θz::θz{T}, inputs) where T <: Real
@unpack σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
@unpack clicks, binned_clicks, centered, dt, delay, pad = inputs
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
La, Ra = adapt_clicks(ϕ, τ_ϕ, L, R)
time_bin = (-(pad-1):nT+pad) .- delay
A = Vector{T}(undef, length(time_bin))
if σ2_i > 0.
a = sqrt(σ2_i)*randn()
else
a = zero(typeof(σ2_i))
end
for t = 1:length(time_bin)
if time_bin[t] < 1
if σ2_i > 0.
a = sqrt(σ2_i)*randn()
else
a = zero(typeof(σ2_i))
end
else
a = sample_one_step!(a, time_bin[t], σ2_a, σ2_s, λ, nL, nR, La, Ra, dt)
end
abs(a) > B ? (a = B * sign(a); A[t:end] .= a; break) : A[t] = a
end
return A
end
"""
sample_one_step!(a, t, σ2_a, σ2_s, λ, nL, nR, La, Ra, dt)
Move latent state one dt forward, given parameters defining the DDM.
"""
function sample_one_step!(a::TT, t::Int, σ2_a::TT, σ2_s::TT, λ::TT,
nL::Vector{Int}, nR::Vector{Int},
La, Ra, dt::Float64) where {TT <: Any}
any(t .== nL) ? sL = sum(La[t .== nL]) : sL = zero(TT)
any(t .== nR) ? sR = sum(Ra[t .== nR]) : sR = zero(TT)
σ2, μ = σ2_s * (sL + sR), -sL + sR
if (σ2_a * dt + σ2) > 0.
η = sqrt(σ2_a * dt + σ2) * randn()
else
η = zero(typeof(σ2_a))
end
#if abs(λ) < 1e-150
# a += μ + η
#else
# h = μ/(dt*λ)
# a = exp(λ*dt)*(a + h) - h + η
#end
a = exp(λ*dt)*a + μ * expm1_div_x(λ*dt) + η
return a
end
"""
"""
function rand(θz, inputs, P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT}; n::Int=53, cross::Bool=false) where {TT,UU <: Real}
@unpack λ,σ2_a,σ2_s,ϕ,τ_ϕ = θz
@unpack binned_clicks, clicks, dt = inputs
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R; cross=cross)
F = zeros(TT,n,n)
a = Vector{TT}(undef,nT)
@inbounds for t = 1:nT
P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt)
P /= sum(P)
a[t] = xc[findfirst(cumsum(P) .> rand())]
P = TT.(xc .== a[t])
end
return a
end
"""
"""
function randP(θz, inputs, P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT}; n::Int=53, cross::Bool=false) where {TT,UU <: Real}
@unpack λ,σ2_a,σ2_s,ϕ,τ_ϕ = θz
@unpack binned_clicks, clicks, dt = inputs
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R; cross=cross)
F = zeros(TT,n,n)
@inbounds for t = 1:nT
P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt)
P /= sum(P)
end
return P
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 580 | abstract type DDM end
abstract type DDMdata end
abstract type DDMθ end
abstract type DDMf end
"""
"""
@with_kw mutable struct θz{T<:Real} @deftype T
σ2_i = 0.5
B = 15.
λ = -0.5; @assert λ != 0.
σ2_a = 50.
σ2_s = 1.5
ϕ = 0.8; @assert ϕ != 1.
τ_ϕ = 0.05
end
"""
"""
@with_kw struct clicks
L::Vector{Float64}
R::Vector{Float64}
T::Float64
end
"""
"""
@with_kw struct binned_clicks
#clicks::T
nT::Int
nL::Vector{Int}
nR::Vector{Int}
end
@with_kw struct bins
#clicks::T
xc::Vector{Real}
dx::Real
n::Int
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 6835 | """
all_Softplus(data)
Returns: `array` of `array` of `string`, of all Softplus
"""
function all_Softplus(ncells)
#ncells = getfield.(first.(data), :ncells)
f = repeat(["Softplus"], sum(ncells))
borg = vcat(0,cumsum(ncells))
f = [f[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
end
"""
"""
function diffLR(data)
@unpack binned_clicks, clicks, dt, pad, delay = data.input_data
L,R = binLR(binned_clicks, clicks, dt)
vcat(zeros(Int, pad + delay), cumsum(-L + R), zeros(Int, pad - delay))
end
"""
"""
function binLR(binned_clicks, clicks, dt)
@unpack L, R = clicks
@unpack nT = binned_clicks
#compute the cumulative diff of clicks
t = 0:dt:nT*dt;
L = StatsBase.fit(Histogram,L,t,closed=:left)
R = StatsBase.fit(Histogram,R,t,closed=:left)
L = L.weights
R = R.weights
return L,R
end
function load_and_dprime(path::String, sessids, ratnames;
dt::Float64=1e-3, delay::Float64=0.)
data = aggregate_spiking_data(path,sessids,ratnames)
data = map(x->bin_clicks_spikes_and_λ0!(x; dt=dt,delay=delay), data)
map(d-> map(n-> dprime(map(r-> data[d]["μ_rn"][r][n], 1:data[d]["ntrials"]), data[d]["pokedR"]),
1:data[d]["N"]),
1:length(data))
end
function predict_choice_Y(pz, py, bias, data; dt::Float64=1e-2, n::Int=53, f_str::String="softplus",
λ0::Vector{Vector{Vector{Float64}}}=Vector{Vector{Vector{Float64}}}())
    PS = PulseInputDDM.PY_all_trials(pz, py, data; λ0=λ0, f_str=f_str)
    xc,dx,xe = PulseInputDDM.bins(pz[2],n);
    nbinsL, Sfrac = PulseInputDDM.bias_bin(bias,xe,dx,n)
Pd = vcat(1. * ones(nbinsL), 1. * Sfrac + 0. * (1. - Sfrac), 0. * ones(n - (nbinsL + 1)))
predicted_choice = map(x-> !Bool(round(sum(x[:, end] .* Pd))) , PS)
per_corr = sum(predicted_choice .== data["pokedR"]) / length(data["pokedR"])
return per_corr, predicted_choice
end
function predict_choice(pz, bias, data; n::Int=53)
PS = P_all_trials(pz, data)
    xc,dx,xe = PulseInputDDM.bins(pz[2],n);
    nbinsL, Sfrac = PulseInputDDM.bias_bin(bias,xe,dx,n)
Pd = vcat(1. * ones(nbinsL), 1. * Sfrac + 0. * (1. - Sfrac), 0. * ones(n - (nbinsL + 1)))
predicted_choice = map(x-> !Bool(round(sum(x[:, end] .* Pd))) , PS)
per_corr = sum(predicted_choice .== data["pokedR"]) / length(data["pokedR"])
return per_corr, predicted_choice
end
function dprime(FR,choice)
abs(mean(FR[choice .== false]) - mean(FR[choice .== true])) /
sqrt(0.5 * (var(FR[choice .== false])^2 + var(FR[choice .== true])^2))
end
function FilterSpikes(x,SD;pad::String="zeros")
#smooth x with a Gaussian kernel of standard deviation SD (in bins)
gausswidth = 8*SD; # 2.5 is the default for the function gausswin
F = pdf.(Normal(gausswidth/2, SD),1:gausswidth);
F /= sum(F);
#try
shift = Int(floor(length(F)/2)); # this is the amount of time that must be added to the beginning and end;
if pad == "zeros"
prefilt = vcat(zeros(shift,size(x,2)),x,zeros(shift,size(x,2)));
elseif pad == "mean"
#pads the beginning and end with copies of first and last value (not zeros)
prefilt = vcat(broadcast(+,zeros(shift,size(x,2)),mean(x[1:SD,:],1)),x,
broadcast(+,zeros(shift,size(x,2)),mean(x[end-SD:end,:],1)));
end
postfilt = filt(F,prefilt); # filters the data with the impulse response in Filter
postfilt = postfilt[2*shift:size(postfilt,1)-1,:];
#catch
# postfilt = x
#end
end
function psth(data::Dict,filt_sd::Float64,dt::Float64)
#b = (1/3)* ones(1,3);
#rate(data(j).N,1:size(data(j).spike_counts,1),j) = filter(b,1,data(j).spike_counts/dt)';
try
#FilterSpikes expects the data first, then the filter SD
lambda = (1/dt) * FilterSpikes(data["spike_counts"], filt_sd);
catch
lambda = data["spike_counts"]/dt; #convert counts to a rate (only divide by dt once)
end
end
function decimate(x, r)
# Decimation reduces the original sampling rate of a sequence
# to a lower rate. It is the opposite of interpolation.
#
# The decimate function lowpass filters the input to guard
# against aliasing and downsamples the result.
#
# y = decimate(x,r)
#
# Reduces the sampling rate of x, the input signal, by a factor
# of r. The decimated vector, y, is shortened by a factor of r
# so that length(y) = ceil(length(x)/r). By default, decimate
# uses a lowpass Chebyshev Type I IIR filter of order 8.
#
# Sometimes, the specified filter order produces passband
# distortion due to roundoff errors accumulated from the
# convolutions needed to create the transfer function. The filter
# order is automatically reduced when distortion causes the
# magnitude response at the cutoff frequency to differ from the
# ripple by more than 1e-6.
nfilt = 8
cutoff = .8 / r
rip = 0.05 # dB
function filtmag_db(b, a, f)
# Find filter's magnitude response in decibels at given frequency.
nb = length(b)
na = length(a)
top = dot(exp.(-1im*collect(0:nb-1)*pi*f), b)
bot = dot(exp.(-1im*collect(0:na-1)*pi*f), a)
20*log10(abs(top/bot))
end
b, a = cheby1(nfilt, rip, cutoff)
while all(b .== 0) || (abs(filtmag_db(b, a, cutoff)+rip)>1e-6)
nfilt = nfilt - 1
nfilt == 0 ? break : nothing
b, a = cheby1(nfilt, rip, cutoff)
end
y = filtfilt(PolynomialRatio(b, a), x)
nd = length(x)
nout = ceil(nd/r)
nbeg = Int(r - (r * nout - nd))
y[nbeg:r:nd]
end
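#=
A usage sketch of decimate (hypothetical signal sampled at 1 kHz, downsampled 10x):

    x = sin.(2π * 5 .* (0:1e-3:1))
    y = decimate(x, 10)    #length(y) == ceil(length(x)/10) == 101
=#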
function cheby1(n, r, wp)
# Chebyshev Type I digital filter design.
#
# b, a = cheby1(n, r, wp)
#
# Designs an nth order lowpass digital Chebyshev filter with
# R decibels of peak-to-peak ripple in the passband.
#
# The function returns the filter coefficients in length
# n+1 vectors b (numerator) and a (denominator).
#
# The passband-edge frequency wp must be 0.0 < wp < 1.0, with
# 1.0 corresponding to half the sample rate.
#
# Use r=0.5 as a starting point, if you are unsure about choosing r.
h = digitalfilter(Lowpass(wp), Chebyshev1(n, r))
tf = convert(PolynomialRatio, h)
coefb(tf), coefa(tf)
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 2801 | """
save_choice_data(file)
Given a path, save data into a `.MAT` file in the format that `PulseInputDDM`
expects for fitting its choice model.
"""
function save_choice_data(file::String, data)
rawdata = Dict("rawdata" => [(leftbups = x.click_data.clicks.L, rightbups = x.click_data.clicks.R,
T = x.click_data.clicks.T, pokedR = x.choice) for x in data])
matwrite(file, rawdata)
end
"""
load_choice_data(file)
Given a path to a `.MAT` file containing data (properly formatted), loads data into
an acceptable format to use with `pulse_input_DDM` to fit its choice model.
"""
function load_choice_data(file::String; centered::Bool=false, dt::Float64=1e-2)
data = read(matopen(file), "rawdata")
if typeof(data) == Dict{String, Any}
T = vec(data["T"])
L = vec(map(x-> vec(collect(x)), data[collect(keys(data))[occursin.("left", collect(keys(data)))][1]]))
R = vec(map(x-> vec(collect(x)), data[collect(keys(data))[occursin.("right", collect(keys(data)))][1]]))
choices = vec(convert(BitArray, data["pokedR"]))
elseif typeof(data) == Vector{Any}
T = vec([data[i]["T"] for i in 1:length(data)])
L = vec(map(x-> vec(collect((x[collect(keys(x))[occursin.("left", collect(keys(x)))][1]]))), data))
R = vec(map(x-> vec(collect((x[collect(keys(x))[occursin.("right", collect(keys(x)))][1]]))), data))
choices = vec(convert(BitArray, [data[i]["pokedR"] for i in 1:length(data)]))
end
theclicks = clicks.(L, R, T)
binned_clicks = bin_clicks.(theclicks, centered=centered, dt=dt)
inputs = map((clicks, binned_clicks)-> choiceinputs(clicks=clicks, binned_clicks=binned_clicks,
dt=dt, centered=centered), theclicks, binned_clicks)
choicedata.(inputs, choices)
end
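#=
A usage sketch ("mydata.mat" is a hypothetical path to a properly formatted file):

    data = load_choice_data("mydata.mat"; centered=false, dt=1e-2)
=#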
"""
    save_choice_model(file, model)
Given a file and a model produced by `fit`, save the results of the optimization to a `.MAT` file
"""
function save_choice_model(file, model)
@unpack lb, ub, fit, θ = model
dict = Dict("ML_params"=> collect(Flatten.flatten(θ)),
"name" => ["σ2_i", "B", "λ", "σ2_a", "σ2_s", "ϕ", "τ_ϕ", "bias", "lapse"],
"lb"=> lb, "ub"=> ub, "fit"=> fit)
matwrite(file, dict)
end
"""
reload_choice_model(file)
Given a path, reload the results of a previous optimization saved as a `.MAT` file
and return a `choiceDDM` with those parameters, bounds, and fit mask.
"""
function reload_choice_model(file)
x = read(matopen(file), "ML_params")
lb = read(matopen(file), "lb")
ub = read(matopen(file), "ub")
fit = read(matopen(file), "fit")
choiceDDM(θ=Flatten.reconstruct(θchoice(), x), fit=fit, lb=lb, ub=ub)
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 10972 | """
    fit(model, data)
Fit model parameters for a `choiceDDM`.
Returns:
- `model`: an instance of a `choiceDDM`.
- `output`: results from [`Optim.optimize`](@ref).
Arguments:
- `model`: an instance of a `choiceDDM`.
- `data`: a `choicedata` instance, or a vector of them.
"""
function fit(model::choiceDDM, data::Union{choicedata{choiceinputs{clicks, binned_clicks}},
Vector{choicedata{choiceinputs{clicks, binned_clicks}}}};
x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true, outer_iterations::Int=Int(1e1),
extended_trace::Bool=false, scaled::Bool=false, time_limit::Float64=170000., show_every::Int=10)
@unpack fit, lb, ub, θ, n, cross = model
x0 = collect(Flatten.flatten(θ))
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -loglikelihood(stack(x,c,fit), model, data)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations, extended_trace=extended_trace,
scaled=scaled, time_limit=time_limit, show_every=show_every)
x = Optim.minimizer(output)
x = stack(x,c,fit)
model.θ = Flatten.reconstruct(θ, x)
return model, output
end
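#=
A minimal end-to-end sketch, pairing synthetic data with the fit above:

    θ, data = synthetic_data(ntrials=1000)
    model = choiceDDM()
    model, output = fit(model, data; iterations=500)
=#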
"""
loglikelihood(x, model)
Given a vector of parameters and a type containing the data related to the choice DDM model, compute the LL.
See also: [`loglikelihood`](@ref)
"""
function loglikelihood(x::Vector{T1}, model::choiceDDM, data::Union{choicedata{choiceinputs{clicks, binned_clicks}}, Vector{choicedata{choiceinputs{clicks, binned_clicks}}}}) where {T1 <: Real}
@unpack n, cross = model
θ = Flatten.reconstruct(θchoice(), x)
model = choiceDDM(θ=θ, n=n, cross=cross)
loglikelihood(model, data)
end
"""
gradient(model)
Compute the gradient of the negative log-likelihood at the current value of the parameters of a `choiceDDM`.
"""
function gradient(model::choiceDDM, data::Union{choicedata{choiceinputs{clicks, binned_clicks}}, Vector{choicedata{choiceinputs{clicks, binned_clicks}}}})
@unpack θ = model
x = collect(Flatten.flatten(θ))
ℓℓ(x) = -loglikelihood(x, model, data)
ForwardDiff.gradient(ℓℓ, x)
end
"""
Hessian(model)
Compute the hessian of the negative log-likelihood at the current value of the parameters of a `choiceDDM`.
"""
function Hessian(model::choiceDDM, data::Union{choicedata{choiceinputs{clicks, binned_clicks}}, Vector{choicedata{choiceinputs{clicks, binned_clicks}}}})
@unpack θ = model
x = collect(Flatten.flatten(θ))
ℓℓ(x) = -loglikelihood(x, model, data)
ForwardDiff.hessian(ℓℓ, x)
end
"""
loglikelihood(model)
Given parameters θ and data (inputs and choices) computes the LL for all trials
"""
function loglikelihood(model::choiceDDM, data::Union{choicedata{choiceinputs{clicks, binned_clicks}}, Vector{choicedata{choiceinputs{clicks, binned_clicks}}}})
@unpack θ, n, cross = model
@unpack θz = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1].click_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
sum(pmap(data -> loglikelihood!(θ, P, M, dx, xc, data, n, cross), data))
end
"""
loglikelihood!(θ, P, M, dx, xc, data, n, cross)
Given parameters θ and data (inputs and choices) computes the LL for one trial
"""
loglikelihood!(θ::θchoice,
P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT}, data::choicedata,
n::Int, cross::Bool) where {TT,UU <: Real} = log(likelihood!(θ, P, M, dx, xc, data, n, cross))
"""
    likelihood(model, data)
Given parameters θ and data (inputs and choices), computes the likelihood of the choice for all trials
"""
function likelihood(model::choiceDDM, data::Vector{choicedata{choiceinputs{clicks, binned_clicks}}})
@unpack θ, n, cross = model
@unpack θz = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1].click_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
pmap(data -> likelihood!(θ, P, M, dx, xc, data, n, cross), data)
end
"""
likelihood!(θ, P, M, dx, xc, data, n, cross)
Given parameters θ and data (inputs and choices) computes the likelihood of the choice for one trial
"""
function likelihood!(θ::θchoice,
P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT}, data::choicedata,
n::Int, cross::Bool) where {TT,UU <: Real}
@unpack θz, bias, lapse = θ
@unpack click_data, choice = data
P = P_single_trial!(θz,P,M,dx,xc,click_data,n,cross)
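#mix the DDM choice probability with the lapse: (1 - lapse) * P(choice | θz, bias) + lapse/2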
sum(choice_likelihood!(bias,xc,P,choice,n,dx)) * (1 - lapse) + lapse/2
end
"""
P_single_trial!(θz, P, M, dx, xc, click_data, n)
Given parameters θz, propagates P for one trial
"""
function P_single_trial!(θz,
P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT}, click_data,
n::Int, cross::Bool;
keepP::Bool=false) where {TT,UU <: Real}
@unpack λ,σ2_a,σ2_s,ϕ,τ_ϕ = θz
@unpack binned_clicks, clicks, dt = click_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R; cross=cross)
#empty transition matrix for time bins with clicks
F = zeros(TT,n,n)
if keepP
PS = Vector{Vector{Float64}}(undef, nT)
end
@inbounds for t = 1:nT
#maybe only pass one L,R,nT?
P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt)
if keepP
PS[t] = P
end
end
if keepP
return PS
else
return P
end
end
"""
choice_likelihood!(bias, xc, P, pokedR, n, dx)
Preserves mass in the distribution P on the side consistent with the choice pokedR relative to the point bias, and deals gracefully with the case where the bias equals a bin center. If the bias grows larger than the bound, the LL becomes very large and the gradient is zero; however, the general convexity of the -LL surface w.r.t. this parameter should preclude the optimizer from approaching that region.
### Examples
```jldoctest
julia> n, dt = 13, 1e-2;
julia> bias = 0.51;
julia> σ2_i, B, λ, σ2_a = 1., 2., 0., 10.; # the bound height of 2 is intentionally low, so P is not too long
julia> P, M, xc, dx = pulse_input_DDM.initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt);
julia> pokedR = true;
julia> round.(pulse_input_DDM.choice_likelihood!(bias, xc, P, pokedR, n, dx), digits=2)
13-element Array{Float64,1}:
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.04
0.09
0.08
0.05
0.03
0.02
```
"""
function choice_likelihood!(bias::TT, xc::Vector{TT}, P::Vector{VV},
pokedR::Bool, n::Int, dx::UU) where {TT,UU,VV <: Any}
lp = searchsortedlast(xc,bias)
hp = lp + 1
if ((hp==n+1) & (pokedR==true))
P[1:lp-1] .= zero(TT)
P[lp] = eps()
elseif((lp==0) & (pokedR==false))
P[hp+1:end] .= zero(TT)
P[hp] = eps()
elseif ((hp==n+1) & (pokedR==false)) || ((lp==0) & (pokedR==true))
P .= one(TT)
else
dh, dl = xc[hp] - bias, bias - xc[lp]
dd = dh + dl
if pokedR
P[1:lp-1] .= zero(TT)
P[hp] = P[hp] * (1/2 + dh/dd/2)
P[lp] = P[lp] * (dh/dd/2)
else
P[hp+1:end] .= zero(TT)
P[hp] = P[hp] * (dl/dd/2)
P[lp] = P[lp] * (1/2 + dl/dd/2)
end
end
return P
end
"""
posterior(model)
"""
function posterior(model::choiceDDM, data::Union{choicedata{choiceinputs{clicks, binned_clicks}}, Vector{choicedata{choiceinputs{clicks, binned_clicks}}}})
@unpack θ, n, cross = model
@unpack θz = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1].click_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
pmap(data -> posterior(θ, data, P, M, dx, xc, n, cross), data)
end
"""
    posterior(θ, data, P, M, dx, xc, n, cross)
"""
function posterior(θ::θchoice, data::choicedata,
P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT},
n::Int, cross::Bool) where {TT,UU <: Real}
@unpack θz, bias, lapse = θ
@unpack click_data, choice = data
@unpack λ,σ2_a,σ2_s,ϕ,τ_ϕ = θz
@unpack binned_clicks, clicks, dt = click_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R; cross=cross)
F = zeros(TT,n,n)
α = Array{Float64,2}(undef, n, nT)
β = Array{Float64,2}(undef, n, nT)
c = Vector{TT}(undef, nT)
@inbounds for t = 1:nT
P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt)
(t == nT) && (P = choice_likelihood!(bias,xc,P,choice,n,dx))
c[t] = sum(P)
P /= c[t]
α[:,t] = P
end
P = ones(Float64,n) #initialize backward pass with all 1's
β[:,end] = P
@inbounds for t = nT-1:-1:1
(t+1 == nT) && (P = choice_likelihood!(bias,xc,P,choice,n,dx))
P,F = backward_one_step!(P, F, λ, σ2_a, σ2_s, t+1, nL, nR, La, Ra, M, dx, xc, n, dt)
P /= c[t+1]
β[:,t] = P
end
return α, β, xc
end
"""
forward(model)
"""
function forward(model::choiceDDM, data::Vector{choicedata{choiceinputs{clicks, binned_clicks}}})
@unpack θ, n, cross = model
@unpack θz = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1].click_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
pmap(data -> forward(θ, P, M, dx, xc, data, n, cross), data)
end
"""
    forward(θ, P, M, dx, xc, data, n, cross)
"""
function forward(θ::θchoice,
P::Vector{TT}, M::Array{TT,2}, dx::UU,
xc::Vector{TT}, data,
n::Int, cross::Bool) where {TT,UU <: Real}
@unpack θz, bias, lapse = θ
@unpack click_data, choice = data
@unpack λ,σ2_a,σ2_s,ϕ,τ_ϕ = θz
@unpack binned_clicks, clicks, dt = click_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R; cross=cross)
F = zeros(TT,n,n)
α = Array{Float64,2}(undef, n, nT)
c = Vector{TT}(undef, nT)
@inbounds for t = 1:nT
P,F = latent_one_step!(P,F,λ,σ2_a,σ2_s,t,nL,nR,La,Ra,M,dx,xc,n,dt)
c[t] = sum(P)
P /= c[t]
α[:,t] = P
end
return α, xc
end
#=
Backward pass, for one day when I might need to compute the posterior again.
@inbounds for t = 1:T
P,F = latent_one_step!(P,F,pz,t,hereL,hereR,La,Ra,M,dx,xc,n,dt)
(t == T) && (P .*= Pd)
c[t] = sum(P)
P /= c[t]
comp_posterior ? post[:,t] = P : nothing
end
P = ones(Float64,n); #initialze backward pass with all 1's
post[:,T] .*= P;
@inbounds for t = T-1:-1:1
(t + 1 == T) && (P .*= Pd)
P,F = latent_one_step!(P,F,pz,t+1,hereL,hereR,La,Ra,M,dx,xc,n,dt;backwards=true)
P /= c[t+1]
post[:,t] .*= P
end
=# | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 1036 | """
bin_clicks(clicks::Vector{T})
Wrapper to broadcast bin_clicks across a vector of clicks.
"""
bin_clicks(clicks::Vector{T}; dt::Float64=1e-2, centered::Bool=false) where T <: Any =
bin_clicks.(clicks; dt=dt, centered=centered)
"""
bin_clicks(clicks)
Bins clicks, based on dt (defaults to 1e-2). 'centered' determines if the bin edges
occur at 0 and dt (and then every dt after that), or at -dt/2 and dt/2 (and then
every dt after that). If the former, the bins align with the binning of spikes
in the neural model. For choice model, the latter is fine.
"""
function bin_clicks(clicks::clicks; dt::Float64=1e-2, centered::Bool=false)
@unpack T,L,R = clicks
nT = ceil(Int, round((T/dt), digits=10))
if centered
nL = searchsortedlast.(Ref((0. -dt/2):dt:(nT -dt/2)*dt), L)
nR = searchsortedlast.(Ref((0. -dt/2):dt:(nT -dt/2)*dt), R)
else
nL = searchsortedlast.(Ref(0.:dt:nT*dt), L)
nR = searchsortedlast.(Ref(0.:dt:nT*dt), R)
end
binned_clicks(nT, nL, nR)
end
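#=
A sketch with hypothetical clicks (T = 0.5 s and dt = 0.01 s give 50 bins):

    c = clicks(L=[0.05], R=[0.1, 0.3], T=0.5)
    bc = bin_clicks(c; dt=1e-2)    #bc.nT == 50; bc.nL and bc.nR hold the bin index of each click
=#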
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 1661 | """
synthetic_data(; θ=θchoice(), ntrials=2000, rng=1)
Returns default parameters and ntrials of synthetic data (clicks and choices) organized into a choicedata type.
"""
function synthetic_data(; θ::θchoice=θchoice(), ntrials::Int=2000, rng::Int=1, dt::Float64=1e-2, centered::Bool=false)
clicks, choices = rand(θ, ntrials; rng=rng)
binned_clicks = bin_clicks.(clicks, centered=centered, dt=dt)
inputs = map((clicks, binned_clicks)-> choiceinputs(clicks=clicks, binned_clicks=binned_clicks,
dt=dt, centered=centered), clicks, binned_clicks)
return θ, choicedata.(inputs, choices)
end
"""
rand(θ, ntrials)
Produces synthetic clicks and choices for n trials using model parameters θ.
"""
function rand(θ::θchoice, ntrials::Int; dt::Float64=1e-4, rng::Int = 1, centered::Bool=false)
clicks = synthetic_clicks(ntrials, rng)
binned_clicks = bin_clicks.(clicks,centered=centered,dt=dt)
inputs = map((clicks, binned_clicks)-> choiceinputs(clicks=clicks, binned_clicks=binned_clicks,
dt=dt, centered=centered), clicks, binned_clicks)
ntrials = length(inputs)
rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false)
#choices = rand.(Ref(θ), inputs, rng)
choices = pmap((inputs, rng) -> rand(θ, inputs, rng), inputs, rng)
return clicks, choices
end
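#=
A sketch generating raw clicks and choices from the default parameters:

    clicks, choices = rand(θchoice(), 100)
=#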
"""
rand(θ, inputs, rng)
Produces L/R choice for one trial, given model parameters and inputs.
"""
function rand(θ::θchoice, inputs::choiceinputs, rng::Int)
Random.seed!(rng)
@unpack θz, bias, lapse = θ
a = rand(θz,inputs)
rand() > lapse ? choice = a[end] >= bias : choice = Bool(round(rand()))
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 1924 | """
"""
@with_kw struct choiceinputs{T1,T2}
clicks::T1
binned_clicks::T2
dt::Float64
centered::Bool
delay::Int=0
pad::Int=0
end
"""
θchoice(θz, bias, lapse) <: DDMθ
Fields:
- `θz`: is a module-defined type that contains the parameters related to the latent variable model.
- `bias` is the choice bias parameter.
- `lapse` is the lapse parameter.
Example:
```julia
θchoice(θz=θz(σ2_i = 0.5, B = 15., λ = -0.5, σ2_a = 50., σ2_s = 1.5,
ϕ = 0.8, τ_ϕ = 0.05), bias=1., lapse=0.05)
```
"""
@with_kw mutable struct θchoice{T1, T2, T3} <: DDMθ
θz::T1 = θz()
bias::T2 = 1.
lapse::T3 = 0.05
end
"""
choicedata{T1} <: DDMdata
Fields:
- `click_data` is a type that contains all of the parameters related to click input.
- `choice` is the choice data for a single trial.
Example:
```julia
choicedata(click_data=inputs, choice=true) #`inputs` is a `choiceinputs` instance
```
"""
@with_kw struct choicedata{T1} <: DDMdata
click_data::T1
choice::Bool
end
"""
choiceDDM(θ, n, cross)
Fields:
- `θ`: a instance of the module-defined class `θchoice` that contains all of the model parameters for a `choiceDDM`
- `n`: number of spatial bins to use (defaults to 53).
- `cross`: whether or not to use cross click adaptation (defaults to false).
- `fit`: `array` of `Bool` for optimization for `choiceDDM` model.
- `lb`: `array` of lower bounds for optimization for `choiceDDM` model.
- `ub`: `array` of upper bounds for optimization for `choiceDDM` model.
Example:
```julia
ntrials, dt, centered, n = 1, 1e-2, false, 53
θ = θchoice()
_, data = synthetic_data(;θ=θ, ntrials=ntrials, rng=1, dt=dt);
choiceDDM(θ=θ, n=n)
```
"""
@with_kw mutable struct choiceDDM{T} <: DDM
θ::T = θchoice()
n::Int=53
cross::Bool=false
fit::Vector{Bool} = vcat(trues(dimz+2))
lb::Vector{Float64} = vcat([0., 4., -5., 0., 0., 0.01, 0.005], [-5, 0.])
ub::Vector{Float64} = vcat([30., 30., 5., 100., 2.5, 1.2, 1.], [5, 1.])
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 3050 | """
    choice_optimize(model, data)
Optimize choice-related model parameters for a `neural_choiceDDM` using choice data.
Arguments:
- `model`: an instance of a `neural_choiceDDM`.
- `data`: the neural and choice data.
Returns:
- `model`: an instance of a `neural_choiceDDM`.
- `output`: results from [`Optim.optimize`](@ref).
"""
function choice_optimize(model::neural_choiceDDM, data;
x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true, outer_iterations::Int=Int(1e1),
scaled::Bool=false, extended_trace::Bool=false)
@unpack θ, n, cross, fit, lb, ub = model
@unpack f = θ
x0 = PulseInputDDM.flatten(θ)
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -choice_loglikelihood(stack(x,c,fit), model, data)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations, scaled=scaled,
extended_trace=extended_trace)
x = Optim.minimizer(output)
x = stack(x,c,fit)
model.θ = θneural_choice(x, f)
return model, output
end
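#=
A usage sketch (assumes `model` is a neural_choiceDDM and `data` the matching neural data):

    model, output = choice_optimize(model, data; iterations=500)
=#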
"""
choice_loglikelihood(x, model)
A wrapper function that accepts a vector of mixed parameters, splits the vector
into two vectors based on the parameter mapping function provided as an input. Used
in optimization, Hessian and gradient computation.
"""
function choice_loglikelihood(x::Vector{T}, model::neural_choiceDDM, data) where {T <: Real}
@unpack θ,n,cross,fit,lb,ub = model
@unpack f = θ
model = neural_choiceDDM(θ=θneural_choice(x, f), n=n, cross=cross,fit=fit, lb=lb, ub=ub)
choice_loglikelihood(model, data)
end
"""
choice_loglikelihood(model)
Given parameters θ and data (inputs and choices) computes the LL for all trials
"""
choice_loglikelihood(model::neural_choiceDDM, data) = sum(log.(vcat(choice_likelihood(model, data)...)))
"""
"""
function choice_loglikelihood_per_trial(model::neural_choiceDDM, data)
output = choice_likelihood(model, data)
map(x-> map(x-> sum(log.(x)), x), output)
end
"""
choice_likelihood(model)
Arguments: a `neural_choiceDDM` instance and the data
Returns: `array` of `array` of P(d|θ, Y)
"""
function choice_likelihood(model::neural_choiceDDM, data)
@unpack θ,n,cross = model
@unpack θz, θy, bias, lapse = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1][1].input_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
map((data, θy) -> pmap(data ->
choice_likelihood(θ,θy,data,P,M,xc,dx,n,cross), data), data, θy)
end
"""
"""
function choice_likelihood(θ, θy, data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack choice = data
@unpack θz, bias, lapse = θ
P = likelihood(θz, θy, data, P, M, xc, dx, n, cross)[2]
sum(choice_likelihood!(bias,xc,P,choice,n,dx)) * (1 - lapse) + lapse/2
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 8145 | """
    choice_neural_optimize(model, data)
Optimize (potentially all) model parameters for a `neural_choiceDDM` using choice and neural data.
Arguments:
- `model`: an instance of a `neural_choiceDDM`.
- `data`: the neural and choice data.
Returns:
- `model`: an instance of a `neural_choiceDDM`.
- `output`: results from [`Optim.optimize`](@ref).
"""
function choice_neural_optimize(model::neural_choiceDDM, data;
x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true, outer_iterations::Int=Int(1e1),
scaled::Bool=false, extended_trace::Bool=false)
@unpack θ, n, cross, fit, lb, ub = model
@unpack f = θ
x0 = PulseInputDDM.flatten(θ)
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -joint_loglikelihood(stack(x,c,fit), model, data)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations, scaled=scaled,
extended_trace=extended_trace)
x = Optim.minimizer(output)
x = stack(x,c,fit)
model.θ = θneural_choice(x, f)
return model, output
end
"""
gradient(model)
Compute the gradient of the negative log-likelihood at the current value of the parameters of a `neural_choiceDDM`.
Arguments:
- `model`: instance of `neural_choiceDDM`
"""
function gradient(model::neural_choiceDDM, data)
@unpack θ = model
x = flatten(θ)
ℓℓ(x) = -joint_loglikelihood(x, model, data)
ForwardDiff.gradient(ℓℓ, x)
end
"""
    Hessian(model, data; chunk_size)
Compute the hessian of the negative log-likelihood at the current value of the parameters of a `neural_choiceDDM`.
Arguments:
- `model`: instance of `neural_choiceDDM`
Optional arguments:
- `chunk_size`: parameter to manage how many passes over the LL are required to compute the Hessian. Can be larger if you have access to more memory.
"""
function Hessian(model::neural_choiceDDM, data; chunk_size::Int=4)
@unpack θ = model
x = flatten(θ)
ℓℓ(x) = -joint_loglikelihood(x, model, data)
cfg = ForwardDiff.HessianConfig(ℓℓ, x, ForwardDiff.Chunk{chunk_size}())
ForwardDiff.hessian(ℓℓ, x, cfg)
end
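#=
One common use, sketched: approximate +/-2σ confidence intervals from the inverse
Hessian (assumes the fit converged and H is positive definite):

    using LinearAlgebra
    H = Hessian(model, data)
    CI = 2 * sqrt.(diag(inv(H)))
=#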
"""
joint_loglikelihood(x, model)
A wrapper function that accepts a vector of mixed parameters, splits the vector
into two vectors based on the parameter mapping function provided as an input. Used
in optimization, Hessian and gradient computation.
"""
function joint_loglikelihood(x::Vector{T}, model::neural_choiceDDM, data) where {T <: Real}
@unpack θ,n,cross,fit,lb,ub = model
@unpack f = θ
model = neural_choiceDDM(θ=θneural_choice(x, f), n=n, cross=cross,fit=fit, lb=lb, ub=ub)
joint_loglikelihood(model, data)
end
"""
joint_loglikelihood(model)
Given parameters θ and data (inputs and choices) computes the LL for all trials
"""
joint_loglikelihood(model::neural_choiceDDM, data) = sum(log.(vcat(vcat(joint_likelihood(model, data)...)...)))
"""
joint_loglikelihood_per_trial(model)
Given parameters θ and data (inputs and choices) computes the LL for all trials
"""
function joint_loglikelihood_per_trial(model::neural_choiceDDM, data)
output = joint_likelihood(model, data)
map(x-> map(x-> sum(log.(x)), x), output)
end
"""
joint_likelihood(model)
Arguments: a `neural_choiceDDM` instance and the data
Returns: `array` of `array` of P(d, Y|θ)
"""
function joint_likelihood(model::neural_choiceDDM, data)
@unpack θ,n,cross = model
@unpack θz, θy, bias, lapse = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1][1].input_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
map((data, θy) -> pmap(data ->
joint_likelihood(θ,θy,data,P,M,xc,dx,n,cross), data), data, θy)
end
"""
"""
function joint_likelihood(θ, θy, data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack choice = data
@unpack θz, bias, lapse = θ
c, P = likelihood(θz, θy, data, P, M, xc, dx, n, cross)
return vcat(c, sum(choice_likelihood!(bias,xc,P,choice,n,dx)) * (1 - lapse) + lapse/2)
end
"""
"""
function posterior(model::neural_choiceDDM, data)
@unpack θ,n,cross = model
@unpack θy,θz = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1][1].input_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
map((data, θy) -> pmap(data -> posterior(θ, θy, data, P, M, xc, dx, n, cross), data), data, θy)
end
"""
"""
function posterior(θ::θneural_choice, θy, data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack θz, bias, lapse = θ
@unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
@unpack spikes, input_data, choice = data
@unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross)
time_bin = (-(pad-1):nT+pad) .- delay
c = Vector{T1}(undef, length(time_bin))
F = zeros(T1,n,n) #empty transition matrix for time bins with clicks
α = Array{Float64,2}(undef, n, length(time_bin))
β = Array{Float64,2}(undef, n, length(time_bin))
@inbounds for t = 1:length(time_bin)
if time_bin[t] >= 1
P, F = latent_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t], nL, nR, La, Ra, M, dx, xc, n, dt)
end
P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t]) * dt),
k[t]), spikes, θy, λ0))), xc)...))
(t == length(time_bin)) && (P = choice_likelihood!(bias,xc,P,choice,n,dx))
c[t] = sum(P)
P /= c[t]
α[:,t] = P
end
P = ones(Float64,n) #initialize backward pass with all 1's
β[:,end] = P
@inbounds for t = length(time_bin)-1:-1:1
(t+1 == length(time_bin)) && (P = choice_likelihood!(bias,xc,P,choice,n,dx))
P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t+1]) * dt),
k[t+1]), spikes, θy, λ0))), xc)...))
if time_bin[t] >= 0
P,F = backward_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t+1], nL, nR, La, Ra, M, dx, xc, n, dt)
end
P /= c[t+1]
β[:,t] = P
end
return α, β, xc
end
"""
"""
function forward(model::neural_choiceDDM, data)
@unpack θ,n,cross = model
@unpack θy,θz = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1][1].input_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
map((data, θy) -> pmap(data -> forward(θ, θy, data, P, M, xc, dx, n, cross), data), data, θy)
end
"""
"""
function forward(θ::θneural_choice, θy, data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack θz, bias, lapse = θ
@unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
@unpack spikes, input_data, choice = data
@unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross)
time_bin = (-(pad-1):nT+pad) .- delay
c = Vector{T1}(undef, length(time_bin))
F = zeros(T1,n,n) #empty transition matrix for time bins with clicks
α = Array{Float64,2}(undef, n, length(time_bin))
@inbounds for t = 1:length(time_bin)
if time_bin[t] >= 1
P, F = latent_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t], nL, nR, La, Ra, M, dx, xc, n, dt)
end
P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t]) * dt),
k[t]), spikes, θy, λ0))), xc)...))
c[t] = sum(P)
P /= c[t]
α[:,t] = P
end
return α, xc
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 2727 | """
"""
function synthetic_data(θ::θneural_choice,
ntrials::Vector{Int}, ncells::Vector{Int}; centered::Bool=true,
dt::Float64=1e-2, rng::Int=1, dt_synthetic::Float64=1e-4,
delay::Int=0, pad::Int=10, pos_ramp::Bool=false)
nsess = length(ntrials)
rng = sample(Random.seed!(rng), 1:nsess, nsess; replace=false)
@unpack θz,θy,bias,lapse = θ
output = rand.(Ref(θz), θy, bias, lapse, ntrials, ncells, rng; delay=delay, pad=0, pos_ramp=pos_ramp)
spikes = getindex.(output, 1)
λ0 = getindex.(output, 2)
clicks = getindex.(output, 3)
choices = getindex.(output, 4)
output = bin_clicks_spikes_λ0.(spikes, clicks, λ0;
centered=centered, dt=dt, dt_synthetic=dt_synthetic, synthetic=true)
#λ0 = synthetic_λ0.(clicks, ncells; dt=dt, pos_ramp=pos_ramp, pad=0)
spikes = getindex.(output, 1)
binned_clicks = getindex.(output, 2)
λ0 = getindex.(output, 3)
input_data = neuralinputs.(clicks, binned_clicks, λ0, dt, centered, delay, 0)
padded = map(spikes-> map(spikes-> map(SCn-> vcat(rand.(Poisson.((sum(SCn[1:10])/(10*dt))*ones(pad)*dt)),
SCn, rand.(Poisson.((sum(SCn[end-9:end])/(10*dt))*ones(pad)*dt))), spikes), spikes), spikes)
μ_rnt = map(padded-> filtered_rate.(padded, dt), padded)
nT = map(x-> map(x-> x.nT, x), binned_clicks)
μ_t = map((μ_rnt, ncells, nT)-> map(n-> [max(0., mean([μ_rnt[i][n][t]
for i in findall(nT .>= t)]))
for t in 1:(maximum(nT))], 1:ncells), μ_rnt, ncells, nT)
neuraldata.(input_data, spikes, ncells, choices), μ_rnt, μ_t
end
"""
"""
function rand(θz::θz, θy, bias, lapse, ntrials, ncells, rng; centered::Bool=false, dt::Float64=1e-4, pos_ramp::Bool=false,
delay::Int=0, pad::Int=10)
clicks = synthetic_clicks.(ntrials, rng)
binned_clicks = bin_clicks.(clicks, centered=centered, dt=dt)
λ0 = synthetic_λ0.(clicks, ncells; dt=dt, pos_ramp=pos_ramp, pad=pad)
input_data = neuralinputs.(clicks, binned_clicks, λ0, dt, centered, delay, pad)
rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false)
output = pmap((input_data,rng) -> rand(θz,θy,bias,lapse,input_data; rng=rng), input_data, rng)
spikes = getindex.(output, 3)
choices = getindex.(output, 4)
return spikes, λ0, clicks, choices
end
"""
"""
function rand(θz::θz, θy, bias, lapse, input_data::neuralinputs; rng::Int=1)
@unpack λ0, dt = input_data
Random.seed!(rng)
a = rand(θz,input_data)
λ = map((θy,λ0)-> θy(a, λ0), θy, λ0)
spikes = map(λ-> rand.(Poisson.(λ*dt)), λ)
rand() > lapse ? choice = a[end] >= bias : choice = Bool(round(rand()))
return λ, a, spikes, choice
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 2097 | """
    neural_choiceDDM
Fields:
- `θ`: model parameters (a `θneural_choice`).
- `n`: number of spatial bins.
- `cross`: whether to use cross-stream click adaptation.
- `fit`, `lb`, `ub`: which parameters to fit, and their lower and upper bounds.
"""
@with_kw mutable struct neural_choiceDDM{T} <: DDM
θ::T
n::Int=53
cross::Bool=false
fit::Vector{Bool}
ub::Vector{Float64}
lb::Vector{Float64}
end
"""
"""
@with_kw mutable struct θneural_choice{T1, T2, T3} <: DDMθ
θz::T1
bias::T2
lapse::T2
θy::T3
f::Vector{Vector{String}}
end
"""
"""
function neural_choice_options(f)
nparams, ncells = nθparams(f)
fit = vcat(trues(dimz+2), trues.(nparams)...)
lb = Vector(undef, sum(ncells))
ub = Vector(undef, sum(ncells))
for i in 1:sum(ncells)
if vcat(f...)[i] == "Softplus"
lb[i] = [-10]
ub[i] = [10]
elseif vcat(f...)[i] == "Sigmoid"
lb[i] = [-100.,0.,-10.,-10.]
ub[i] = [100.,100.,10.,10.]
elseif vcat(f...)[i] == "Softplus_negbin"
lb[i] = [0, -10]
ub[i] = [Inf, 10]
end
end
lb = vcat([1e-3, 8., -5., 1e-3, 1e-3, 1e-3, 0.005], [-10, 0.], vcat(lb...))
ub = vcat([100., 40., 5., 400., 10., 1.2, 1.], [10, 1.], vcat(ub...));
fit, lb, ub
end
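#=
A usage sketch (two hypothetical sessions with 2 and 1 Softplus cells):

    f = [repeat(["Softplus"], 2), repeat(["Softplus"], 1)]
    fitbool, lb, ub = neural_choice_options(f)
=#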
"""
"""
function θneural_choice(x::Vector{T}, f::Vector{Vector{String}}) where {T <: Real}
nparams, ncells = nθparams(f)
borg = vcat(dimz + 2,dimz + 2 .+cumsum(nparams))
blah = [x[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
blah = map((f,x) -> f(x...), getfield.(Ref(@__MODULE__), Symbol.(vcat(f...))), blah)
borg = vcat(0,cumsum(ncells))
θy = [blah[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
θneural_choice(θz(x[1:dimz]...), x[dimz+1], x[dimz+2], θy, f)
end
"""
flatten(θ)
Extract parameters related to a `neural_choiceDDM` from an instance of `θneural_choice` and return them as an ordered vector.
"""
function flatten(θ::θneural_choice)
@unpack θy, θz, bias, lapse = θ
@unpack σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
vcat(σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ, bias, lapse,
vcat(collect.(Flatten.flatten.(vcat(θy...)))...))
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 8449 | #=
"""
"""
function optimize(data, options::neural_poly_options;
x_tol::Float64=1e-10, f_tol::Float64=1e-6, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true,
outer_iterations::Int=Int(1e1), α1::Float64=0.)
@unpack fit, lb, ub, x0, ncells, f, nparams, npolys = options
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
#ℓℓ(x) = -loglikelihood(stack(x,c,fit), data, ncells, nparams, f, npolys)
ℓℓ(x) = -(loglikelihood(stack(x,c,fit), data, ncells, nparams, f, npolys) -
α1 * (x[2] - lb[2]).^2)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations)
x = Optim.minimizer(output)
x = stack(x,c,fit)
θ = θneural_poly(x, ncells, nparams, f, npolys)
model = neural_poly_DDM(θ, data)
converged = Optim.converged(output)
return model, output
end
"""
loglikelihood(x, data, ncells)
A wrapper function that accepts a vector of mixed parameters, splits the vector
into two vectors based on the parameter mapping function provided as an input. Used
in optimization, Hessian and gradient computation.
"""
function loglikelihood(x::Vector{T1}, data::Vector{Vector{T2}}, ncells::Vector{Int},
nparams::Int, f::String, npolys::Int) where {T1 <: Real, T2 <: neuraldata}
θ = θneural_poly(x, ncells, nparams, f, npolys)
loglikelihood(θ, data)
end
"""
gradient(model)
"""
function gradient(model::neural_poly_DDM)
@unpack θ, data = model
@unpack ncells, nparams, f, npolys = θ
x = flatten(θ)
ℓℓ(x) = -loglikelihood(x, data, ncells, nparams, f, npolys)
ForwardDiff.gradient(ℓℓ, x)
end
"""
"""
function loglikelihood(θ::θneural_poly, data::Vector{Vector{T1}}) where T1 <: neuraldata
@unpack θz, θμ, θy = θ
sum(map((θy, θμ, data) -> sum(pmap(data-> loglikelihood(θz, θμ, θy, data), data,
batch_size=length(data))), θy, θμ, data))
end
"""
"""
function loglikelihood(θz::θz, θμ::Vector{Poly{T2}}, θy::Vector{T1},
data::neuraldata) where {T1 <: DDMf, T2 <: Real}
@unpack spikes, input_data = data
@unpack dt = input_data
λ, = loglikelihood(θz,θμ,θy,input_data)
sum(logpdf.(Poisson.(vcat(λ...)*dt), vcat(spikes...)))
end
"""
"""
function loglikelihood(θz::θz, θμ::Vector{Poly{T2}}, θy::Vector{T1},
input_data::neuralinputs) where {T1 <: DDMf, T2 <: Real}
@unpack binned_clicks, dt = input_data
@unpack nT = binned_clicks
a = rand(θz,input_data)
λ = map((θy,θμ)-> θy(a, θμ(1:nT)), θy, θμ)
return λ, a
end
"""
"""
@with_kw struct μ_poly_options
ncells::Vector{Int}
npolys::Int = 4
fit::Vector{Bool} = trues(sum(ncells)*npolys)
lb::Vector{Float64} = repeat(-Inf * ones(npolys), sum(ncells))
ub::Vector{Float64} = repeat(Inf * ones(npolys), sum(ncells))
x0::Vector{Float64} = repeat([10. ^-i for i in 0:(npolys-1)], sum(ncells))
end
"""
"""
mutable struct θμ_poly{T1} <: DDMθ
θμ::T1
ncells::Vector{Int}
npolys::Int
end
"""
"""
function optimize(data, options::μ_poly_options;
x_tol::Float64=1e-10, f_tol::Float64=1e-6, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true,
outer_iterations::Int=Int(1e1), α1::Float64=0.)
@unpack fit, lb, ub, x0, ncells, npolys = options
θ = θμ_poly(x0, ncells, npolys)
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -loglikelihood(stack(x,c,fit), data, θ)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations)
x = Optim.minimizer(output)
x = stack(x,c,fit)
θ = θμ_poly(x, ncells, npolys)
model = neural_poly_DDM(θ, data)
converged = Optim.converged(output)
return model, output
end
function loglikelihood(x::Vector{T1}, data::Vector{Vector{T2}}, θ::θμ_poly) where {T1 <: Real, T2 <: neuraldata}
@unpack ncells, npolys = θ
θ = θμ_poly(x, ncells, npolys)
loglikelihood(θ, data)
end
"""
"""
function θμ_poly(x::Vector{T}, ncells::Vector{Int}, npolys::Int) where {T <: Real}
dims2 = vcat(0,cumsum(ncells))
blah = Tuple.(collect(partition(x, npolys)))
blah2 = map(x-> Poly(collect(x)), blah)
θμ = map(idx-> blah2[idx], [dims2[i]+1:dims2[i+1] for i in 1:length(dims2)-1])
θμ_poly(θμ, ncells, npolys)
end
"""
"""
function loglikelihood(θ::θμ_poly, data::Vector{Vector{T1}}) where {T2 <: Real, T1 <: neuraldata}
@unpack θμ = θ
sum(map((θμ, data) -> sum(loglikelihood.(Ref(θμ), data)), θμ, data))
end
"""
"""
function loglikelihood(θμ::Vector{Poly{T2}},
data::neuraldata) where {T2 <: Real}
@unpack spikes, input_data = data
@unpack binned_clicks, dt, pad = input_data
@unpack nT = binned_clicks
λ = map(θμ-> softplus.(θμ(1:nT+2*pad)), θμ)
sum(logpdf.(Poisson.(vcat(λ...)*dt), vcat(spikes...)))
end
=#
"""
RBF
"""
"""
"""
@with_kw struct neural_poly_DDM{T,U} <: DDM
θ::T
data::U
end
@with_kw struct μ_RBF_options
ncells::Vector{Int}
nRBFs::Int = 6
fit::Vector{Bool} = trues(sum(ncells)*nRBFs)
lb::Vector{Float64} = repeat(zeros(nRBFs), sum(ncells))
ub::Vector{Float64} = repeat(Inf * ones(nRBFs), sum(ncells))
x0::Vector{Float64} = repeat([1. for i in 0:(nRBFs-1)], sum(ncells))
end
"""
"""
mutable struct θμ_RBF{T1} <: DDMθ
θμ::T1
ncells::Vector{Int}
nRBFs::Int
end
function train_and_test(data, options::μ_RBF_options; seed::Int=1, nRBFs = 2:10)
ntrials = length(data)
train = sample(Random.seed!(seed), 1:ntrials, ceil(Int, 0.9 * ntrials), replace=false)
test = setdiff(1:ntrials, train)
ncells = options.ncells;
model = pmap(nRBF-> optimize([data[train]], μ_RBF_options(ncells=ncells, nRBFs=nRBF); show_trace=false)[1], nRBFs)
testLL = map(model-> loglikelihood(model.θ, [data[test]]), model)
return nRBFs, model, testLL
end
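#=
A usage sketch for selecting the number of RBFs (`N` is a hypothetical cell count):

    nRBFs, models, testLL = train_and_test(data, μ_RBF_options(ncells=[N]); nRBFs=2:6)
    best = nRBFs[argmax(testLL)]
=#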
function optimize(data, options::μ_RBF_options;
x_tol::Float64=1e-10, f_tol::Float64=1e-6, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true,
outer_iterations::Int=Int(1e1), α1::Float64=0.)
@unpack fit, lb, ub, x0, ncells, nRBFs = options
θ = θμ_RBF(x0, ncells, nRBFs)
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -loglikelihood(stack(x,c,fit), data, θ)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations)
x = Optim.minimizer(output)
x = stack(x,c,fit)
θ = θμ_RBF(x, ncells, nRBFs)
model = neural_poly_DDM(θ, data)
converged = Optim.converged(output)
return model, output
end
function loglikelihood(x::Vector{T1}, data::Vector{Vector{T2}}, θ::θμ_RBF) where {T1 <: Real, T2 <: neuraldata}
@unpack ncells, nRBFs = θ
θ = θμ_RBF(x, ncells, nRBFs)
loglikelihood(θ, data)
end
"""
"""
function θμ_RBF(x::Vector{T}, ncells::Vector{Int}, nRBFs::Int) where {T <: Real}
dims2 = vcat(0,cumsum(ncells))
blah = Tuple.(collect(partition(x, nRBFs)))
blah2 = map(x-> collect(x), blah)
θμ = map(idx-> blah2[idx], [dims2[i]+1:dims2[i+1] for i in 1:length(dims2)-1])
θμ_RBF(θμ, ncells, nRBFs)
end
"""
"""
function loglikelihood(θ::θμ_RBF, data::Vector{Vector{T1}}) where {T2 <: Real, T1 <: neuraldata}
@unpack θμ, nRBFs = θ
pad = data[1][1].input_data.pad
maxnT = maximum(vcat(map(data-> map(data-> data.input_data.binned_clicks.nT, data), data)...))
x = 1:maxnT+2*pad
rbf = UniformRBFE(x, nRBFs, normalize=true)
sum(map((θμ, data) -> sum(loglikelihood.(Ref(θμ), data, Ref(rbf))), θμ, data))
end
"""
"""
function loglikelihood(θμ::Vector{Vector{T2}},
data::neuraldata, rbf) where {T2 <: Real}
@unpack spikes, input_data = data
@unpack binned_clicks, dt, pad = input_data
@unpack nT = binned_clicks
x = 1:nT+2*pad
#λ = map(θμ-> max.(0., rbf(x) * θμ), θμ)
#λ = map(θμ-> softplus.(rbf(x) * θμ), θμ)
λ = map(θμ-> rbf(x) * θμ, θμ)
sum(logpdf.(Poisson.(vcat(λ...)*dt), vcat(spikes...)))
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 4187 | """
    initalize(data, f)
Returns: initialization of the neural tuning-curve parameters (the module-defined type `θy`), via a noiseless fit.
"""
function initalize(data, f::Vector{Vector{String}})
θy0 = θy.(data, f)
x0 = vcat([0., 15., 0. - eps(), 0., 0., 1.0 - eps(), 0.008], vcat(vcat(θy0...)...))
θ = θneural(x0, f)
fitbool, lb, ub = neural_options_noiseless(f)
model0 = noiseless_neuralDDM(θ=θ, fit=fitbool, lb=lb, ub=ub)
model0, = fit(model0, data; iterations=10, outer_iterations=1)
return model0
end
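#=
A usage sketch (assumes `data` and `ncells` come from load_neural_data):

    f = all_Softplus(ncells)
    model0 = initalize(data, f)
=#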
"""
    fit(model, data)
Fit model parameters for a `noiseless_neuralDDM`.
Arguments:
- `model`: an instance of a `noiseless_neuralDDM`, which carries the optimization details, such as which parameters are fit (`fit`) and the upper (`ub`) and lower (`lb`) bounds of those parameters.
- `data`: the neural data.
Returns:
- `model`: an instance of a `noiseless_neuralDDM`.
- `output`: results from [`Optim.optimize`](@ref).
"""
function fit(model::noiseless_neuralDDM, data;
x_tol::Float64=1e-10, f_tol::Float64=1e-6, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=false,
outer_iterations::Int=Int(1e1))
@unpack fit, lb, ub, θ = model
@unpack f = θ
x0 = PulseInputDDM.flatten(θ)
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -loglikelihood(stack(x,c,fit), model, data)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations)
x = Optim.minimizer(output)
x = stack(x,c,fit)
model.θ = θneural(x, f)
return model, output
end
"""
loglikelihood(x, model)
A wrapper function that accepts a vector of mixed parameters, splits the vector
into two vectors based on the parameter mapping function provided as an input. Used
in optimization, Hessian and gradient computation.
"""
function loglikelihood(x::Vector{T1}, model::noiseless_neuralDDM, data) where {T1 <: Real}
@unpack θ,fit,lb,ub = model
@unpack f = θ
model = noiseless_neuralDDM(θ=θneural(x, f),fit=fit,lb=lb,ub=ub)
loglikelihood(model, data)
end
"""
"""
function loglikelihood(model::noiseless_neuralDDM, data)
@unpack θ = model
@unpack θz, θy = θ
sum(map((θy, data) -> sum(pmap(data-> loglikelihood(θz, θy, data), data,
batch_size=length(data))), θy, data))
end
"""
"""
function loglikelihood(θz::θz, θy::Vector{T1}, data::neuraldata) where T1 <: DDMf
@unpack spikes, input_data = data
@unpack λ0, dt = input_data
#ΔLR = diffLR(data)
ΔLR = rand(θz, input_data)
#λ = loglikelihood(θz,θy,λ0,ΔLR)
λ = map((θy,λ0)-> θy(ΔLR, λ0), θy, λ0)
sum(logpdf.(Poisson.(vcat(λ...)*dt), vcat(spikes...)))
end
#=
"""
"""
function loglikelihood(θz::θz, θy::Vector{T1}, λ0, ΔLR) where T1 <: DDMf
#@unpack λ0, dt = input_data
#a = rand(θz,input_data)
λ = map((θy,λ0)-> θy(ΔLR, λ0), θy, λ0)
#return λ, a
end
=#
"""
"""
function θy(data, f::Vector{String})
ΔLR = diffLR.(data)
spikes = group_by_neuron(data)
@unpack dt = data[1].input_data
map((spikes,f)-> θy(vcat(ΔLR...), vcat(spikes...), dt, f), spikes, f)
end
"""
"""
function θy(ΔLR, spikes, dt, f; nconds::Int=7)
conds_bins = encode(LinearDiscretizer(binedges(DiscretizeUniformWidth(nconds), ΔLR)), ΔLR)
rate = map(i -> (1/dt)*mean(spikes[conds_bins .== i]), 1:nconds)
c = hcat(ones(size(ΔLR, 1)), ΔLR) \ spikes
if (f == "Sigmoid")
#p = vcat(minimum(rate) - mean(rate), maximum(rate)- mean(rate), c[2])
p = vcat(minimum(rate) - mean(rate), maximum(rate)- mean(rate), c[2], 0.)
#p = vcat(minimum(rate), maximum(rate)- minimum(rate), c[2], 0.)
elseif f == "Softplus"
#p = vcat(minimum(rate) - mean(rate), (1/dt)*c[2], 0.)
#p = vcat(eps(), (1/dt)*c[2], 0.)
#p = vcat(minimum(rate) - mean(rate), (1/dt)*c[2])
#p = vcat((1/dt)*c[2], 0.)
p = vcat((1/dt)*c[2])
end
#added to avoid a log(0) problem later, since the rate function cannot be negative
p[1] == 0. ? p[1] += 1e-1 : nothing
return p
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 8707 | """
flatten(θ)
Extract the parameters of a `neuralDDM` and place them in the correct order into a 1D `array`.
"""
function flatten(θ::θneural)
@unpack θy, θz = θ
@unpack σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
vcat(σ2_i, B, λ, σ2_a, σ2_s, ϕ, τ_ϕ,
vcat(collect.(Flatten.flatten.(vcat(θy...)))...))
end
"""
gradient(model)
Compute the gradient of the negative log-likelihood at the current value of the parameters of a `neuralDDM`.
"""
function gradient(model::neuralDDM, data)
@unpack θ = model
x = flatten(θ)
ℓℓ(x) = -loglikelihood(x, model, data)
ForwardDiff.gradient(ℓℓ, x)::Vector{Float64}
end
"""
    Hessian(model, data; chunk_size)
Compute the hessian of the negative log-likelihood at the current value of the parameters of a `neuralDDM`.
Arguments:
- `model`: instance of `neuralDDM`
Optional arguments:
- `chunk_size`: parameter to manage how many passes over the LL are required to compute the Hessian. Can be larger if you have access to more memory.
"""
function Hessian(model::neuralDDM, data; chunk_size::Int=4)
@unpack θ = model
x = flatten(θ)
ℓℓ(x) = -loglikelihood(x, model, data)
cfg = ForwardDiff.HessianConfig(ℓℓ, x, ForwardDiff.Chunk{chunk_size}())
ForwardDiff.hessian(ℓℓ, x, cfg)
end
"""
Optimize model parameters for a `neuralDDM`.
$(SIGNATURES)
Arguments:
- `model`: an instance of a `neuralDDM`.
- `data`: the neural data.
Returns:
- `model`: an instance of a `neuralDDM`.
- `output`: results from [`Optim.optimize`](@ref).
"""
function fit(model::neuralDDM, data;
x_tol::Float64=1e-10, f_tol::Float64=1e-9, g_tol::Float64=1e-3,
iterations::Int=Int(2e3), show_trace::Bool=true, outer_iterations::Int=Int(1e1),
scaled::Bool=false, extended_trace::Bool=false)
@unpack fit, lb, ub, θ, n, cross = model
@unpack f = θ
x0 = PulseInputDDM.flatten(θ)
lb, = unstack(lb, fit)
ub, = unstack(ub, fit)
x0,c = unstack(x0, fit)
ℓℓ(x) = -loglikelihood(stack(x,c,fit), model, data)
output = optimize(x0, ℓℓ, lb, ub; g_tol=g_tol, x_tol=x_tol,
f_tol=f_tol, iterations=iterations, show_trace=show_trace,
outer_iterations=outer_iterations, scaled=scaled,
extended_trace=extended_trace)
x = Optim.minimizer(output)
x = stack(x,c,fit)
model.θ = θneural(x, f)
return model, output
end
"""
loglikelihood(x, model)
Maps `x` into `model`. Used in optimization, Hessian and gradient computation.
Arguments:
- `x`: a vector of mixed parameters.
- `model`: an instance of `neuralDDM`
"""
function loglikelihood(x::Vector{T}, model::neuralDDM, data) where {T <: Real}
@unpack θ,n,cross,fit,lb,ub = model
@unpack f = θ
model = neuralDDM(θ=θneural(x, f), n=n, cross=cross,fit=fit, lb=lb, ub=ub)
loglikelihood(model, data)
end
"""
loglikelihood(model)
Arguments: a `neuralDDM` instance and the data.
Returns: loglikelihood of the data given the parameters.
"""
function loglikelihood(model::neuralDDM, data)
sum(sum.(loglikelihood_pertrial(model, data)))
end
"""
loglikelihood_pertrial(model)
Arguments: a `neuralDDM` instance and the data.
Returns: loglikelihood of the data given the parameters, per trial.
"""
function loglikelihood_pertrial(model::neuralDDM, data)
@unpack θ,n,cross = model
@unpack θz, θy = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1][1].input_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
map((data, θy) -> pmap(data -> loglikelihood(θz,θy,data, P, M, xc, dx, n, cross), data), data, θy)
end
"""
"""
loglikelihood(θz,θy,data::neuraldata, P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real} = sum(log.(likelihood(θz,θy,data,P,M,xc,dx,n,cross)[1]))
#=
function likelihood(θz,θy,data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
@unpack spikes, input_data = data
@unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross)
F = zeros(T1,n,n) #empty transition matrix for time bins with clicks
time_bin = (-(pad-1):nT+pad) .- delay
alpha = log.(P)
@inbounds for t = 1:length(time_bin)
mm = maximum(alpha)
py = vcat(map(xc-> sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t]) * dt), k[t]), spikes, θy, λ0)), xc)...)
if time_bin[t] >= 1
any(t .== nL) ? sL = sum(La[t .== nL]) : sL = zero(T1)
any(t .== nR) ? sR = sum(Ra[t .== nR]) : sR = zero(T1)
σ2 = σ2_s * (sL + sR); μ = -sL + sR
if (sL + sR) > zero(T1)
transition_M!(F,σ2+σ2_a*dt,λ, μ, dx, xc, n, dt)
alpha = log.((exp.(alpha .- mm)' * F)') .+ mm .+ py
else
alpha = log.((exp.(alpha .- mm)' * M)') .+ mm .+ py
end
else
alpha = alpha .+ py
end
end
return exp(logsumexp(alpha)), exp.(alpha)
end
=#
"""
"""
function likelihood(θz,θy,data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
@unpack spikes, input_data = data
@unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross)
F = zeros(T1,n,n) #empty transition matrix for time bins with clicks
time_bin = (-(pad-1):nT+pad) .- delay
c = Vector{T1}(undef, length(time_bin))
@inbounds for t = 1:length(time_bin)
if time_bin[t] >= 1
P, F = latent_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t], nL, nR, La, Ra, M, dx, xc, n, dt)
end
#note: the in-place version below was not working, so the out-of-place broadcast is used instead
#P .*= vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t]) * dt),
# k[t]), spikes, θy, λ0))), xc)...)
P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t]) * dt),
k[t]), spikes, θy, λ0))), xc)...))
c[t] = sum(P)
P /= c[t]
end
return c, P
end
"""
"""
function posterior(model::neuralDDM, data)
@unpack θ,n,cross = model
@unpack θz, θy = θ
@unpack σ2_i, B, λ, σ2_a = θz
@unpack dt = data[1][1].input_data
P,M,xc,dx = initialize_latent_model(σ2_i, B, λ, σ2_a, n, dt)
map((data, θy) -> pmap(data -> posterior(θz,θy,data, P, M, xc, dx, n, cross), data), data, θy)
end
"""
"""
function posterior(θz::θz, θy, data::neuraldata,
P::Vector{T1}, M::Array{T1,2},
xc::Vector{T1}, dx::T3, n, cross) where {T1,T3 <: Real}
@unpack λ, σ2_a, σ2_s, ϕ, τ_ϕ = θz
@unpack spikes, input_data = data
@unpack binned_clicks, clicks, dt, λ0, centered, delay, pad = input_data
@unpack nT, nL, nR = binned_clicks
@unpack L, R = clicks
#adapt magnitude of the click inputs
La, Ra = adapt_clicks(ϕ,τ_ϕ,L,R;cross=cross)
time_bin = (-(pad-1):nT+pad) .- delay
c = Vector{T1}(undef, length(time_bin))
F = zeros(T1,n,n) #empty transition matrix for time bins with clicks
α = Array{Float64,2}(undef, n, length(time_bin))
β = Array{Float64,2}(undef, n, length(time_bin))
@inbounds for t = 1:length(time_bin)
if time_bin[t] >= 1
P, F = latent_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t], nL, nR, La, Ra, M, dx, xc, n, dt)
end
P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t]) * dt),
k[t]), spikes, θy, λ0))), xc)...))
c[t] = sum(P)
P /= c[t]
α[:,t] = P
end
P = ones(Float64,n) #initialize backward pass with all 1's
β[:,end] = P
@inbounds for t = length(time_bin)-1:-1:1
P = P .* (vcat(map(xc-> exp(sum(map((k,θy,λ0)-> logpdf(Poisson(θy(xc,λ0[t+1]) * dt),
k[t+1]), spikes, θy, λ0))), xc)...))
if time_bin[t] >= 0
P,F = backward_one_step!(P, F, λ, σ2_a, σ2_s, time_bin[t+1], nL, nR, La, Ra, M, dx, xc, n, dt)
end
P /= c[t+1]
β[:,t] = P
end
return α, β, xc
end
"""
"""
function logistic!(x::T) where {T <: Any}
if x >= 0.
x = exp(-x)
x = 1. / (1. + x)
else
x = exp(x)
x = x / (1. + x)
end
return x
end | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
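#=
Both branches agree and avoid Inf/Inf for large |x|:

    logistic!(0.)      #0.5
    logistic!(-1000.)  #0.0, computed without overflow
=#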
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 36524 | function remean(data, μ_rnt; do_RBF=true, nRBFs=6)
spikes = getfield.(data, :spikes)
input_data = getfield.(data, :input_data)
choice = getfield.(data, :choice)
binned_clicks = getfield.(input_data, :binned_clicks)
clicks = getfield.(input_data, :clicks)
nT = getfield.(binned_clicks, :nT)
@unpack dt, centered, delay, pad = input_data[1]
ncells = data[1].ncells
μ_t = Vector(undef, ncells)
for n = 1:ncells
μ_t[n] = map(n->[max(0., mean([μ_rnt[i][n][t]
for i in findall(nT .+ 2*pad .>= t)]))
for t in 1:(maximum(nT) .+ 2*pad)], n:n)
λ0 = map(nT-> bin_λ0(μ_t[n], nT+2*pad), nT)
input_data = neuralinputs(clicks, binned_clicks, λ0, dt, centered, delay, pad)
spike_data = neuraldata(input_data, map(x-> [x[n]], spikes), 1, choice)
if do_RBF
model, = optimize([spike_data], pulse_input_DDM.μ_RBF_options(ncells=[1], nRBFs=nRBFs); show_trace=false)
maxnT = maximum(nT)
x = 1:maxnT+2*pad
rbf = UniformRBFE(x, nRBFs, normalize=true)
μ_t[n] = [rbf(x) * model.θ.θμ[1][1]]
end
end
μ_t = map(x-> x[1], μ_t);
λ0 = map(nT-> bin_λ0(μ_t, nT+2*pad), nT)
input_data = neuralinputs(clicks, binned_clicks, λ0, dt, centered, delay, pad)
spike_data = neuraldata(input_data, spikes, ncells, choice)
return spike_data, μ_rnt, μ_t
end
"""
"""
function save(file, model::neuralDDM, options, CI)
@unpack lb, ub, fit = options
@unpack θ = model
dict = Dict("ML_params"=> collect(pulse_input_DDM.flatten(θ)),
"lb"=> lb, "ub"=> ub, "fit"=> fit,
"CI" => CI)
matwrite(file, dict)
#=
if !isempty(H)
#dict["H"] = H
hfile = matopen(path*"hessian_"*file, "w")
write(hfile, "H", H)
close(hfile)
end
=#
end
"""
save_neural_model(file, model, options)
Given a `file`, `model` and `options` produced by `optimize`, save everything to a `.MAT` file in such a way that `reload_neural_model` can bring these things back into a Julia workspace, or they can be loaded in MATLAB.
See also: [`reload_neural_model`](@ref)
"""
function save_neural_model(file, model::Union{neuralDDM, neural_choiceDDM}, data)
@unpack θ, n, cross, lb, ub, fit = model
@unpack f = θ
@unpack dt, delay, pad = data[1][1].input_data
nparams, ncells = nθparams(f)
dict = Dict("ML_params"=> collect(PulseInputDDM.flatten(θ)),
"lb"=> lb, "ub"=> ub, "fit"=> fit, "n"=> n, "cross"=> cross,
"dt"=> dt, "delay"=> delay, "pad"=> pad, "f"=> vcat(vcat(f...)...),
"nparams" => nparams, "ncells" => ncells)
matwrite(file, dict)
#=
if !isempty(H)
#dict["H"] = H
hfile = matopen(path*"hessian_"*file, "w")
write(hfile, "H", H)
close(hfile)
end
=#
end
"""
reload_neural_model(file)
`reload_neural_model` will bring back the parameters from your fit, some details about the optimization (such as the `fit` and bounds vectors) and some details about how you filtered the data. The data itself is not saved, because it is too cumbersome to serialize, so you have to load it again with `load_neural_data`, as above, to re-build a `neuralDDM`. The filtering settings saved here (such as `pad` and `dt`) let you reload the data in the same way.
Returns:
- a `neuralDDM` built from the saved parameters (`θneural`), fit mask, bounds, `n` and `cross`. (The saved `dt`, `delay` and `pad` are read from the file but are only needed for reloading the data.)
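Example:

A minimal sketch (the file name is illustrative):

```julia
model = reload_neural_model("fit_session1.mat")
```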
See also: [`save_neural_model`](@ref)
"""
function reload_neural_model(file)
xf = read(matopen(file), "ML_params")
f = string.(read(matopen(file), "f"))
ncells = collect(read(matopen(file), "ncells"))
nparams = read(matopen(file), "nparams")
borg = vcat(0,cumsum(ncells, dims=1))
nparams = [nparams[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
f = [f[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
lb = read(matopen(file), "lb")
ub = read(matopen(file), "ub")
fitbool = read(matopen(file), "fit")
n = read(matopen(file), "n")
cross = read(matopen(file), "cross")
dt = read(matopen(file), "dt")
delay = read(matopen(file), "delay")
pad = read(matopen(file), "pad")
neuralDDM(θ=θneural(xf, f),fit=fitbool,lb=lb,ub=ub, n=n, cross=cross)
end
"""
load_neural_data(file::Vector{String}; centered, dt, delay, pad, filtSD, extra_pad, cut, pcut)
Calls `load_neural_data` for each entry in `file` and then creates four array outputs (`spike_data`, `μ_rnt`, `μ_t`, `cpoke_out`) where each entry of an array is the relevant data for a single session.
Returns:
- `data`: an `array` of length number of sessions. Each entry is for a session, and is another `array`. Each entry of the sub-array is the relevant data for a trial.
- `μ_rnt`: an `array` of length number of sessions. Each entry is another `array` of length number of trials. Each entry of the sub-array is also an `array`, of length number of cells. Each entry of that array is the filtered single-trial firing rate of each neuron.
- `μ_t`: an `array` of length number of sessions. Each entry is an `array` of length number of cells. Each entry is the trial-averaged firing rate (across all trials).
- `cpoke_out`: an `array` of length number of sessions. Each entry gives, for each trial, the difference between the `cpoke_out` and `cpoke_end` behavioral events.
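Example:

A sketch assuming two session files (the paths are illustrative):

```julia
files = ["session1.mat", "session2.mat"]
spike_data, μ_rnt, μ_t, cpoke_out = load_neural_data(files; dt=1e-2, pcut=0.01)
```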
"""
function load_neural_data(file::Vector{String}; break_sim_data::Bool=false,
centered::Bool=true, dt::Float64=1e-2, delay::Int=0, pad::Int=0, filtSD::Int=2,
extra_pad::Int=10, cut::Int=10, pcut::Float64=0.01,
do_RBF::Bool=true, nRBFs::Int=6)
output = load_neural_data.(file; break_sim_data=break_sim_data,
centered=centered,
dt=dt, delay=delay, pad=pad, filtSD=filtSD,
extra_pad=extra_pad, cut=cut, pcut=pcut,
do_RBF=do_RBF, nRBFs=nRBFs)
output = filter(x -> x != nothing, output)
spike_data = getindex.(output, 1)
μ_rnt = getindex.(output, 2)
μ_t = getindex.(output, 3)
cpoke_out = getindex.(output, 4)
spike_data, μ_rnt, μ_t, cpoke_out
end
"""
load_neural_data(file::String; centered, dt, delay, pad, filtSD, extra_pad, cut, pcut)
Load a neural data `.MAT` file and return four outputs. The first is the `data` formatted correctly for fitting the model. Each element of `data` is a module-defined class called `neuraldata`.
The package expects your data to live in a single `.MAT` file which should contain a struct called `rawdata`. Each element of `rawdata` should have data for one behavioral trial and `rawdata` should contain the following fields with the specified structure:
- `rawdata.leftbups`: row-vector containing the relative timing, in seconds, of left clicks on an individual trial. 0 seconds is the start of the click stimulus
- `rawdata.rightbups`: row-vector containing the relative timing in seconds (origin at 0 sec) of right clicks on an individual trial. 0 seconds is the start of the click stimulus.
- `rawdata.T`: the duration of the trial, in seconds. The beginning of a trial is defined as the start of the click stimulus. The end of a trial is defined based on the behavioral event “cpoke_end”. This was the Hanks convention.
- `rawdata.pokedR`: Bool representing the animal choice (1 = right).
- `rawdata.spike_times`: cell array containing the spike times of each neuron on an individual trial. The cell array will be length of the number of neurons recorded on that trial. Each entry of the cell array is a column vector containing the relative timing of spikes, in seconds. Zero seconds is the start of the click stimulus. Spikes before and after the click inputs should also be included.
Arguments:
- `file`: path to the file you want to load.
Optional arguments:
- `break_sim_data`: this will break up simultaneously recorded neurons, as if they were recorded independently. Not often used by most users.
- `centered`: Defaults to true. For the neural model, this aligns the center of the binned spikes, to the beginning of the binned clicks. This was done to fix a numerical problem. Most users will never need to adjust this.
- `dt`: Binning of the spikes, in seconds.
- `delay`: How much to offset the spikes, relative to the accumulator, in units of `dt`.
- `pad`: How much extra time (in units of `dt`) over which spikes are considered, before and after the beginning of the clicks. Useful especially if `delay` is large.
- `filtSD`: standard deviation of a Gaussian kernel (in units of `dt`) used to filter the spikes, to generate single-trial firing rates (`μ_rnt`) and the mean firing rate across all trials (`μ_t`).
- `extra_pad`: Extra padding (in addition to `pad`) to add, for filtering purposes. In units of `dt`.
- `cut`: How much extra to cut off at the beginning and end of filtered things (should be equal to `extra_pad` in most cases).
- `pcut`: p-value for selecting cells.
Returns:
- `data`: an `array` of length number of trials. Each element is a module-defined class called `neuraldata`.
- `μ_rnt`: an `array` of length number of trials. Each entry is an `array` of length number of cells. Each entry of that array is the filtered single-trial firing rate of each neuron.
- `μ_t`: an `array` of length number of cells. Each entry is the trial-averaged firing rate (across all trials).
- `cpoke_out`: for each trial, the difference between the `cpoke_out` and `cpoke_end` behavioral events.
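Example:

A minimal sketch (the file name is illustrative). `nothing` is returned when no cell passes the `pcut` selection test:

```julia
out = load_neural_data("session1.mat"; dt=1e-2, pcut=0.01)
if !isnothing(out)
    spike_data, μ_rnt, μ_t, cpoke_out = out
end
```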
"""
function load_neural_data(file::String; break_sim_data::Bool=false,
dt::Float64=1e-2, delay::Int=0, pad::Int=0, filtSD::Int=2,
extra_pad::Int=10, cut::Int=10, pcut::Float64=0.01,
do_RBF::Bool=true, nRBFs::Int=6, centered::Bool=true)
data = read(matopen(file), "rawdata")
if !haskey(data, "spike_times")
data["spike_times"] = data["St"]
end
if haskey(data, "cpoke_out")
cpoke_out = data["cpoke_out"]
cpoke_end = data["cpoke_end"]
else
cpoke_out = data["cpoke_end"]
cpoke_end = data["cpoke_end"]
end
T = vec(data["T"])
L = vec(map(x-> vec(collect(x)), data[collect(keys(data))[occursin.("left", collect(keys(data)))][1]]))
R = vec(map(x-> vec(collect(x)), data[collect(keys(data))[occursin.("right", collect(keys(data)))][1]]))
click_times = clicks.(L, R, T)
binned_clicks = bin_clicks(click_times, centered=centered, dt=dt)
nT = map(x-> x.nT, binned_clicks)
ncells = size(data["spike_times"][1], 2)
spikes = vec(map(x-> vec(vec.(collect.(x))), data["spike_times"]))
output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=0), spikes, nT)
spikes = getindex.(output, 1)
FR = map(i-> map((x,T)-> sum(x[i])/T, spikes, T), 1:ncells)
choice = vec(convert(BitArray, data["pokedR"]))
pval = map(x-> pvalue(EqualVarianceTTest(x[choice], x[.!choice])), FR)
ptest = pval .< pcut
if any(ptest)
ncells = sum(ptest)
if break_sim_data
spike_data = Vector{Vector{neuraldata}}(undef, ncells)
μ_rnt = Vector(undef, ncells)
μ_t = Vector(undef, ncells)
for n = 1:ncells
spikes = vec(map(x-> [vec(collect(x[findall(ptest)][n]))], data["spike_times"]))
output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=pad), spikes, nT)
spikes = getindex.(output, 1)
padded = getindex.(output, 2)
μ_rnt[n] = filtered_rate.(padded, dt; filtSD=filtSD, cut=cut)
μ_t[n] = map(n-> [max(0., mean([μ_rnt[n][i][1][t]
for i in findall(nT .+ 2*pad .>= t)]))
for t in 1:(maximum(nT) .+ 2*pad)], n:n)
λ0 = map(nT-> bin_λ0(μ_t[n], nT+2*pad), nT)
#λ0 = map(nT-> map(μ_t-> zeros(nT), μ_t), nT)
input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad)
spike_data[n] = neuraldata(input_data, spikes, 1, choice)
if do_RBF
model, = optimize([spike_data[n]], μ_RBF_options(ncells=[1], nRBFs=nRBFs); show_trace=false)
maxnT = maximum(nT)
x = 1:maxnT+2*pad
rbf = UniformRBFE(x, nRBFs, normalize=true)
μ_t[n] = [rbf(x) * model.θ.θμ[1][1]]
end
#model, = optimize([spike_data[n]], μ_poly_options(ncells=[1]); show_trace=false)
#μ_t[n] = [model.θ.θμ[1][1](1:length(μ_t[n][1]))]
λ0 = map(nT-> bin_λ0(μ_t[n], nT+2*pad), nT)
input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad)
spike_data[n] = neuraldata(input_data, spikes, 1, choice)
end
else
#=
spikes = vec(map(x-> vec(vec.(collect.(x))), data["spike_times"]))
output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=pad), spikes, nT)
spikes = getindex.(output, 1)
padded = getindex.(output, 2)
spikes = map(spikes-> spikes[ptest], spikes)
padded = map(padded-> padded[ptest], padded)
μ_rnt = filtered_rate.(padded, dt; filtSD=filtSD, cut=cut)
μ_t = map(n-> [max(0., mean([μ_rnt[i][n][t]
for i in findall(nT .+ 2*pad .>= t)]))
for t in 1:(maximum(nT) .+ 2*pad)], 1:ncells)
#μ_t = map(n-> [max(0., mean([spikes[i][n][t]/dt
# for i in findall(nT .>= t)]))
# for t in 1:(maximum(nT))], 1:ncells)
λ0 = map(nT-> bin_λ0(μ_t, nT+2*pad), nT)
#λ0 = map(nT-> map(μ_t-> zeros(nT), μ_t), nT)
input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad)
spike_data = neuraldata(input_data, spikes, ncells)
nRBFs=6
model, = optimize([spike_data], μ_RBF_options(ncells=[ncells], nRBFs=nRBFs); show_trace=false)
maxnT = maximum(nT)
x = 1:maxnT+2*pad
rbf = UniformRBFE(x, nRBFs, normalize=true)
μ_t = map(n-> rbf(x) * model.θ.θμ[1][n], 1:ncells)
#model, = optimize([spike_data], μ_poly_options(ncells=[ncells]); show_trace=false)
#μ_t = map(n-> model.θ.θμ[1][n](1:length(μ_t[n])), 1:ncells)
=#
μ_t = Vector(undef, ncells)
for n = 1:ncells
spikes = vec(map(x-> [vec(collect(x[findall(ptest)][n]))], data["spike_times"]))
output = map((spikes, nT)-> PulseInputDDM.bin_spikes(spikes, dt, nT; pad=pad), spikes, nT)
spikes = getindex.(output, 1)
padded = getindex.(output, 2)
μ_rnt = PulseInputDDM.filtered_rate.(padded, dt; filtSD=filtSD, cut=cut)
μ_t[n] = map(n-> [max(0., mean([μ_rnt[i][1][t]
for i in findall(nT .+ 2*pad .>= t)]))
for t in 1:(maximum(nT) .+ 2*pad)], n:n)
λ0 = map(nT-> PulseInputDDM.bin_λ0(μ_t[n], nT+2*pad), nT)
input_data = PulseInputDDM.neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad)
spike_data = PulseInputDDM.neuraldata(input_data, spikes, 1, choice)
if do_RBF
model, = optimize([spike_data], PulseInputDDM.μ_RBF_options(ncells=[1], nRBFs=nRBFs); show_trace=false)
maxnT = maximum(nT)
x = 1:maxnT+2*pad
rbf = UniformRBFE(x, nRBFs, normalize=true)
μ_t[n] = [rbf(x) * model.θ.θμ[1][1]]
end
end
μ_t = map(x-> x[1], μ_t);
spikes = vec(map(x-> vec(vec.(collect.(x))), data["spike_times"]))
output = map((spikes, nT)-> bin_spikes(spikes, dt, nT; pad=pad), spikes, nT)
spikes = getindex.(output, 1)
padded = getindex.(output, 2)
spikes = map(spikes-> spikes[ptest], spikes)
padded = map(padded-> padded[ptest], padded)
μ_rnt = filtered_rate.(padded, dt; filtSD=filtSD, cut=cut)
λ0 = map(nT-> bin_λ0(μ_t, nT+2*pad), nT)
input_data = neuralinputs(click_times, binned_clicks, λ0, dt, centered, delay, pad)
spike_data = neuraldata(input_data, spikes, ncells, choice)
end
return spike_data, μ_rnt, μ_t, cpoke_out - cpoke_end
else
return nothing
end
end
"""
"""
#function bin_clicks_spikes_λ0(data::Dict; centered::Bool=true,
# dt::Float64=1e-2, delay::Float64=0., pad::Int=10, filtSD::Int=5)
function bin_clicks_spikes_λ0(spikes, clicks, λ0; centered::Bool=true,
dt::Float64=1e-2, delay::Float64=0., dt_synthetic::Float64=1e-4,
synthetic::Bool=false)
spikes = bin_spikes(spikes, dt, dt_synthetic)
binned_clicks = bin_clicks(clicks, centered=centered, dt=dt)
λ0 = bin_λ0(λ0, dt, dt_synthetic)
return spikes, binned_clicks, λ0
end
"""
"""
bin_λ0(λ0::Vector{Vector{Vector{Float64}}}, dt, dt_synthetic) = bin_λ0.(λ0, dt, dt_synthetic)
"""
"""
#bin_λ0(λ0::Vector{Vector{Float64}}, dt, dt_synthetic) = decimate.(λ0, Int(dt/dt_synthetic))
bin_λ0(λ0::Vector{Vector{Float64}}, dt, dt_synthetic) =
map(λ0-> mean.(Iterators.partition(λ0, Int(dt/dt_synthetic))), λ0)
"""
"""
bin_spikes(spikes::Vector{Vector{Vector{Int}}}, dt, dt_synthetic) = bin_spikes.(spikes, dt, dt_synthetic)
"""
"""
bin_spikes(spikes::Vector{Vector{Int}}, dt::Float64, dt_synthetic::Float64) =
map(SCn-> sum.(Iterators.partition(SCn, Int(dt/dt_synthetic))), spikes)
"""
"""
function bin_spikes(spike_times::Vector{Vector{Float64}}, dt, nT::Int; pad::Int=20, extra_pad::Int=10)
trial = map(x-> StatsBase.fit(Histogram, vec(collect(x)),
collect(range(-pad*dt, stop=(nT+pad)*dt,
length=(nT+2*pad)+1)), closed=:left).weights, spike_times)
padded = map(x-> StatsBase.fit(Histogram, vec(collect(x)),
collect(range(-(extra_pad+pad)*dt, stop=(nT+pad+extra_pad)*dt,
length=(nT+2*extra_pad+2*pad)+1)), closed=:left).weights, spike_times)
return trial, padded
end
"""
"""
bin_λ0(λ0::Vector{Vector{Float64}}, nT) = map(λ0-> λ0[1:nT], λ0)
"""
"""
function filtered_rate(padded, dt; filtSD::Int=5, cut::Int=10)
kern = reflect(KernelFactors.gaussian(filtSD, 8*filtSD+1));
map(padded-> imfilter(1/dt * padded, kern,
Fill(zero(eltype(padded))))[cut+1:end-cut], padded)
end
"""
process_spike_data(μ_rnt, data)
Arguments:
- `μ_rnt`: `array` of Gaussian-filtered single trial firing rates for all cells and all trials in one session. `μ_rnt` is output from `load_neural_data`.
- `data`: `array` of all trial data for one session. `data` is output from `load_neural_data`.
Optional arguments:
- `nconds`: number of groups to make to compute PSTHs
Returns:
- `μ_ct`: mean PSTH for each group.
- `σ_ct`: standard error of the PSTH (std divided by the square root of the number of trials) for each group.
- `μ_rn`: the time-averaged firing rate of each neuron on each trial.
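Example:

A minimal sketch (assuming `μ_rnt` and `data` from `load_neural_data`):

```julia
μ_ct, σ_ct, μ_rn = process_spike_data(μ_rnt, data; nconds=4)
```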
"""
function process_spike_data(μ_rnt, data; nconds::Int=4)
ncells = data[1].ncells
pad = data[1].input_data.pad
nT = map(x-> x.input_data.binned_clicks.nT, data)
μ_rn = map(n-> map(μ_rnt-> mean(μ_rnt[n]), μ_rnt), 1:ncells)
ΔLRT = map((data,nT) -> getindex(diffLR(data), pad+nT), data, nT)
conds = encode(LinearDiscretizer(binedges(DiscretizeUniformWidth(nconds), ΔLRT)), ΔLRT)
μ_ct = map(n-> map(c-> [mean([μ_rnt[conds .== c][i][n][t]
for i in findall(nT[conds .== c] .+ (2*pad) .>= t)])
for t in 1:(maximum(nT[conds .== c]) .+ (2*pad))], 1:nconds), 1:ncells)
σ_ct = map(n-> map(c-> [std([μ_rnt[conds .== c][i][n][t]
for i in findall(nT[conds .== c] .+ (2*pad) .>= t)]) /
sqrt(length([μ_rnt[conds .== c][i][n][t]
for i in findall(nT[conds .== c] .+ (2*pad) .>= t)]))
for t in 1:(maximum(nT[conds .== c]) .+ (2*pad))], 1:nconds), 1:ncells)
return μ_ct, σ_ct, μ_rn
end
group_by_neuron(data) = [[data[t].spikes[n] for t in 1:length(data)] for n in 1:data[1].ncells]
function filter_data_by_dprime!(data,thresh)
bool_vec = data["d'"] .< thresh
deleteat!(data["μ_t"], bool_vec)
deleteat!(data["μ_ct"], bool_vec)
deleteat!(data["σ_ct"], bool_vec)
deleteat!(data["μ_rn"], bool_vec)
map(x-> deleteat!(x, bool_vec), data["spike_counts_padded"])
map(x-> deleteat!(x, bool_vec), data["spike_counts"])
map(x-> deleteat!(x, bool_vec), data["spike_times"])
map(x-> deleteat!(x, bool_vec), data["μ_rnt"])
map(x-> deleteat!(x, bool_vec), data["λ0"])
map(x-> deleteat!(x, bool_vec), data["cellID"])
deleteat!(data["d'"], bool_vec);
data["N"] = length(data["d'"])
return data
end
function filter_data_by_ΔLL!(data,ΔLL,thresh)
bool_vec = ΔLL .< thresh
deleteat!(data["μ_t"], bool_vec)
deleteat!(data["μ_ct"], bool_vec)
deleteat!(data["σ_ct"], bool_vec)
deleteat!(data["μ_rn"], bool_vec)
map(x-> deleteat!(x, bool_vec), data["spike_counts_padded"])
map(x-> deleteat!(x, bool_vec), data["spike_counts"])
map(x-> deleteat!(x, bool_vec), data["spike_times"])
map(x-> deleteat!(x, bool_vec), data["μ_rnt"])
map(x-> deleteat!(x, bool_vec), data["λ0"])
map(x-> deleteat!(x, bool_vec), data["cellID"])
deleteat!(data["d'"], bool_vec);
data["N"] = length(data["d'"])
return data
end
#################################### Data filtering #########################
#This doesn't work, but might be a good idea to fix up.
function filter_data_by_cell!(data,cell_index)
data["N0"] = length(cell_index)
#organized by neurons, so filter by neurons
data["spike_counts_by_neuron"] = data["spike_counts_by_neuron"][cell_index]
data["trial"] = data["trial"][cell_index]
data["cellID_by_neuron"] = data["cellID_by_neuron"][cell_index]
data["sessID_by_neuron"] = data["sessID_by_neuron"][cell_index]
data["ratID_by_neuron"] = data["ratID_by_neuron"][cell_index]
if (haskey(data,"spike_counts_stimulus_aligned_extended_by_neuron") |
haskey(data, "spike_counts_cpoke_aligned_extended_by_neuron"))
data["spike_counts_stimulus_aligned_extended_by_neuron"] =
data["spike_counts_stimulus_aligned_extended_by_neuron"][cell_index]
data["spike_counts_cpoke_aligned_extended_by_neuron"] =
data["spike_counts_cpoke_aligned_extended_by_neuron"][cell_index]
end
trial_index = unique(collect(vcat(data["trial"]...)))
data["trial0"] = length(trial_index)
#organized by trials, so filter by trials
data["binned_leftbups"] = data["binned_leftbups"][trial_index]
data["binned_rightbups"] = data["binned_rightbups"][trial_index]
data["rightbups"] = data["rightbups"][trial_index]
data["leftbups"] = data["leftbups"][trial_index]
data["T"] = data["T"][trial_index]
data["nT"] = data["nT"][trial_index]
data["pokedR"] = data["pokedR"][trial_index]
data["correct_dir"] = data["correct_dir"][trial_index]
data["sessID"] = data["sessID"][trial_index]
data["ratID"] = data["ratID"][trial_index]
data["stim_start"] = data["stim_start"][trial_index]
data["cpoke_end"] = data["cpoke_end"][trial_index]
#this subtracts the minimum current trial index from all of the trial indices
#for i = 1:data["N0"]
# #data["trial"] = map(x->x[1] - minimum(trial_index) + 1 : x[end] - minimum(trial_index) + 1, data["trial"])
# data["trial"][i] = data["trial"][i][1] - minimum(trial_index) + 1 : data["trial"][i][end] - minimum(trial_index) + 1
#end
#tvec2 = deepcopy(unique(vcat(data["trial"]...)))
#map!(x->findall(x[1] .== tvec2)[1]:findall(x[end] .== tvec2)[1], data["trial"], data["trial"])
#trial_index = unique(collect(vcat(data["trial"]...)))
#shifts all trial times so they run consecutively from 1:data["trial0"]
#for i = 1:data["N0"]
# data["trial"][i] = findfirst(data["trial"][i][1] .== trial_index) : findfirst(data["trial"][i][end] .== trial_index)
#end
data["N"] = Vector{Vector{Int}}(undef,0)
map(x->push!(data["N"], Vector{Int}(undef,0)), 1:data["trial0"])
data["cellID"] = Vector{Vector{Int}}(undef,0)
map(x->push!(data["cellID"], Vector{Int}(undef,0)), 1:data["trial0"])
data["spike_counts"] = Vector{Vector{Vector{Int64}}}(undef,0)
map(x->push!(data["spike_counts"], Vector{Vector{Int}}(undef,0)), 1:data["trial0"])
#map(y->map(x->push!(data["N"][x],y), data["trial"][y]), 1:data["N0"])
#map(y->map(x->push!(data["cellID"][x], cell_index[y]), data["trial"][y]), 1:data["N0"])
#map(y->map(x->push!(data["spike_counts"][data["trial"][y][x]], data["spike_counts_by_neuron"][y][x]),
# 1:length(data["trial"][y])), 1:data["N0"])
for i = 1:data["N0"]
        #shifts all trial times so they run consecutively from 1:data["trial0"]
data["trial"][i] = findfirst(data["trial"][i][1] .== trial_index) : findfirst(data["trial"][i][end] .== trial_index)
for j = 1:length(data["trial"][i])
push!(data["N"][data["trial"][i][j]], i)
push!(data["cellID"][data["trial"][i][j]], data["cellID_by_neuron"][i])
push!(data["spike_counts"][data["trial"][i][j]], data["spike_counts_by_neuron"][i][j])
end
end
return data
end
#=
######INCOMPLETE##########
function aggregate_and_append_extended_spiking_data!(data::Dict, path::String, sessids::Vector{Vector{Int}},
ratnames::Vector{String}, dt::Float64, ts::Float64, tf::Float64; delay::Float64=0.)
data["spike_counts_stimulus_aligned_extended"] = Vector{Vector{Vector{Int64}}}()
data["spike_counts_cpoke_aligned_extended"] = Vector{Vector{Vector{Int64}}}()
data["spike_counts_stimulus_aligned_extended_by_neuron"] = Vector{Vector{Vector{Int64}}}()
data["spike_counts_cpoke_aligned_extended_by_neuron"] = Vector{Vector{Vector{Int64}}}()
map(x-> push!(data["spike_counts_stimulus_aligned_extended_by_neuron"], Vector{Vector{Int}}(undef,0)), 1:data["N0"])
map(x-> push!(data["spike_counts_cpoke_aligned_extended_by_neuron"], Vector{Vector{Int}}(undef,0)), 1:data["N0"])
data["time_basis_edges"] = (floor.(ts/dt) * dt) : dt : (ceil.(tf/dt) * dt)
#data["time_basis_centers"] = (data["time_basis_edges"][1] + dt/2): dt: (data["time_basis_edges"][end-1] + dt/2)
data["time_basis_centers"] = data["time_basis_edges"][1:end-1]
for j = 1:length(ratnames)
for i = 1:length(sessids[j])
rawdata = read(matopen(path*"/"*ratnames[j]*"_"*string(sessids[j][i])*".mat"),"rawdata")
data = append_extended_neural_data!(data, rawdata, dt, ts, tf, delay=delay)
end
end
map(n-> map(t-> append!(data["spike_counts_stimulus_aligned_extended_by_neuron"][n],
data["spike_counts_stimulus_aligned_extended"][t][data["N"][t] .== n]), data["trial"][n]), 1:data["N0"])
map(n-> map(t-> append!(data["spike_counts_cpoke_aligned_extended_by_neuron"][n],
data["spike_counts_cpoke_aligned_extended"][t][data["N"][t] .== n]), data["trial"][n]), 1:data["N0"])
return data
end
function append_extended_neural_data!(data::Dict, rawdata::Dict, dt::Float64, ts::Float64, tf::Float64;
delay::Float64=0.)
N = size(rawdata["spike_times"][1],2)
ntrials = length(rawdata["T"])
#time = data["time_basis_edges"]
binnedT = ceil.(Int,rawdata["T"]/dt)
append!(data["spike_counts_stimulus_aligned_extended"], map((t,tri) -> map(n -> fit(Histogram, vec(collect(tri[n] .- delay)),
-10*dt:dt:((t+10)*dt), closed=:left).weights, 1:N), binnedT, rawdata["spike_times"]))
#append!(data["spike_counts_cpoke_aligned_extended"], map(tri -> map(n -> fit(Histogram, vec(collect(tri[n])),
# time, closed=:left).weights, 1:N), rawdata["spike_times"]))
time = data["time_basis_edges"]
for i = 1:ntrials
blah = map(n -> fit(Histogram, vec(collect(rawdata["spike_times"][i][n] .+ rawdata["stim_start"][i] .- delay)),
time, closed=:left).weights, 1:N)
push!(data["spike_counts_cpoke_aligned_extended"], blah)
end
return data
end
function λ0_by_trial(data::Dict, μ_λ; cpoke_aligned::Bool=false,
extended::Bool=false)
λ0 = Dict("by_trial" => Vector{Vector{Vector{Float64}}}(undef,0),
"by_neuron" => Vector{Vector{Vector{Float64}}}())
map(x->push!(λ0["by_trial"] , Vector{Vector{Float64}}(undef,0)), 1:data["trial0"])
map(x-> push!(λ0["by_neuron"], Vector{Vector{Float64}}(undef,0)), 1:data["N0"])
for i = 1:data["N0"]
for j = 1:length(data["trial"][i])
if extended
if cpoke_aligned
stim_start = data["stim_start"][data["trial"][i][j]]
else
stim_start = 0.
end
T0 = findlast(collect(data["time_basis_edges"]) .<= stim_start)
else
T0 = 1
end
nT = data["nT"][data["trial"][i][j]]
push!(λ0["by_trial"][data["trial"][i][j]], μ_λ[i][T0: T0+ nT - 1])
end
end
map(n-> map(t-> append!(λ0["by_neuron"][n],
λ0["by_trial"][t][data["N"][t] .== n]), data["trial"][n]), 1:data["N0"])
return λ0
end
function append_neural_data!(data::Dict, rawdata::Dict, ratname::String, sessID::Int, dt::Float64;
delay::Float64=0.)
N = size(rawdata["spike_times"][1],2)
ntrials = length(rawdata["T"])
#by trial
#append!(data["cellID"], map(x-> vec(collect(x)), rawdata["cellID"]))
#append!(data["stim_start"], rawdata["stim_start"])
#by neuron
append!(data["cellID_by_neuron"], rawdata["cellID"][1])
append!(data["sessID_by_neuron"], repeat([sessID], inner=N))
append!(data["ratID_by_neuron"], repeat([ratname], inner=N))
append!(data["trial"], repeat([data["trial0"]+1 : data["trial0"]+ntrials], inner=N))
#if organize == "by_trial"
#by trial
binnedT = ceil.(Int,rawdata["T"]/dt)
append!(data["cellID"], map(x-> vec(collect(x)), rawdata["cellID"]))
append!(data["spike_counts"], map((x,y) -> map(z -> fit(Histogram, vec(collect(y[z] .- delay)),
0.:dt:x*dt, closed=:left).weights, 1:N), binnedT, rawdata["spike_times"]))
append!(data["N"], repeat([collect(data["N0"]+1 : data["N0"]+N)], inner=ntrials))
#by neuron
#append!(data["trial"], repeat([data["trial0"]+1 : data["trial0"]+ntrials], inner=N))
#elseif organize == "by_neuron"
# append!(data["spike_counts"],map!(z -> map!((x,y) -> fit(Histogram,vec(collect(y[z])),
# 0.:dt:x*dt,closed=:left).weights, Vector{Vector}(undef,ntrials),
# binnedT,rawdata["spike_times"]), Vector{Vector}(undef,N),1:N))
# append!(data["spike_counts_all"], map!(z -> map!((x,y) -> fit(Histogram,vec(collect(y[z])),
# 0.:dt:x*dt, closed=:left).weights, Vector{Vector}(undef,ntrials),
# binnedT, rawdata["spike_times"]), Vector{Vector}(undef,N), 1:N))
# append!(data["trial"],repeat([data["trial0"]+1:data["trial0"]+ntrials],inner=N))
#end
data["N0"] += N
data["trial0"] += ntrials
return data
end
#function group_by_neuron!(data)
#trials = Vector{Vector{Int}}()
#data["spike_counts_by_neuron"] = Vector{Vector{Vector{Int64}}}()
#map(x->push!(trials,Vector{Int}(undef,0)),1:data["N0"])
#map(x-> push!(data["spike_counts_by_neuron"], Vector{Vector{Int}}(undef,0)), 1:data["N"])
#map(y->map(x->push!(trials[x],y),data["N"][y]),1:data["trial0"])
#map(n-> map(t-> append!(data["spike_counts_by_neuron"][n],
# data["spike_counts"][t][n]), 1:data["ntrials"]), 1:data["N"])
#return trials, SC
#=
if (haskey(data,"spike_counts_stimulus_aligned_extended_by_neuron") |
haskey(data, "spike_counts_cpoke_aligned_extended_by_neuron"))
#trials = Vector{Vector{Int}}()
data["spike_counts_stimulus_aligned_extended_by_neuron"] = Vector{Vector{Vector{Int64}}}()
#map(x->push!(trials,Vector{Int}(undef,0)),1:data["N0"])
map(x-> push!(data["spike_counts_stimulus_aligned_extended_by_neuron"], Vector{Vector{Int}}(undef,0)), 1:data["N0"])
#map(y->map(x->push!(trials[x],y),data["N"][y]),1:data["trial0"])
map(n-> map(t-> append!(data["spike_counts_stimulus_aligned_extended_by_neuron"][n],
data["spike_counts_stimulus_aligned_extended"][t][data["N"][t] .== n]), data["trial"][n]), 1:data["N0"])
#trials = Vector{Vector{Int}}()
data["spike_counts_cpoke_aligned_extended_by_neuron"] = Vector{Vector{Vector{Int64}}}()
#map(x->push!(trials,Vector{Int}(undef,0)),1:data["N0"])
map(x-> push!(data["spike_counts_cpoke_aligned_extended_by_neuron"], Vector{Vector{Int}}(undef,0)), 1:data["N0"])
#map(y->map(x->push!(trials[x],y),data["N"][y]),1:data["trial0"])
map(n-> map(t-> append!(data["spike_counts_cpoke_aligned_extended_by_neuron"][n],
data["spike_counts_cpoke_aligned_extended"][t][data["N"][t] .== n]), data["trial"][n]), 1:data["N0"])
end
=#
#return data
#end
function package_extended_data!(data,rawdata,model_type::String,ratname,ts::Float64;dt::Float64=2e-2,organize::String="by_trial")
ntrials = length(rawdata["T"])
append!(data["sessid"],map(x->x[1],rawdata["sessid"]))
append!(data["cell"],map(x->vec(collect(x)),rawdata["cell"]))
append!(data["ratname"],map(x->ratname,rawdata["cell"]))
maxT = ceil.(Int,(rawdata["T"])/dt)
binnedT = ceil.(Int,(rawdata["T"] + ts)/dt);
append!(data["nT"],binnedT)
if any(model_type .== "spikes")
N = size(rawdata["St"][1],2)
if organize == "by_trial"
append!(data["spike_counts"],map((x,y)->map(z->fit(Histogram,vec(collect(y[z])),
-ts:dt:x*dt,closed=:left).weights,1:N),maxT,rawdata["St"]));
append!(data["N"],repmat([collect(data["N0"]+1:data["N0"]+N)],ntrials));
elseif organize == "by_neuron"
append!(data["spike_counts"],map!(z -> map!((x,y) -> fit(Histogram,vec(collect(y[z])),
-ts:dt:x*dt,closed=:left).weights,Vector{Vector}(ntrials),
maxT,rawdata["St"]),Vector{Vector}(N),1:N));
append!(data["trial"],repmat([data["trial0"]+1:data["trial0"]+ntrials],N));
end
data["N0"] += N
data["trial0"] += ntrials
end
return data
end
#scrub a larger dataset to only keep data relevant to a single neuron
function keep_single_neuron_data!(data,i)
data["nT"] = data["nT"][data["trial"][i]]
data["leftbups"] = data["leftbups"][data["trial"][i]]
data["rightbups"] = data["rightbups"][data["trial"][i]]
data["binned_rightbups"] = data["binned_rightbups"][data["trial"][i]]
data["binned_leftbups"] = data["binned_leftbups"][data["trial"][i]]
data["N"] = data["N"][data["trial"][i]]
data["spike_counts"] = data["spike_counts"][data["trial"][i]]
data["spike_counts"] = map((x,y)->x = x[y.==i],data["spike_counts"],data["N"])
map!(x->x = [1],data["N"],data["N"])
return data
end
#just hanging on to this for some later time
function my_callback(os)
so_far = time() - start_time
println(" * Time so far: ", so_far)
history = Array{Float64,2}(sum(fit_vec),0)
history_gx = Array{Float64,2}(sum(fit_vec),0)
for i = 1:length(os)
ptemp = group_params(os[i].metadata["x"], p_const, fit_vec)
ptemp = map_func!(ptemp,model_type,"tanh",N=N)
ptemp_opt, = break_params(ptemp, fit_vec)
history = cat(2,history,ptemp_opt)
history_gx = cat(2,history_gx,os[i].metadata["g(x)"])
end
save(out_pth*"/history.jld", "history", history, "history_gx", history_gx)
return false
end
#function my_callback(os)
#so_far = time() - start_time
#println(" * Time so far: ", so_far)
#history = Array{Float64,2}(sum(fit_vec),0)
#history_gx = Array{Float64,2}(sum(fit_vec),0)
#for i = 1:length(os)
# ptemp = group_params(os[i].metadata["x"], p_const, fit_vec)
# ptemp = map_func!(ptemp,model_type,"tanh",N=N)
# ptemp_opt, = break_params(ptemp, fit_vec)
# history = cat(2,history,ptemp_opt)
# history_gx = cat(2,history_gx,os[i].metadata["g(x)"])
#end
#print(os[1]["x"])
#save(ENV["HOME"]*"/spike-data_latent-accum"*"/history.jld", "os", os)
#print(path)
# return false
#end
=#
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 6334 | function simulate_expected_firing_rate(θ::Union{θneural, θneural_choice}, data, rng)
@unpack θz,θy = θ
μ_λ = rand.(Ref(θz), θy, data, Ref(rng))
return μ_λ
end
"""
    simulate_expected_firing_rate(model, data; num_samples=100, nconds=2, rng1=1)
Given a `model` and `data`, generate samples of the firing rate `λ` of each neuron.
Arguments:
- `model`: an instance of a `neuralDDM`.
- `data`: the trial data (e.g., as returned by `load_neural_data`).
Optional arguments:
- `num_samples`: How many independent samples of the latent to simulate, to average over.
- `nconds`: number of groups of trials (grouped by evidence strength) used to compute the conditional means.
- `rng1`: seed for the random number generator.
Returns:
- `μ_λ`: the mean firing rate for each neuron (averaging across the noise of the latent process over `num_samples` samples).
- `μ_c_λ`: the average across trials within each of `nconds` groups of trials with similar evidence values.
- `λ`: an `array` of length `num_samples`. Each entry is an `array` of length number of trials. Each entry of that is an `array` of length number of neurons. Each entry of that is the firing rate of that neuron on that trial for some length of time.
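Example:

A minimal sketch (assuming `model` and `data` from a prior fit):

```julia
μ_λ, μ_c_λ, λ = simulate_expected_firing_rate(model, data; num_samples=100, nconds=2)
```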
"""
function simulate_expected_firing_rate(model, data; num_samples::Int=100, nconds::Int=2, rng1::Int=1)
@unpack θ = model
@unpack θz,θy = θ
rng = sample(Random.seed!(rng1), 1:num_samples, num_samples; replace=false)
λ = map(rng-> rand.(Ref(θz), θy, data, Ref(rng)), rng)
μ_λ = mean(λ)
μ_c_λ = cond_mean.(μ_λ, data; nconds=nconds)
return μ_λ, μ_c_λ, λ
end
function simulate_expected_spikes(model, data; num_samples::Int=100, nconds::Int=2, rng1::Int=1)
@unpack θ = model
@unpack θz,θy = θ
rng = sample(Random.seed!(rng1), 1:num_samples, num_samples; replace=false)
spikes = map(rng-> rand_spikes.(Ref(θz), θy, data, Ref(rng)), rng)
μ_spikes = mean(spikes)
μ_c_spikes = cond_mean.(μ_spikes, data; nconds=nconds)
return μ_spikes, μ_c_spikes, spikes
end
"""
Sample all trials over one session
"""
function rand_a(θz::θz, θy, data, rng)
ntrials = length(data)
rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false)
pmap((data,rng) -> rand(θz,θy,data.input_data; rng=rng)[2], data, rng)
end
"""
Sample all trials over one session
"""
function rand_spikes(θz::θz, θy, data, rng)
ntrials = length(data)
rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false)
pmap((data,rng) -> rand(θz,θy,data.input_data; rng=rng)[3], data, rng)
end
"""
Sample all trials over one session
"""
function rand(θz::θz, θy, data, rng)
ntrials = length(data)
rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false)
pmap((data,rng) -> rand(θz,θy,data.input_data; rng=rng)[1], data, rng)
end
"""
"""
function cond_mean(μ_λ, data; nconds=2)
ncells = data[1].ncells
pad = data[1].input_data.pad
nT = map(x-> x.input_data.binned_clicks.nT, data)
ΔLRT = map((data,nT) -> getindex(diffLR(data), pad+nT), data, nT)
conds = encode(LinearDiscretizer(binedges(DiscretizeUniformWidth(nconds), ΔLRT)), ΔLRT)
map(n-> map(c-> [mean([μ_λ[conds .== c][k][n][t]
for k in findall(nT[conds .== c] .+ (2*pad) .>= t)])
for t in 1:(maximum(nT[conds .== c] .+ (2*pad)))],
1:nconds), 1:ncells)
end
"""
"""
function synthetic_data(θ::θneural,
ntrials::Vector{Int}, ncells::Vector{Int}; centered::Bool=true,
dt::Float64=1e-2, rng::Int=1, dt_synthetic::Float64=1e-4,
delay::Int=0, pad::Int=10, pos_ramp::Bool=false)
nsess = length(ntrials)
rng = sample(Random.seed!(rng), 1:nsess, nsess; replace=false)
@unpack θz,θy = θ
output = rand.(Ref(θz), θy, ntrials, ncells, rng; delay=delay, pad=0, pos_ramp=pos_ramp)
spikes = getindex.(output, 1)
λ0 = getindex.(output, 2)
clicks = getindex.(output, 3)
choices = getindex.(output, 4)
output = bin_clicks_spikes_λ0.(spikes, clicks, λ0;
centered=centered, dt=dt, dt_synthetic=dt_synthetic, synthetic=true)
λ0 = synthetic_λ0.(clicks, ncells; dt=dt, pos_ramp=pos_ramp, pad=0)
spikes = getindex.(output, 1)
binned_clicks = getindex.(output, 2)
input_data = neuralinputs.(clicks, binned_clicks, λ0, dt, centered, delay, 0)
padded = map(spikes-> map(spikes-> map(SCn-> vcat(rand.(Poisson.((sum(SCn[1:10])/(10*dt))*ones(pad)*dt)),
SCn, rand.(Poisson.((sum(SCn[end-9:end])/(10*dt))*ones(pad)*dt))), spikes), spikes), spikes)
μ_rnt = map(padded-> filtered_rate.(padded, dt), padded)
nT = map(x-> map(x-> x.nT, x), binned_clicks)
μ_t = map((μ_rnt, ncells, nT)-> map(n-> [max(0., mean([μ_rnt[i][n][t]
for i in findall(nT .>= t)]))
for t in 1:(maximum(nT))], 1:ncells), μ_rnt, ncells, nT)
neuraldata.(input_data, spikes, ncells, choices), μ_rnt, μ_t
end
"""
"""
synthetic_λ0(clicks, N::Int; dt::Float64=1e-4, rng::Int=1, pos_ramp::Bool=false, pad::Int=10) =
synthetic_λ0.(clicks, N; dt=dt, rng=rng, pos_ramp=pos_ramp, pad=pad)
"""
"""
function synthetic_λ0(clicks::clicks, N::Int; dt::Float64=1e-4, rng::Int=1, pos_ramp::Bool=false, pad::Int=10)
@unpack T = clicks
Random.seed!(rng)
if pos_ramp
λ0 = repeat([collect(range(10. + 5*rand(), stop=20. + 5*rand(), length=Int(ceil(T/dt))))], outer=N)
else
λ0 = repeat([zeros(Int(ceil(T/dt) + 2*pad))], outer=N)
end
end
"""
"""
function rand(θz::θz, θy, ntrials, ncells, rng; centered::Bool=false, dt::Float64=1e-4, pos_ramp::Bool=false,
delay::Int=0, pad::Int=10)
clicks = synthetic_clicks.(ntrials, rng)
binned_clicks = bin_clicks.(clicks, centered=centered, dt=dt)
λ0 = synthetic_λ0.(clicks, ncells; dt=dt, pos_ramp=pos_ramp, pad=pad)
input_data = neuralinputs.(clicks, binned_clicks, λ0, dt, centered, delay, pad)
rng = sample(Random.seed!(rng), 1:ntrials, ntrials; replace=false)
output = pmap((input_data,rng) -> rand(θz,θy,input_data; rng=rng), input_data, rng)
spikes = getindex.(output, 3)
choices = getindex.(output, 4)
return spikes, λ0, clicks, choices
end
"""
"""
function rand(θz::θz, θy, input_data::neuralinputs; rng::Int=1)
@unpack λ0, dt = input_data
Random.seed!(rng)
a = rand(θz,input_data)
λ = map((θy,λ0)-> θy(a, λ0), θy, λ0)
spikes = map(λ-> rand.(Poisson.(λ*dt)), λ)
choice = a[end] .> 0.
return λ, a, spikes, choice
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 4967 | """
$(TYPEDEF)
"""
@with_kw struct neuralinputs{T1,T2}
clicks::T1
binned_clicks::T2
λ0::Vector{Vector{Float64}}
dt::Float64
centered::Bool
delay::Int
pad::Int
end
"""
"""
neuralinputs(clicks, binned_clicks, λ0::Vector{Vector{Vector{Float64}}}, dt::Float64, centered::Bool, delay::Int, pad::Int) =
neuralinputs.(clicks, binned_clicks, λ0, dt, centered, delay, pad)
"""
"""
@with_kw mutable struct θneural{T1, T2} <: DDMθ
θz::T1
θy::T2
f::Vector{Vector{String}}
end
"""
neuralDDM
Fields:
- `θ`: the model parameters.
- `n`: number of bins used to discretize the latent accumulation variable.
- `cross`: whether to apply click adaptation across the left and right click streams.
- `fit`: `Bool` vector specifying which parameters to fit.
- `lb`: vector of lower bounds for the parameters.
- `ub`: vector of upper bounds for the parameters.
"""
@with_kw mutable struct neuralDDM{T} <: DDM
θ::T
n::Int=53
cross::Bool=false
fit::Vector{Bool}
lb::Vector{Float64}
ub::Vector{Float64}
end
"""
"""
@with_kw mutable struct noiseless_neuralDDM{T} <: DDM
θ::T
fit::Vector{Bool}
lb::Vector{Float64}
ub::Vector{Float64}
end
"""
"""
function nθparams(f)
ncells = length.(f)
nparams = Vector{Int}(undef, sum(ncells));
nparams[vcat(f...) .== "Softplussign"] .= 1
nparams[vcat(f...) .== "Softplus"] .= 1
nparams[vcat(f...) .== "Sigmoid"] .= 4
nparams[vcat(f...) .== "Softplus_negbin"] .= 2
return nparams, ncells
end
"""
"""
function neural_options_noiseless(f)
nparams, ncells = nθparams(f)
fit = vcat(falses(dimz), trues.(nparams)...)
lb = Vector(undef, sum(ncells))
ub = Vector(undef, sum(ncells))
for i in 1:sum(ncells)
if vcat(f...)[i] == "Softplus"
lb[i] = [-10]
ub[i] = [10]
elseif vcat(f...)[i] == "Sigmoid"
lb[i] = [-100.,0.,-10.,-10.]
ub[i] = [100.,100.,10.,10.]
end
end
lb = vcat([1e-3, 8., -5., 1e-3, 1e-3, 1e-3, 0.005], vcat(lb...))
ub = vcat([100., 100., 5., 400., 10., 1.2, 1.], vcat(ub...));
fit, lb, ub
end
"""
"""
function neural_options(f)
nparams, ncells = nθparams(f)
fit = vcat(trues(dimz), trues.(nparams)...)
lb = Vector(undef, sum(ncells))
ub = Vector(undef, sum(ncells))
for i in 1:sum(ncells)
if vcat(f...)[i] == "Softplus"
lb[i] = [-10]
ub[i] = [10]
elseif vcat(f...)[i] == "Sigmoid"
lb[i] = [-100.,0.,-10.,-10.]
ub[i] = [100.,100.,10.,10.]
end
end
lb = vcat([0., 4., -5., 0., 0., 0.01, 0.005], vcat(lb...))
ub = vcat([30., 30., 5., 100., 2.5, 1.2, 1.], vcat(ub...));
fit, lb, ub
end
"""
"""
function θneural(x::Vector{T}, f::Vector{Vector{String}}) where {T <: Real}
nparams, ncells = nθparams(f)
borg = vcat(dimz,dimz.+cumsum(nparams))
blah = [x[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
blah = map((f,x) -> f(x...), getfield.(Ref(@__MODULE__), Symbol.(vcat(f...))), blah)
borg = vcat(0,cumsum(ncells))
θy = [blah[i] for i in [borg[i-1]+1:borg[i] for i in 2:length(borg)]]
θneural(θz(x[1:dimz]...), θy, f)
end
"""
"""
@with_kw struct θy{T1}
θ::T1
end
"""
neuraldata
Module-defined class for keeping data organized for the `neuralDDM` model.
Fields:
- `input_data`: inputs to the accumulator model, i.e. clicks, etc.
- `spikes`: the binned spikes
- `ncells`: number of cells on that trial (should be the same for every trial in a session)
- `choice`: choice on that trial
"""
@with_kw struct neuraldata <: DDMdata
input_data::neuralinputs
spikes::Vector{Vector{Int}}
ncells::Int
choice::Bool
end
"""
"""
neuraldata(input_data, spikes::Vector{Vector{Vector{Int}}}, ncells::Int, choice) = neuraldata.(input_data,spikes,ncells,choice)
"""
"""
@with_kw struct Sigmoid{T1} <: DDMf
a::T1=10.
b::T1=10.
c::T1=1.
d::T1=0.
end
"""
"""
(θ::Sigmoid)(x::Vector{U}, λ0::Vector{T}) where {U,T <: Real} =
(θ::Sigmoid).(x, λ0)
"""
"""
function (θ::Sigmoid)(x::U, λ0::T) where {U,T <: Real}
@unpack a,b,c,d = θ
y = c * x + d
y = a + b * logistic!(y) + λ0
y = softplus(y)
end
@with_kw struct Softplussign{T1} <: DDMf
#a::T1 = 0
c::T1 = 5.0*rand([-1,1])
end
"""
"""
function (θ::Softplussign)(x::Union{U,Vector{U}}, λ0::Union{T,Vector{T}}) where {U,T <: Real}
#@unpack a,c = θ
@unpack c = θ
#y = a .+ softplus.(c*x .+ d) .+ λ0
#y = softplus.(c*x .+ a .+ λ0)
y = softplus.(c .* sign.(x) .+ softplusinv.(λ0))
#y = max.(eps(), y .+ λ0)
#y = softplus.(y .+ λ0)
end
"""
Softplus(c)
``\\lambda(a) = \\ln(1 + \\exp(c * a))``
"""
@with_kw struct Softplus{T1} <: DDMf
#a::T1 = 0
c::T1 = 5.0*rand([-1,1])
end
"""
"""
function (θ::Softplus)(x::Union{U,Vector{U}}, λ0::Union{T,Vector{T}}) where {U,T <: Real}
#@unpack a,c = θ
@unpack c = θ
#y = a .+ softplus.(c*x .+ d) .+ λ0
#y = softplus.(c*x .+ a .+ λ0)
y = softplus.(c*x .+ softplusinv.(λ0))
#y = max.(eps(), y .+ λ0)
#y = softplus.(y .+ λ0)
end
softplusinv(x) = log(expm1(x))
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 964 | ntrials = 2
θ = θchoice(θz=θz(σ2_i = 0.5, B = 15., λ = -0.5, σ2_a = 50., σ2_s = 1.5,
ϕ = 0.8, τ_ϕ = 0.05),
bias=1., lapse=0.05)
θ, data = synthetic_data(;θ=θ, ntrials=ntrials, rng=1)
model_gen = choiceDDM(θ=θ)
choices = getfield.(data, :choice)
@test all(choices .== vcat(true, false))
@time @test round(loglikelihood(model_gen, data), digits=2) ≈ -0.79
@test round(norm(gradient(model_gen, data)), digits=2) ≈ 0.67
x0 = vcat([0.1, 15., -0.1, 20., 0.5, 0.8, 0.008], [0.,0.01])
θ = Flatten.reconstruct(θchoice(), x0)
model = choiceDDM(θ=θ, fit = trues(dimz+2),
    lb = vcat([0., 8., -5., 0., 0., 0.01, 0.005], [-30, 0.]),
ub = vcat([2., 30., 5., 100., 2.5, 1.2, 1.], [30, 1.]))
model, = fit(model, data; iterations=5, outer_iterations=1);
@test round(norm(Flatten.flatten(model.θ)), digits=2) ≈ 25.03
H = Hessian(model, data)
@test round(norm(H), digits=2) ≈ 7.62
CI, HPSD = CIs(H)
@test round(norm(CI), digits=2) ≈ 1503.7 | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 2192 | ncells, ntrials = [1,2], [3,4]
f = [repeat(["Sigmoid"], N) for N in ncells]
θ = θneural(θz = θz(σ2_i = 0.5, B = 15., λ = -0.5, σ2_a = 10., σ2_s = 1.2,
ϕ = 0.6, τ_ϕ = 0.02),
θy=[[Sigmoid() for n in 1:N] for N in ncells], f=f);
data, = synthetic_data(θ, ntrials, ncells);
spikes = map(x-> sum.(x), getfield.(vcat(data...), :spikes))
@test all(spikes .== [[5], [16], [4], [2, 2], [17, 15], [19, 16], [8, 13]])
x = PulseInputDDM.flatten(θ)
θy0 = vcat(vcat(θy.(data, f)...)...)
fitbool, lb, ub = neural_options_noiseless(f)
x0=vcat([0., 30., 0. + eps(), 0., 0., 1. - eps(), 0.008], θy0)
θ0 = θneural(x0, f)
model0 = noiseless_neuralDDM(θ=θ0, fit=fitbool, lb=lb, ub=ub)
x0 = PulseInputDDM.flatten(θ0)
@unpack f = θ0
model, = fit(model0, data; iterations=2, outer_iterations=1)
x0 = vcat([0.1, 15., -0.1, 20., 0.5, 0.8, 0.008], PulseInputDDM.flatten(model.θ)[dimz+1:end])
fitbool, lb, ub = neural_options(f)
model = neuralDDM(θ=θneural(x0, f), fit=fitbool, lb=lb, ub=ub)
model, = fit(model, data; iterations=2, outer_iterations=1)
fitbool, lb, ub = neural_choice_options(f)
choice_neural_model = neural_choiceDDM(θ=θneural_choice(vcat(x0[1:dimz], 0., 0., x0[dimz+1:end]), f), fit=fitbool, lb=lb, ub=ub)
@test round(choice_loglikelihood(choice_neural_model, data), digits=2) ≈ -0.44
@test round(joint_loglikelihood(choice_neural_model, data), digits=2) ≈ -368.03
nparams, = PulseInputDDM.nθparams(f)
choice_neural_model.fit, choice_neural_model.lb, choice_neural_model.ub = vcat(falses(dimz), trues(2), falses.(nparams)...), lb, ub
choice_neural_model, = choice_optimize(choice_neural_model, data; iterations=2, outer_iterations=1)
@test round(norm(PulseInputDDM.flatten(choice_neural_model.θ)), digits=2) ≈ 55.87
choice_neural_model.θ = θneural_choice(vcat(x0[1:dimz], 0., 0., x0[dimz+1:end]), f)
choice_neural_model.fit, choice_neural_model.lb, choice_neural_model.ub = vcat(trues(dimz), trues(2), trues.(nparams)...),
vcat(lb[1:7], -10., lb[9:end]), vcat(ub[1:7], 10., ub[9:end])
choice_neural_model, = choice_optimize(choice_neural_model, data; iterations=2, outer_iterations=1)
@test round(norm(PulseInputDDM.flatten(choice_neural_model.θ)), digits=2) ≈ 55.87 | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 1832 |
ncells, ntrials = [1,2], [3,4]
f = [repeat(["Sigmoid"], N) for N in ncells]
θ = θneural(θz = θz(σ2_i = 0.5, B = 15., λ = -0.5, σ2_a = 10., σ2_s = 1.2,
ϕ = 0.6, τ_ϕ = 0.02),
θy=[[Sigmoid() for n in 1:N] for N in ncells], f=f);
fitbool,lb,ub = neural_options(f)
data, = synthetic_data(θ, ntrials, ncells);
model_gen = neuralDDM(θ=θ,fit=fitbool,lb=lb,ub=ub);
spikes = map(x-> sum.(x), getfield.(vcat(data...), :spikes))
@test all(spikes .== [[5], [16], [4], [2, 2], [17, 15], [19, 16], [8, 13]])
@test round(loglikelihood(model_gen, data), digits=2) ≈ -319.64
@test round(norm(gradient(model_gen, data)), digits=2) ≈ 32.68
x = PulseInputDDM.flatten(θ)
@test round(loglikelihood(x, model_gen, data), digits=2) ≈ -319.64
θy0 = vcat(vcat(θy.(data, f)...)...)
@test round(norm(θy0), digits=2) ≈ 38.43
x0=vcat([0., 30., 0. + eps(), 0., 0., 1. - eps(), 0.008], θy0)
θ0 = θneural(x0, f)
fitbool,lb,ub = neural_options_noiseless(f)
model0 = noiseless_neuralDDM(θ=θ0,fit=fitbool,lb=lb,ub=ub)
@test round(loglikelihood(model0, data), digits=2) ≈ -1495.92
x0 = PulseInputDDM.flatten(θ0)
@unpack f = θ0
@test round(loglikelihood(x0, model0, data), digits=2) ≈ -1495.92
model, = fit(model0, data; iterations=2, outer_iterations=1)
@test round(norm(PulseInputDDM.flatten(model.θ)), digits=2) ≈ 58.28
#@test round(norm(gradient(model, data)), digits=2) ≈ 646.89
x0 = vcat([0.1, 15., -0.1, 20., 0.5, 0.8, 0.008], PulseInputDDM.flatten(model.θ)[dimz+1:end])
fitbool,lb,ub = neural_options(f)
model = neuralDDM(θ=θneural(x0, f),fit=fitbool,lb=lb,ub=ub)
model, = fit(model, data; iterations=2, outer_iterations=1)
@test round(norm(PulseInputDDM.flatten(model.θ)), digits=2) ≈ 55.53
H = Hessian(model, data; chunk_size=4)
@test round(norm(H), digits=2) ≈ 8.6
CI, HPSD = CIs(H)
@test round(norm(CI), digits=2) ≈ 1149.39 | PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | code | 518 | using Test, Random, PulseInputDDM, LinearAlgebra, Flatten, Parameters
@testset "PulseInputDDM" begin
#check that random number generator from Base julia has not changed
Random.seed!(1)
@test isapprox(sum(randn(10)), 2.86; atol=0.01)
@testset "choice_model" begin
include("choice_model_tests.jl")
end
@testset "neural_model" begin
include("neural_model_tests.jl")
end
@testset "joint_model" begin
include("joint_model_tests.jl")
end
end
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"Apache-2.0"
] | 0.4.5 | 7963de10c1dc11cd7c6c68d60ee023a991bf2033 | docs | 5355 | # PulseInputDDM.jl — a Julia package for inferring the parameters of drift diffusion models
A Julia package for inferring the parameters of generalized drift diffusion to bound models (DDMs) from neural activity, behavioral data, or both. The codebase was designed with the expectation that the data were collected from subjects performing a pulse-based evidence accumulation task, as in [Brunton et al 2013](https://www.science.org/doi/10.1126/science.1233912), but it can be adapted for other evidence accumulation tasks.
The package contains a variety of auxiliary functions for loading/saving model fits, sampling from fitted models (e.g., producing latents, neural activity, or choices from a model with specific parameter settings), and for fitting data to similar/related models.
Written for Julia 1.5.0 and above.
## Help
[Start a discussion!](https://github.com/Brody-Lab/PulseInputDDM/discussions).
## Recommended installation
You need to add the PulseInputDDM package from github by entering the Julia package manager, by typing `]`. Then use `add` to add the package, as follows
```julia
(@v1.10) pkg > add PulseInputDDM
```
Another way to add the package in normal Julia mode (i.e., without typing `]`) is
```julia
julia > using Pkg
julia > Pkg.add("PulseInputDDM")
```
## Updating the package
When major modifications are made to the code base, you will need to update the package. You can do this in Julia's package manager (`]`) by typing `update`.
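For example, at the package manager prompt:
```julia
(@v1.10) pkg > update PulseInputDDM
```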
## Getting help
Most functions in this package contain [docstrings](https://docs.julialang.org/en/v1/manual/documentation/). To get more details about how any function in this package works, in Julia you can type `?` and then the name of the function. Documentation will display in the REPL or notebook.
## Fitting the model to choice data only
Because many neuroscientists use MATLAB, we use the [MAT.jl](https://github.com/JuliaIO/MAT.jl) package for IO. Data can be loaded using two conventions. One of these conventions is easier when data is saved within MATLAB as a .MAT file, and is described below.
The package expects your data to live in a single .mat file which should contain a struct called `rawdata`. Each element of `rawdata` should have data for one behavioral trial and `rawdata` should contain the following fields with the specified structure:
- `rawdata.leftbups`: row-vector containing the relative timing, in seconds, of left clicks on an individual trial. 0 seconds is the start of the click stimulus.
- `rawdata.rightbups`: row-vector containing the relative timing in seconds (origin at 0 sec) of right clicks on an individual trial. 0 seconds is the start of the click stimulus.
- `rawdata.T`: the duration of the trial, in seconds. The beginning of a trial is defined as the start of the click stimulus. The end of a trial is defined based on the behavioral event “cpoke_end”. This was the Hanks convention.
- `rawdata.pokedR`: `Bool` representing the animal choice (1 = right).
The example file located at [example_matfile.mat](https://github.com/Brody-Lab/PulseInputDDM/blob/master/examples/choice%20model/example_matfile.mat) adheres to this convention and can be loaded using the `load_choice_data` method.
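For example, a minimal sketch (the path is illustrative, and the exact keyword arguments accepted by `load_choice_data` may differ; see its docstring):
```julia
using PulseInputDDM
data = load_choice_data("example_matfile.mat")
```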
### Fitting the model
Once your data is correctly formatted and you have the package added in Julia, you are ready to fit the model. An example tutorial is located in the [examples](https://github.com/Brody-Lab/PulseInputDDM/tree/master/examples/choice%20model) directory. The tutorial illustrates how to use many of the most important methods, such as loading data, saving model fits, and optimizing the model parameters.
## Fitting the model to neural activity
### Data format conventions for neural data
See the section above on fitting the model to choice data only for the expected format of the .MAT files if one were fitting the choice model. In addition to those fields, for a neural model `rawdata` should also contain an extra field:
`rawdata.spike_times`: cell array containing the spike times of each neuron on an individual trial. The cell array will be length of the number of neurons recorded on that trial. Each entry of the cell array is a column vector containing the relative timing of spikes, in seconds. Zero seconds is the start of the click stimulus. Spikes before and after the click inputs should also be included.
The convention for fitting a model with neural data is that each session should have its own .MAT file. (This contrasts with the convention for the choice model, where a single .MAT file can contain data from different sessions.) It's just easier this way, especially if different sessions have different numbers of cells.
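For example, a sketch with illustrative file names (see `?load_neural_data` for the full set of keyword arguments):
```julia
using PulseInputDDM
files = ["session1.mat", "session2.mat"]
spike_data, μ_rnt, μ_t, cpoke_out = load_neural_data(files; dt=1e-2)
```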
## Loading and saving data
## Contribution Guidelines
Constructive contributions are welcome.
- Questions, feedback, bug reports, and proposed features should be submitted as a GitHub issue.
- Alternatively, contact the repository owner, Brian, via email ([email protected]).
- For development contributions, please first open an issue describing the proposed development. The resulting discussion may help prevent duplication of efforts. If moving forward with the development, open a pull request with the updated code or new features. Please reference the corresponding issue in the pull request.
| PulseInputDDM | https://github.com/Brody-Lab/PulseInputDDM.jl.git |
|
[
"MIT"
] | 1.4.4 | 998ed1bc7ed4524ac5130af59cc25fc674b8a59e | code | 405 | using Documenter, SchumakerSpline
makedocs(
format = Documenter.HTML(),
sitename = "SchumakerSpline",
modules = [SchumakerSpline],
pages = Any[
"Introduction" => "index.md",
"Examples" => "examples.md",
"API" => "api.md"]
)
deploydocs(
repo = "github.com/s-baumann/SchumakerSpline.jl.git",
target = "build",
deps = nothing,
make = nothing
)
| SchumakerSpline | https://github.com/s-baumann/SchumakerSpline.jl.git |
|
[
"MIT"
] | 1.4.4 | 998ed1bc7ed4524ac5130af59cc25fc674b8a59e | code | 19061 | """
This creates an enum which details how extrapolation from the interpolation domain should be done.
The possible enum values are:
* `Curve` - Curve extrapolation extends out the quadratic form at the edges. This can lead to a nonmonotonic result (as the curve can eventually change direction)
* `Linear` - Linear extrapolation extends out the gradient at the edges. This will always lead to a monotonic result.
* `Constant` - Constant extrapolation extends out the value at the edges. This leads to flat values being extended out.
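For instance, a spline can be built with a different scheme on each side of the domain (a minimal sketch):
```julia
x = [0.0, 1.0, 2.0, 3.0]
y = [0.0, 1.0, 1.5, 1.8]
s = Schumaker(x, y; extrapolation = (Linear, Constant))
```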
"""
@enum Schumaker_ExtrapolationSchemes begin
Curve = 0
Linear = 1
Constant = 2
end
"""
    Schumaker(x::Array{T,1},y::Array{T,1} ; gradients::Union{Missing,Array{T,1}} = missing, extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve),
    left_gradient::Union{Missing,T} = missing, right_gradient::Union{Missing,T} = missing)
    Schumaker(x::Array{Int,1},y::Array{T,1} ; gradients::Union{Missing,Array{T,1}} = missing, extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve),
    left_gradient::Union{Missing,T} = missing, right_gradient::Union{Missing,T} = missing)
    Schumaker(x::Array{Date,1},y::Array{T,1} ; gradients::Union{Missing,Array{T,1}} = missing, extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve),
    left_gradient::Union{Missing,T} = missing, right_gradient::Union{Missing,T} = missing)
Creates a Schumaker spline.
### Inputs
* `x` - A vector of x coordinates.
* `y` - A vector of y coordinates.
* `extrapolation` - A tuple of two `Schumaker_ExtrapolationSchemes` values (one for the left and one for the right of the sample domain), each of which should be `Curve`, `Linear` or `Constant`, specifying how to extrapolate outside of the sample domain.
* `gradients` - A vector of gradients at each point. If not supplied these are imputed from x and y.
* `left_gradient` - The gradient at the lowest value of x in the domain. This will override the gradient imputed or submitted in the gradients optional argument (if it is submitted there)
* `right_gradient` - The gradient at the highest value of x in the domain. This will override the gradient imputed or submitted in the gradients optional argument (if it is submitted there)
### Returns
* A `Schumaker` object which contains the spline. This object can then be evaluated with evaluate or evaluate_integral.
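### Example
A minimal usage sketch:
```julia
x = collect(0.0:0.5:3.0)
y = sqrt.(x)
s = Schumaker(x, y)
s(1.3)     # value of the spline at 1.3
s(1.3, 1)  # first derivative at 1.3
```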
"""
struct Schumaker{T<:AbstractFloat}
IntStarts_::Array{T,1}
coefficient_matrix_::Array{T,2}
function Schumaker(x::Array{T,1},y::Array{<:Real,1} ; gradients::Union{Missing,Array{<:Real,1}} = missing, left_gradient::Union{Missing,<:Real} = missing, right_gradient::Union{Missing,<:Real} = missing,
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve)) where T<:Real
if length(x) == 0
error("Zero length x vector is insufficient to create Schumaker Spline.")
elseif length(x) == 1
IntStarts = Array{T,1}(x)
@inbounds SpCoefs = [0 0 y[1]]
# Note that this hardcodes in constant extrapolation. This is only
# feasible one as we do not have derivative or curve information.
return new{T}(IntStarts, SpCoefs)
elseif length(x) == 2
@inbounds IntStarts = Array{T,1}([x[1]])
@inbounds IntEnds = Array{T,1}([x[2]])
@inbounds linear_coefficient = (y[2]- y[1]) / (x[2]-x[1])
@inbounds SpCoefs = [0 linear_coefficient y[1]]
            # In this case it defaults to curve extrapolation (which is the same as linear here)
# So we just alter in case constant is specified.
if extrapolation[1] == Constant || extrapolation[2] == Constant
matrix_without_extrapolation = hcat(IntStarts , SpCoefs)
@inbounds matrix_with_extrapolation = extrapolate(matrix_without_extrapolation, extrapolation, x[2], y)
return @inbounds new{T}(matrix_with_extrapolation[:,1], matrix_with_extrapolation[:,2:4])
else
return new{T}(IntStarts, SpCoefs)
end
end
if ismissing(gradients)
gradients = imputeGradients(x,y)
end
if !ismissing(left_gradient)
@inbounds gradients[1] = left_gradient
end
if !ismissing(right_gradient)
@inbounds gradients[length(gradients)] = right_gradient
end
IntStarts, SpCoefs = getCoefficientMatrix(x,y,gradients, extrapolation)
if T<:AbstractFloat
G = T
else
G = Float64
end
return new{G}(G.(IntStarts), G.(SpCoefs))
end
function Schumaker(IntStarts_::Array{T,1}, coefficient_matrix_::Array{T,2}) where {T<:Real}
return new{T}(IntStarts_, coefficient_matrix_)
end
function Schumaker{Q}(IntStarts_::Array{<:Real,1}, coefficient_matrix_::Array{<:Real,2}) where Q<:Real
return new{Q}(Q.(IntStarts_), Q.(coefficient_matrix_))
end
function Schumaker(x::Array{Date,1},y::Array{<:Real,1} ; gradients::Union{Missing,Array{<:Real,1}} = missing, left_gradient::Union{Missing,<:Real} = missing, right_gradient::Union{Missing,<:Real} = missing,
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve))
days_as_ints = Dates.days.(x)
T = promote_type(eltype(y), eltype(days_as_ints))
if T<:AbstractFloat
G = T
else
G = Float64
end
return Schumaker{G}(days_as_ints , y; gradients = gradients , extrapolation = extrapolation, left_gradient = left_gradient, right_gradient = right_gradient)
end
function Schumaker{Q}(x::AbstractArray, y::AbstractArray ; gradients::Union{Missing,AbstractArray} = missing, left_gradient::Union{Missing,Real} = missing, right_gradient::Union{Missing,Real} = missing,
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve)) where Q<:Real
got_both = (!).(ismissing.(x) .| ismissing.(y))
@inbounds new_x = convert.(Q, x[got_both])
@inbounds new_y = convert.(Q, y[got_both])
@inbounds new_gradients = ismissing(gradients) ? missing : convert.(Q, gradients[got_both])
converted_left = ismissing(left_gradient) ? missing : convert(Q, left_gradient)
converted_right = ismissing(right_gradient) ? missing : convert(Q, right_gradient)
new_left = got_both[1] ? converted_left : missing
new_right = got_both[length(got_both)] ? converted_right : missing
if length(new_x) == 0 error("After removing missing elements there are no points left to estimate schumaker spline") end
return Schumaker(new_x , new_y; gradients = new_gradients , extrapolation = extrapolation, left_gradient = new_left, right_gradient = new_right)
end
function Schumaker(x::Union{AbstractArray{T,1},AbstractArray{Union{Missing,T},1}},y::Union{AbstractArray{R,1},AbstractArray{Union{Missing,R},1}} ; gradients::Union{Missing,AbstractArray{<:Real,1}} = missing, left_gradient::Union{Missing,Real} = missing, right_gradient::Union{Missing,Real} = missing,
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve)) where T<:Real where R<:Real
promo_type = promote_type(T,R)
return Schumaker{promo_type}(x, y; gradients = gradients, extrapolation = extrapolation, left_gradient = left_gradient, right_gradient = right_gradient)
end
end
Base.broadcastable(e::Schumaker) = Ref(e)
"""
Evaluates the spline at a point. The point can be specified as a Real number (Int, Float, etc) or a Date.
Derivatives can also be taken.
### Inputs
* `spline` - A `Schumaker` type spline
* `PointToExamine` - The point at which to evaluate the spline (or its derivative)
* `derivative` - The derivative being sought. This should be 0 to just evaluate the spline, 1 for the first derivative or 2 for the second derivative.
Higher derivatives are all zero (because it is a quadratic spline). Negative values do not give integrals; use `evaluate_integral` instead.
### Returns
* A value of the spline or appropriate derivative in the same format as specified in the spline.
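### Examples
An illustrative sketch of evaluation (the data are made up for the example):
```julia
s = Schumaker([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
evaluate(s, 1.5)     # the spline's value at 1.5
evaluate(s, 1.5, 1)  # the first derivative at 1.5
s(1.5)               # the callable syntax is equivalent to evaluate(s, 1.5)
```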
"""
function evaluate(spline::Schumaker, PointToExamine::T, derivative::Integer = 0) where T<:Real
# Derivative of 0 means normal spline evaluation.
# Derivative of 1, 2 are first and second derivatives respectively.
IntervalNum = searchsortedlast(spline.IntStarts_, PointToExamine)
IntervalNum = max(IntervalNum, 1)
@inbounds xmt = AbstractFloat(PointToExamine - spline.IntStarts_[IntervalNum])
Coefs = @inbounds spline.coefficient_matrix_[IntervalNum,:]
if derivative == 0
return sum(@. Coefs .* [xmt^2, xmt, 1.0])
elseif derivative == 1
return sum(@. Coefs .* [2*xmt, 1.0, 0.0])
elseif derivative == 2
return sum(@. Coefs .* [2.0, 0.0, 0.0])
elseif derivative < 0
error("This function cannot do integrals. Use evaluate_integral instead")
else
return 0.0
end
end
function evaluate(spline::Schumaker, PointToExamine::Date, derivative::Int = 0)
days_as_int = Dates.days.(PointToExamine)
return evaluate(spline,days_as_int, derivative)
end
function (s::Schumaker)(x::Union{Real,Date}, deriv::Integer = 0)
return evaluate(s, x, deriv)
end
"""
Estimates the integral of the spline between lhs and rhs. These end points can be input
as Reals or Dates.
### Inputs
* `spline` - A Schumaker type spline
* `lhs` - The left hand limit of the integral
* `rhs` - The right hand limit of the integral
### Returns
* The value of the integral.
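### Examples
A quick sketch. The spline below interpolates f(x) = x exactly, so the integral over [0, 2] should be about 2:
```julia
s = Schumaker(collect(0.0:0.5:5.0), collect(0.0:0.5:5.0))
evaluate_integral(s, 0.0, 2.0)  # roughly 2.0
```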
"""
function evaluate_integral(spline::Schumaker, lhs::Real, rhs::Real)
first_interval = searchsortedlast(spline.IntStarts_, lhs)
last_interval = searchsortedlast(spline.IntStarts_, rhs)
number_of_intervals = last_interval - first_interval
if number_of_intervals == 0
return section_integral(spline , lhs, rhs)
elseif number_of_intervals == 1
@inbounds first = section_integral(spline , lhs , spline.IntStarts_[first_interval + 1])
@inbounds lst = section_integral(spline , spline.IntStarts_[last_interval] , rhs)
return first + lst
else
interior_areas = 0.0
@inbounds first = section_integral(spline , lhs , spline.IntStarts_[first_interval + 1])
@simd for i in 1:(number_of_intervals-1)
@inbounds sec_int = section_integral(spline , spline.IntStarts_[first_interval + i] , spline.IntStarts_[first_interval + i+1] )
interior_areas += sec_int
end
@inbounds last = section_integral(spline , spline.IntStarts_[last_interval] , rhs)
return first + interior_areas + last
end
end
function evaluate_integral(spline::Schumaker, lhs::Date, rhs::Date)
return evaluate_integral(spline, Dates.days.(lhs) , Dates.days.(rhs))
end
function section_integral(spline::Schumaker, lhs::Real, rhs::Real)
# Note that the lhs is used to infer the interval.
IntervalNum = searchsortedlast(spline.IntStarts_, lhs)
IntervalNum = max(IntervalNum, 1)
@inbounds Coefs = spline.coefficient_matrix_[ IntervalNum , :]
@inbounds r_xmt = rhs - spline.IntStarts_[IntervalNum]
@inbounds l_xmt = lhs - spline.IntStarts_[IntervalNum]
Lint_array = [(1/3)*l_xmt^3, 0.5*l_xmt^2, l_xmt]
Rint_array = [(1/3)*r_xmt^3, 0.5*r_xmt^2, r_xmt]
return sum(@. Coefs .* Rint_array) - sum(@. Coefs .* Lint_array)
end
"""
find_derivative_spline(spline::Schumaker)
Returns a `Schumaker` spline that is the derivative of the input spline.
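### Examples
A small sketch (illustrative data):
```julia
s = Schumaker([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
ds = find_derivative_spline(s)
ds(1.5)  # should match evaluate(s, 1.5, 1)
```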
"""
function find_derivative_spline(spline::Schumaker)
coefficient_matrix = Array{Float64,2}(undef, size(spline.coefficient_matrix_)... )
coefficient_matrix[:,3] .= spline.coefficient_matrix_[:,2]
coefficient_matrix[:,2] .= 2 .* spline.coefficient_matrix_[:,1]
coefficient_matrix[:,1] .= 0.0
return Schumaker(spline.IntStarts_, coefficient_matrix)
end
"""
imputeGradients(x::Vector{T}, y::Vector{T})
Imputes gradients based on a vector of x and y coordinates.
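### Examples
An illustrative call (for y = x^2 the imputed slopes should be near 2x):
```julia
x = collect(0.0:1.0:4.0)
imputeGradients(x, x.^2)
```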
"""
function imputeGradients(x::Vector{<:Real}, y::Vector{<:Real})
n = length(x)
# Judd (1998), page 233, second last equation
@inbounds L = sqrt.( (x[2:n]-x[1:(n-1)]).^2 + (y[2:n]-y[1:(n-1)]).^2)
# Judd (1998), page 233, last equation
@inbounds d = (y[2:n]-y[1:(n-1)])./(x[2:n]-x[1:(n-1)])
# Judd (1998), page 234, Eqn 6.11.6
@inbounds Conditionsi = d[1:(n-2)].*d[2:(n-1)] .> 0
@inbounds MiddleSiwithoutApplyingCondition = (L[1:(n-2)].*d[1:(n-2)]+L[2:(n-1)].* d[2:(n-1)]) ./ (L[1:(n-2)]+L[2:(n-1)])
sb = Conditionsi .* MiddleSiwithoutApplyingCondition
# Judd (1998), page 234, Second Equation line plus 6.11.6 gives this array of slopes.
@inbounds ff = [((-sb[1]+3*d[1])/2); sb ; ((3*d[n-1]-sb[n-2])/2)]
return ff
end
"""
Splits an interval into 2 subintervals and creates the quadratic coefficients
### Inputs
* `gradients` - A 2 entry vector with gradients at either end of the interval
* `y` - A 2 entry vector with y values at either end of the interval
* `x` - A 2 entry vector with x values at either end of the interval
### Returns
* A 2 x 4 matrix. The first column is the x values of start of the two subintervals. The last 3 columns are quadratic coefficients in two subintervals.
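### Examples
A hand-checkable sketch: with endpoint slopes 0 and 2 this reproduces x^2 on [0, 1], and the knot collapses to the right endpoint so a single row is returned:
```julia
schumakerIndInterval([0.0, 2.0], [0.0, 1.0], [0.0, 1.0])
```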
"""
function schumakerIndInterval(gradients::Vector{<:Real}, y::Vector{<:Real}, x::Vector{<:Real})
    # The schumakerIndInterval function takes in each interval individually
# and returns the location of the knot as well as the quadratic coefficients in each subinterval.
# Judd (1998), page 232, Lemma 6.11.1 provides this if condition:
if (sum(gradients)*(x[2]-x[1]) == 2*(y[2]-y[1]))
tsi = x[2]
else
# Judd (1998), page 233, Algorithm 6.3 along with equations 6.11.4 and 6.11.5 provide this whole section
@inbounds delta = (y[2] -y[1])/(x[2]-x[1])
@inbounds Condition = ((gradients[1]-delta)*(gradients[2]-delta) >= 0)
@inbounds Condition2 = abs(gradients[2]-delta) < abs(gradients[1]-delta)
if (Condition)
tsi = sum(x)/2
elseif (Condition2)
@inbounds tsi = (x[1] + (x[2]-x[1])*(gradients[2]-delta)/(gradients[2]-gradients[1]))
else
@inbounds tsi = (x[2] + (x[2]-x[1])*(gradients[1]-delta)/(gradients[2]-gradients[1]))
end
end
# Judd (1998), page 232, 3rd last equation of page.
alpha = tsi-x[1]
beta = x[2]-tsi
# Judd (1998), page 232, 4th last equation of page.
@inbounds sbar = (2*(y[2]-y[1])-(alpha*gradients[1]+beta*gradients[2]))/(x[2]-x[1])
# Judd (1998), page 232, 3rd equation of page. (C1, B1, A1)
@inbounds Coeffs1 = [ (sbar-gradients[1])/(2*alpha) gradients[1] y[1] ]
if (beta == 0)
Coeffs2 = Coeffs1
else
# Judd (1998), page 232, 4th equation of page. (C2, B2, A2)
@inbounds Coeffs2 = [ (gradients[2]-sbar)/(2*beta) sbar Coeffs1 * [alpha^2, alpha, 1] ]
end
Machine4Epsilon = 4*eps()
if (tsi < x[1] + Machine4Epsilon )
return [x[1] Coeffs2]
elseif (tsi + Machine4Epsilon > x[2] )
return [x[1] Coeffs1]
else
return [x[1] Coeffs1 ; tsi Coeffs2]
end
end
"""
Calls `schumakerIndInterval` many times to get the full set of spline intervals and coefficients. Then calls `extrapolate` for out-of-sample behaviour.
### Inputs
* `gradients` - A vector of gradients at each point
* `x` - A vector of `x` coordinates
* `y` - A vector of `y` coordinates
* `extrapolation` - A tuple of two `Schumaker_ExtrapolationSchemes` enums (Curve, Linear or Constant) that gives behaviour outside of the interpolation range.
### Returns
* A vector of interval starts
* A matrix of all coefficients
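### Examples
A sketch of direct use (normally this is called internally by the `Schumaker` constructor). With exact gradients for x^2 the spline reproduces the parabola:
```julia
x = [0.0, 1.0, 2.0]
starts, coeffs = getCoefficientMatrix(x, x.^2, 2 .* x, (Curve, Curve))
```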
"""
function getCoefficientMatrix(x::Array{<:Real,1}, y::Array{<:Real,1}, gradients::Array{<:Real,1},
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes})
n = length(x)
fullMatrix = schumakerIndInterval([gradients[1], gradients[2]], [y[1], y[2]], [x[1], x[2]] )
for intrval = 2:(n-1)
xs = [ x[intrval] , x[intrval + 1] ]
ys = [ y[intrval], y[intrval + 1] ]
grads = [ gradients[intrval], gradients[intrval + 1] ]
intMatrix = schumakerIndInterval(grads,ys,xs)
fullMatrix = vcat(fullMatrix,intMatrix)
end
fullMatrix = extrapolate(fullMatrix, extrapolation, x[n], y)
return fullMatrix[:,1], fullMatrix[:,2:4]
end
"""
Adds a row to the top and bottom of the coefficient matrix to give out-of-sample prediction.
### Inputs
* `fullMatrix` - The matrix of interval starts and quadratic coefficients built in `getCoefficientMatrix`.
* `extrapolation` - A tuple with two enums in (Curve, Linear, Constant) that gives behaviour outside of interpolation range.
* `x` - A vector of x coordinates
* `y` - A vector of y coordinates
### Returns
* A new version of fullMatrix with out of sample prediction built into it.
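### Examples
A minimal sketch: a single-interval matrix representing x^2 on [0, 1], extended with Constant extrapolation on the left and Linear on the right:
```julia
m = [0.0 1.0 0.0 0.0]
extrapolate(m, (Constant, Linear), 1.0, [0.0, 1.0])
```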
"""
function extrapolate(fullMatrix::Array{<:Real,2}, extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes}, Topx::Real, y::Array{<:Real,1})
# In the fullMatrix the first column are the x values and then the next 3 columns are a, b, c in the expression a(x-start)^2 + b(x-start) + c
if (extrapolation[1] == Curve) && (extrapolation[2] == Curve)
return fullMatrix
end
# Preliminaries used throughout
dim = size(fullMatrix)[1]
Botx = fullMatrix[1,1]
Boty = y[1]
Topy = y[length(y)]
    # Initialising variables so their scope is not restricted to the if-statement blocks below.
BotB = 0.0
BotC = 0.0
TopA = 0.0
TopB = 0.0
TopC = 0.0
# Now creating the extrapolation to the left.
if extrapolation[1] == Linear
        BotB = fullMatrix[1 , 3]
        BotC = Boty - BotB * 1e-10 # The new bottom row starts 1e-10 below Botx, so this offset makes the extrapolated value at Botx equal to Boty.
elseif extrapolation[1] == Constant
BotB = 0.0
BotC = Boty
end # Note for the curve case we will simply not append the new block.
BotRow = [Botx-1e-10, 0.0, BotB, BotC]
# Now doing the extrapolation to the right.
if extrapolation[2] == Linear # This is a bit more complicated than before because the
# coefficients by themselves give the gradients at the left
# of the interval. Here we want the gradient at the right.
last_interval_width = Topx - fullMatrix[dim , 1]
@inbounds grad_at_right = 2 * fullMatrix[dim , 2] * last_interval_width + fullMatrix[dim , 3]
TopB = grad_at_right
TopC = Topy
elseif extrapolation[2] == Constant
TopB = 0.0
TopC = Topy
elseif extrapolation[2] == Curve # We are just going to add this one on regardless as otherwise end of data interval information is lost.
Gap = Topx - fullMatrix[dim ,1]
TopA = fullMatrix[dim ,2]
@inbounds TopB = fullMatrix[dim ,3] + 2 * TopA * Gap
@inbounds TopC = fullMatrix[dim ,4] + TopB * Gap - TopA * Gap*Gap
end
TopRow = [ Topx, TopA ,TopB ,TopC]
# Appending blocks and returning.
if extrapolation[1] != Curve fullMatrix = vcat(BotRow' , fullMatrix) end
fullMatrix = vcat(fullMatrix, TopRow')
return fullMatrix
end
# ===== File: SchumakerSpline.jl (main module file) =====
module SchumakerSpline
using Plots
using Dates
include("SchumakerFunctions.jl")
export Schumaker_ExtrapolationSchemes, Curve, Linear, Constant
export Schumaker, evaluate, evaluate_integral
include("roots_optima_intercepts.jl")
export find_derivative_spline, find_roots, find_optima, get_crossover_in_interval, get_intersection_points
include("map_to_shape.jl")
export reshape_values
include("splice_splines.jl")
export splice_splines
include("algebra.jl")
export +,-,*,/
include("plotting.jl")
export plot
include("higher_dimensions.jl")
export Schumaker2d
end
# ===== File: algebra.jl =====
import Base.+, Base.-, Base./, Base.*
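# These overloads shift and scale a spline by a constant. An illustrative sketch of what they permit:
#   s = Schumaker([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
#   (2 * s + 1)(1.5)   # the same value as 2 * s(1.5) + 1 (up to floating point)
# Only spline-with-scalar algebra is defined here; spline-with-spline operations are not.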
function +(spl::Schumaker, num::Real)
new_coefficients_ = hcat(spl.coefficient_matrix_[:,1:2], spl.coefficient_matrix_[:,3] .+ num)
return Schumaker(spl.IntStarts_, new_coefficients_)
end
function -(spl::Schumaker, num::Real)
new_coefficients_ = hcat(spl.coefficient_matrix_[:,1:2], spl.coefficient_matrix_[:,3] .- num)
return Schumaker(spl.IntStarts_, new_coefficients_)
end
function *(spl::Schumaker, num::Real)
new_coefficients_ = spl.coefficient_matrix_ .* num
return Schumaker(spl.IntStarts_, new_coefficients_)
end
function /(spl::Schumaker, num::Real)
new_coefficients_ = spl.coefficient_matrix_ ./ num
return Schumaker(spl.IntStarts_, new_coefficients_)
end
function +(num::Real, spl::Schumaker)
return +(spl,num)
end
function -(num::Real, spl::Schumaker)
return (-1)*spl + num
end
function *(num::Real, spl::Schumaker)
return *(spl,num)
end
# ===== File: higher_dimensions.jl =====
"""
This uses a combination of Schumaker Splines to cover a 2 dimensional space.
We have a grid of splines. We first evaluate in one dimension (which leads us to a point between two adjacent splines).
Then we evaluate each of the two splines and interpolate.
### Members
* `IntStarts_` - A vector with the coordinate of each Schumaker spline.
* `schumakers` - A vector of schumaker splines.
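### Examples
An illustrative sketch on a multiplicative grid (so the surface is z = x * y):
```julia
xs = collect(1.0:5.0)
ys = collect(1.0:5.0)
s2d = Schumaker2d(xs, ys, xs * ys')
s2d(2.5, 3.0)  # roughly 7.5
```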
"""
struct Schumaker2d{T<:AbstractFloat}
IntStarts_::Array{T,1}
schumakers::Array{Schumaker{T},1}
function Schumaker2d(x_row::Vector{R}, x_col::Vector{Y}, ygrid::Array{T,2}; bycol=true,
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve)) where T<:Real where R<:Real where Y<:Real
pro = promote_type(promote_type(T,Y),R)
shape = size(ygrid)
dimension = 1 + Integer(bycol)
schums = Array{Schumaker{pro},1}(undef, shape[dimension])
for i in 1:shape[dimension]
ys = bycol ? ygrid[:,i] : ygrid[i,:]
xs = bycol ? x_row : x_col
schums[i] = Schumaker(xs, ys; extrapolation = extrapolation)
end
return new{pro}(bycol ? x_col : x_row,schums)
end
function Schumaker2d(x_row::Vector{R}, x_col::Vector{Y}, ygrid::Array{T,2}; bycol=true,
extrapolation::Tuple{Schumaker_ExtrapolationSchemes,Schumaker_ExtrapolationSchemes} = (Curve,Curve)) where T<:Integer where R<:Integer where Y<:Integer
fl = typeof(AbstractFloat(1.0))
return Schumaker2d( fl.(x_row), fl.(x_col), fl.(ygrid); bycol = bycol, extrapolation = extrapolation )
end
end
"""
evaluate(spline::Schumaker2d, p1::Real, p2::Real)
### Inputs
* `spline` - A Schumaker2d
* `p1` - The coordinate in the first dimension.
* `p2` - The coordinate in the second dimension.
### Returns
* A scalar
"""
function evaluate(spline::Schumaker2d, p1::Real, p2::Real)
distances = abs.(spline.IntStarts_ .- p1)
# Direct hits
direct_hits = findall(distances .< 100*eps())
if length(direct_hits) > 0
return spline.schumakers[direct_hits[1]](p2)
end
closest = findall(distances .< sort(distances)[2] + 100*eps())[[1,2]]
dists = distances[closest]
ys = map(s -> s(p2), spline.schumakers[closest])
    # Linear interpolation between the two nearest splines: each value is weighted by the distance to the other spline.
    y = sum(ys .* ( reverse(dists) ) ./ sum(dists))
return y
end
function (s::Schumaker2d)(p1::Real, p2::Real)
return evaluate(s, p1, p2)
end
# ===== File: map_to_shape.jl =====
shape_map(x,upper,lower) = min(max(x,lower), upper)
"""
reshape_values(xvals::Vector{<:Real}, yvals::Vector{<:Real}; increasing::Bool = true,
concave::Bool = true, shape_map::Function = shape_map)
This reshapes a vector of y values. For instance, if we are doing fixed point acceleration that
should result in a monotonic concave function (e.g. the consumption smoothing problem from the documentation examples),
then we may end up with occasional non-monotonic or non-concave values due to a dodgy optimiser or some other numerical issue.
So we can use reshape_values to adjust the values that cannot be correct under the desired shape.
### Inputs
* `xvals` - A vector of x coordinates.
* `yvals` - A vector of y coordinates
* `increasing` - Should the y values be increasing. If false then they must be decreasing
* `concave` - Should the y values be concave. If false then they must be convex
* `shape_map` - A function used to adjust values to be increasing-concave (or whatever settings)
### Returns
* An updated vector of y values.
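### Examples
A small sketch (the third value breaks monotonicity, so it gets pulled back to the admissible bound):
```julia
xvals = [1.0, 2.0, 3.0, 4.0]
yvals = [1.0, 2.0, 1.5, 2.5]
reshape_values(xvals, yvals)  # increasing and concave by default
```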
"""
function reshape_values(xvals::Vector{<:Real}, yvals::Vector{<:Real}; increasing::Bool = true,
concave::Bool = true, shape_map::Function = shape_map)
lenlen = length(yvals)
new_shape_vec = Array{Float64,1}(undef, lenlen)
if lenlen > 0 new_shape_vec[1] = yvals[1] end
if lenlen > 1
upper = increasing ? Inf : new_shape_vec[1]
lower = increasing ? new_shape_vec[1] : -Inf
new_shape_vec[2] = shape_map(yvals[2],upper, lower)
end
for i in 3:lenlen
stepp = xvals[i] - xvals[i-1]
previous_grad = (new_shape_vec[i-1]-new_shape_vec[i-2])/(xvals[i-1] - xvals[i-2])
upper = new_shape_vec[i-1] + (increasing ? (concave ? previous_grad * stepp : Inf) : (concave ? previous_grad * stepp : 0))
lower = new_shape_vec[i-1] + (increasing ? (concave ? 0 : previous_grad * stepp) : (concave ? -Inf : previous_grad * stepp))
new_shape_vec[i] = shape_map(yvals[i],upper, lower)
end
return new_shape_vec
end
# ===== File: plotting.jl =====
import Plots.plot
"""
plot(s1::Schumaker, interval::Tuple{R,R} = (s1.IntStarts_[1], s1.IntStarts_[length(s1.IntStarts_)]); derivs::Bool = false, grid_len::Integer = 200, plot_options::NamedTuple = (label = "Spline",), deriv_plot_options::NamedTuple = (label = "Spline - 1st deriv",),
deriv2_plot_options::NamedTuple = (label = "Spline - 2nd deriv",), plt = missing) where R<:Real
plot(s1::Schumaker, grid::AbstractArray{R,1}; derivs::Bool = false, plot_options::NamedTuple = (label = "Spline",), deriv_plot_options::NamedTuple = (label = "Spline - 1st deriv",),
deriv2_plot_options::NamedTuple = (label = "Spline - 2nd deriv",), plt = missing) where R<:Real
### Inputs
* `s1` - The Schumaker spline to chart
* `interval` - The interval over which to chart it.
* `grid_len` - The number of grid points to be used in plotting.
* `grid` - The grid. If used this is instead of the `interval` and `grid_len`
* `derivs` - Should the derivative splines also be plotted
* `plot_options` - The options to use in plotting
* `deriv_plot_options` - The options to use in the first derivative plot.
* `deriv2_plot_options`- The options to use in the second derivative plot.
* `plt` - A plot object. Feed this in if you want to add to a plot (rather than make a new one.)
### Returns
* A plot
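### Examples
A sketch of typical use (the spline here is illustrative):
```julia
s = Schumaker(collect(0.0:0.2:2.0), sqrt.(collect(0.0:0.2:2.0)))
plt = plot(s; derivs = true)           # spline plus first and second derivatives
plot(s + 0.5, (0.0, 1.0); plt = plt)   # add a shifted spline to the same chart
```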
"""
function plot(s1::Schumaker, interval::Tuple{R,R} = (s1.IntStarts_[1], s1.IntStarts_[length(s1.IntStarts_)]); derivs::Bool = false, grid_len::Integer = 200, plot_options::NamedTuple = (label = "Spline",), deriv_plot_options::NamedTuple = (label = "Spline - 1st deriv",),
deriv2_plot_options::NamedTuple = (label = "Spline - 2nd deriv",), plt = missing) where R<:Real
grid = collect(range(interval[1], interval[2], length=grid_len))
return plot(s1, grid; derivs = derivs, plot_options = plot_options, deriv_plot_options = deriv_plot_options, deriv2_plot_options = deriv2_plot_options, plt = plt)
end
function plot(s1::Schumaker, grid::AbstractArray{R,1}; derivs::Bool = false, plot_options::NamedTuple = (label = "Spline",), deriv_plot_options::NamedTuple = (label = "Spline - 1st deriv",),
deriv2_plot_options::NamedTuple = (label = "Spline - 2nd deriv",), plt = missing) where R<:Real
evals = s1.(grid)
plt = ismissing(plt) ? plot(grid, evals; plot_options...) : plot!(plt, grid, evals; plot_options...)
if derivs
evals_1 = evaluate.(s1,grid, 1)
plt = plot!(plt, grid, evals_1; deriv_plot_options...)
evals_2 = evaluate.(s1,grid, 2)
plt = plot!(plt, grid, evals_2; deriv2_plot_options...)
return plt
else
return plt
end
end
"""
plot(ss::Vector{Schumaker}, interval::Tuple{R,R} = (ss[1].IntStarts_[1], ss[1].IntStarts_[length(ss[1].IntStarts_)]); derivs::Bool = false, grid_len::Integer = 200, plot_options::Union{AbstractArray{Tuple,1},Missing} = missing,
deriv_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, deriv2_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, plt = missing) where R<:Real
plot(ss::Vector{Schumaker}, grid::Vector{R}; derivs::Bool = false, plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, deriv_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing,
deriv2_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, plt = missing) where R<:Real
### Inputs
* `ss` - a vector of Schumaker splines to chart
* `interval` - The interval over which to chart it.
* `grid_len` - The number of grid points to be used in plotting.
* `grid` - The grid. If used this is instead of the `interval` and `grid_len`
* `derivs` - Should the derivative splines also be plotted
* `plot_options` - The options to use in plotting
* `deriv_plot_options` - The options to use in the first derivative plot.
* `deriv2_plot_options`- The options to use in the second derivative plot.
* `plt` - A plot object. Feed this in if you want to add to a plot (rather than make a new one.)
### Returns
* A plot
"""
function plot(ss::Vector{Schumaker}, interval::Tuple{R,R} = (ss[1].IntStarts_[1], ss[1].IntStarts_[length(ss[1].IntStarts_)]); derivs::Bool = false, grid_len::Integer = 200, plot_options::Union{AbstractArray{Tuple,1},Missing} = missing,
deriv_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, deriv2_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, plt = missing) where R<:Real
grid = collect(range(interval[1], interval[2], length=grid_len))
return plot(ss, grid; derivs = derivs, plot_options = plot_options, deriv_plot_options = deriv_plot_options, deriv2_plot_options = deriv2_plot_options, plt = plt)
end
function plot(ss::Vector{Schumaker}, grid::Vector{R}; derivs::Bool = false, plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, deriv_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing,
deriv2_plot_options::Union{AbstractArray{Tuple,1},Missing} = missing, plt = missing) where R<:Real
plt = ismissing(plt) ? plot() : plt
for i in 1:length(ss)
plot_options_i = ismissing(plot_options) ? (label = string("Spline ", i),) : plot_options[i]
deriv_plot_options_i = ismissing(deriv_plot_options) ? (label = string("Spline ", i, " - 1st deriv"),) : deriv_plot_options[i]
deriv2_plot_options_i = ismissing(deriv2_plot_options) ? (label = string("Spline ", i, " - 2nd deriv"),) : deriv2_plot_options[i]
plt = plot(ss[i], grid; derivs = derivs, plot_options = plot_options_i, deriv_plot_options = deriv_plot_options_i, deriv2_plot_options = deriv2_plot_options_i, plt = plt)
end
return plt
end
# ===== File: roots_optima_intercepts.jl =====
"""
test_if_intercept_in_interval(a1::Real,b1::Real,c1::Real,c2::Real,interval_width::Real)
This tests if a spline could have passed over zero in a certain interval. Here a1, b1 and c1 are the quadratic coefficients of the spline on the interval, c2 is the spline's level at the right end of the interval and interval_width is the width of the interval.
Note that this function will not detect zeros that are precisely on the endpoints.
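### Examples
A hand-checkable sketch: x^2 - 1 on an interval of width 2, starting where the spline's value is -1 and ending where it is 3, must cross zero:
```julia
test_if_intercept_in_interval(1.0, 0.0, -1.0, 3.0, 2.0)  # true
```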
"""
function test_if_intercept_in_interval(a1::Real,b1::Real,c1::Real,c2::Real,interval_width::Real)
if (sign(c1) == 0) || abs(sign(c1) - sign(c2)) > 1.5 return true end # If we cross the barrier then there is at least one intercept in interval.
if sign(b1) == sign(2*a1*(interval_width)+b1) return false end # If we did not cross the barrier and the spline is monotonic then we did not cross
# Now we have the case where the gradient switches sign within an interval but the sign of the endpoints did not change.
# The easiest way to test will be to find the vertex of the parabola. See if it is within the interval and of a different sign to the endpoints.
# We don't actually have to test if the vertex is in the interval however - it has to be for the gradient sign to have flipped.
vertex_x = -b1/(2*a1) # Note that this is relative to x1.
vertex_y = a1 * (vertex_x)^2 + b1*(vertex_x) + c1
is_vertex_of_opposite_sign_in_y = abs(sign(c1) - sign(vertex_y)) > 0.5
return is_vertex_of_opposite_sign_in_y
end
"""
find_roots(spline::Schumaker{T}; root_value::Real = 0.0, interval::Tuple{<:Real,<:Real} = (spline.IntStarts_[1], spline.IntStarts_[length(spline.IntStarts_)])) where T<:Real
Finds roots. This is handy because in many applications Schumaker splines are monotonic and globally concave/convex, which makes roots easy to find.
`root_value` can be set to find all points at which the function equals that value rather than zero. For instance, set `root_value = 1.0`
to find all points at which the spline has a value of 1.0.
### Inputs
* `spline` - The spline you want to find the roots for.
* `root_value` - What level counts as a root.
* `interval` - What interval to explore for roots.
### Returns
* A `NamedTuple` describing all roots found together with the derivatives and second derivatives at that point.
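### Examples
An illustrative sketch: a roughly parabolic spline with roots near -1 and 1:
```julia
x = collect(range(-3.0, stop = 3.0, length = 100))
s = Schumaker(x, x.^2 .- 1.0)
find_roots(s).roots               # approximately [-1.0, 1.0]
find_roots(s; root_value = 3.0)   # points where the spline equals 3 instead
```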
"""
function find_roots(spline::Schumaker{T}; root_value::Real = 0.0, interval::Tuple{<:Real,<:Real} = (spline.IntStarts_[1], spline.IntStarts_[length(spline.IntStarts_)])) where T<:Real
roots = Array{T,1}(undef,0)
first_derivatives = Array{T,1}(undef,0)
second_derivatives = Array{T,1}(undef,0)
first_interval_start = searchsortedlast(spline.IntStarts_, interval[1])
last_interval_start = searchsortedlast(spline.IntStarts_, interval[2])
len = length(spline.IntStarts_)
go_from = max(1,first_interval_start)
go_until = last_interval_start < len ? last_interval_start : len-1
constants = spline.coefficient_matrix_[:,3]
constants_minus_root = constants .- root_value
for i in go_from:go_until
a1 = spline.coefficient_matrix_[i,1]
b1 = spline.coefficient_matrix_[i,2]
c1 = constants_minus_root[i]
c2 = constants_minus_root[i+1]
        interval_width = spline.IntStarts_[i+1] - spline.IntStarts_[i] + 1000*eps() # This 1000*eps() padding is here because of problems where one segment would predict a root an epsilon inside
        # the next segment while the next segment (correctly) places it in the previous one, so neither picks up the root. With the padding we potentially record a root twice
        # and then remove nearby duplicates later on. 10*eps() still gave dodgy results, so it was boosted.
if test_if_intercept_in_interval(a1,b1,c1,c2,interval_width)
if abs(a1) > eps() # Is it quadratic
det = sqrt(b1^2 - 4*a1*c1)
both_roots = [(-b1 + det) / (2*a1), (-b1 - det) / (2*a1)] # The x coordinates here are relative to spline.IntStarts_[i].
left_root = minimum(both_roots)
right_root = maximum(both_roots)
# This means that the endpoints are double counted. Thus we will have to remove them later.
if (left_root >= 0) && (left_root <= interval_width)
append!(roots, spline.IntStarts_[i] + left_root)
append!(first_derivatives, 2 * a1 * left_root + b1)
append!(second_derivatives, 2 * a1)
end
if (right_root >= 0) && (right_root <= interval_width)
append!(roots, spline.IntStarts_[i] + right_root)
append!(first_derivatives, 2 * a1 * right_root + b1)
append!(second_derivatives, 2 * a1)
end
else # Is it linear? Note it cannot be constant or else it could not have jumped past zero in the interval.
new_root = spline.IntStarts_[i] - c1/b1
if !((length(roots) > 0) && (abs(new_root - last(roots)) < 1e-5))
append!(roots, spline.IntStarts_[i] - c1/b1)
append!(first_derivatives, b1)
append!(second_derivatives, 0.0)
end
end
end
end
# Now adding on roots that occur after the end of the last interval.
end_of_last_interval = spline.IntStarts_[length(spline.IntStarts_)]
if interval[2] >= end_of_last_interval
a = spline.coefficient_matrix_[len,1]
b = spline.coefficient_matrix_[len,2]
c = constants_minus_root[len]
        if abs(a) > eps() # Is it quadratic
            discriminant = b^2 - 4*a*c
            if discriminant >= 0 # A negative discriminant means the tail quadratic never reaches the root value.
                root_determinant = sqrt(discriminant)
                end_roots = end_of_last_interval .+ [(-b - root_determinant)/(2*a), (-b + root_determinant)/(2*a)]
                end_roots2 = end_roots[(end_roots .>= end_of_last_interval) .& (end_roots .<= interval[2])]
                num_new_roots = length(end_roots2)
                if num_new_roots > 0
                    append!(roots, end_roots2)
                    # The coefficients are relative to the start of the last interval, so we differentiate at the offset from that start.
                    append!(first_derivatives, (2 * a) .* (end_roots2 .- end_of_last_interval) .+ b)
                    append!(second_derivatives, repeat([2*a], num_new_roots))
                end
            end
        elseif abs(b) > eps() # If it is linear.
            new_root = [-c/b + end_of_last_interval]
            new_root2 = new_root[(new_root .>= end_of_last_interval) .& (new_root .<= interval[2])]
            if length(new_root2) > 0
                append!(roots, new_root2)
                append!(first_derivatives, (2 * a) .* (new_root2 .- end_of_last_interval) .+ b)
                append!(second_derivatives, 2*a)
            end
        end # We do nothing in the case that we have a constant - no chance of root.
end
# Sometimes if there are two roots within an interval and the endpoint of the interval is also here we get too many roots.
# So here we get rid of stuff we don't want.
if length(roots) == 0
return (roots = roots, first_derivatives = first_derivatives, second_derivatives = second_derivatives)
else
roots_in_interval = (roots .>= interval[1]) .& (roots .<= interval[2])
if length(roots) > 1
gaps = roots[2:length(roots)] .- roots[1:(length(roots)-1)]
for i in 1:length(gaps)
if abs(gaps[i]) < 10000 * eps() roots_in_interval[i+1] = false end
end
end
return (roots = roots[roots_in_interval], first_derivatives = first_derivatives[roots_in_interval], second_derivatives = second_derivatives[roots_in_interval])
end
end
"""
find_optima(spline::Schumaker)
Finds optima. This is handy because in many applications Schumaker splines are globally concave/convex, and so it is easy to find optima.
### Inputs
* `spline` - The spline you want to find optima for.
* `interval` - The interval over which you want to look for optima.
### Returns
* A NamedTuple containing the optima and the types of the optima (:Maximum or :Minimum)
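### Examples
An illustrative sketch: a roughly parabolic spline with its minimum near x = 1:
```julia
x = collect(range(-3.0, stop = 3.0, length = 100))
s = Schumaker(x, (x .- 1.0).^2)
res = find_optima(s)
res.optima        # approximately [1.0]
res.optima_types  # [:Minimum]
```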
"""
function find_optima(spline::Schumaker; interval::Tuple{<:Real,<:Real} = (spline.IntStarts_[1], spline.IntStarts_[length(spline.IntStarts_)]))
deriv_spline = find_derivative_spline(spline)
root_info = find_roots(deriv_spline; interval = interval)
optima = root_info.roots
optima_types = Array{Symbol,1}(undef,length(optima))
for i in 1:length(optima)
if root_info.first_derivatives[i] > 1e-15
optima_types[i] = :Minimum
elseif root_info.first_derivatives[i] < -1e-15
            optima_types[i] = :Maximum
        else
            optima_types[i] = :SaddlePoint
end
end
return (optima = optima, optima_types = optima_types)
end
## Finding intercepts
"""
quadratic_formula_roots(a::Real,b::Real,c::Real)
A basic application of the textbook quadratic formula.
### Inputs
* `a` - The quadratic term
* `b` - The linear term
* `c` - The constant
### Returns
* A vector with the roots.
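### Examples
A hand-checkable sketch:
```julia
quadratic_formula_roots(1.0, -3.0, 2.0)  # returns [2.0, 1.0]
```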
"""
function quadratic_formula_roots(a::Real,b::Real,c::Real)
determin = sqrt(b^2 - 4*a*c)
roots = [(-b + determin)/(2*a), (-b - determin)/(2*a)]
return roots
end
"""
get_crossover_in_interval(s1::Schumaker{T}, s2::Schumaker{R}, interval::Tuple{U,U}) where T<:Real where R<:Real where U<:Real
Finds the point at which two schumaker splines cross over each other within a single interval.
### Inputs
* `s1` - The first spline
* `s2` - The second spline
* `interval` - The interval you want to examine for crossovers.
### Returns
* A `Vector` describing crossover points.
"""
function get_crossover_in_interval(s1::Schumaker{T}, s2::Schumaker{R}, interval::Tuple{U,U}) where T<:Real where R<:Real where U<:Real
# Getting the coefficients for the first spline.
i = searchsortedlast(s1.IntStarts_, interval[1])
start1 = s1.IntStarts_[i]
a1,b1,c1 = Tuple(s1.coefficient_matrix_[i,:])
# Getting the coefficients for the second spline.
j = searchsortedlast(s2.IntStarts_, interval[1])
start2 = s2.IntStarts_[j]
a2,b2,c2 = Tuple(s2.coefficient_matrix_[j,:])
# Get implied coefficients for the s1 - s2 quadratic. Pretty simple algebra gets this.
# As a helper we define G = start2 - start1. We define A,B,C as coefficients of s1-s2.
# The final spline is in terms of (x-start1).
G = start1 - start2
A = a1 - a2
B = b1 - b2 - 2*a2*G
C = c1 - c2 - a2*(G^2) - b2*G
# Now we need to use quadratic formula to get the roots and pick the root in the interval.
roots = quadratic_formula_roots(A,B,C) .+ start1
roots_in_interval = roots[(roots .>= interval[1]-10*eps()) .& (roots .<= interval[2]+10*eps())]
return roots_in_interval
end
"""
get_intersection_points(s1::Schumaker{T}, s2::Schumaker{R}) where T<:Real where R<:Real
This finds the x coordinates of the points at which spline `s1` intersects spline `s2`.
### Inputs
* `s1` - The first spline
* `s2` - The second spline
### Returns
* Locations of any crossover points.
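### Examples
A quick sketch with two roughly linear splines that cross once:
```julia
s1 = Schumaker([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])  # roughly f(x) = x
s2 = Schumaker([0.0, 1.0, 2.0], [2.0, 1.5, 1.0])  # roughly f(x) = 2 - x/2
get_intersection_points(s1, s2)                   # one crossover near x = 4/3
```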
"""
function get_intersection_points(s1::Schumaker{T}, s2::Schumaker{R}) where T<:Real where R<:Real
# What x locations to loop over
all_starts = sort(unique(vcat(s1.IntStarts_, s2.IntStarts_)))
start_of_overlap = maximum([minimum(s1.IntStarts_), minimum(s2.IntStarts_)])
overlap_starts = all_starts[all_starts .> start_of_overlap]
# Getting a container to return results
promo_type = promote_type(T,R)
locations_of_crossovers = Array{promo_type,1}()
# For the first part what function is higher.
last_one_greater = evaluate(s1, overlap_starts[1]) > evaluate(s2, overlap_starts[1])
for i in 2:length(overlap_starts)
start = overlap_starts[i]
val_1 = evaluate(s1, start)
val_2 = evaluate(s2, start)
# Need to take into account the ordering and record when it flips
one_greater = val_1 > val_2
if one_greater != last_one_greater
interval = Tuple([overlap_starts[i-1], overlap_starts[i]])
crossover = get_crossover_in_interval(s1, s2, interval)
if length(crossover) != 1
error("Only one crossover expected in interval from a continuous spline.")
end
push!(locations_of_crossovers, crossover[1])
end
last_one_greater = one_greater
end
return locations_of_crossovers
end
# ===== File: splice_splines.jl =====
"""
splice_splines(left_spline::Schumaker, right_spline::Schumaker, splice_point::Real)
This puts two splines together. Making a new spline.
Note that the stitched together spline is not guaranteed to be continuous or shape preserving anymore.
### Inputs
* `left_spline` - The spline to use on the left.
* `right_spline` - The spline to use on the right.
* `splice_point` - The x coordinate to stitch at.
### Returns
* A Schumaker struct
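### Examples
An illustrative sketch, splicing at x = 1.0 (note the result need not be continuous there):
```julia
left  = Schumaker([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
right = Schumaker([0.0, 1.0, 2.0], [1.0, 1.0, 1.0])
spliced = splice_splines(left, right, 1.0)
spliced(0.5)  # comes from the left spline
spliced(1.5)  # comes from the right spline
```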
"""
function splice_splines(left_spline::Schumaker, right_spline::Schumaker, splice_point::Real)
end_in_left_spline = searchsortedlast(left_spline.IntStarts_, splice_point)
left_starts = left_spline.IntStarts_[1:end_in_left_spline]
left_coefficients = left_spline.coefficient_matrix_[1:end_in_left_spline,:]
start_in_right = searchsortedlast(right_spline.IntStarts_, splice_point)
right_starts = right_spline.IntStarts_[start_in_right:length(right_spline.IntStarts_)]
right_coefficients = right_spline.coefficient_matrix_[start_in_right:length(right_spline.IntStarts_),:]
# As the splice_point is probably not an interval start in the right_spline we need to adjust.
G = splice_point - right_starts[1]
a = right_coefficients[1,1]
b = right_coefficients[1,2]
c = right_coefficients[1,3]
right_starts[1] = splice_point
right_coefficients[1,2] = 2*G*a + b
right_coefficients[1,3] = a*(G^2) + b*G + c
IntStarts = vcat(left_starts, right_starts)
Coeffs = vcat(left_coefficients, right_coefficients)
return Schumaker(IntStarts, Coeffs)
end
# ===== File: Test_with_Dates.jl =====
using Test
@testset "Testing with dates " begin
using SchumakerSpline
using Dates
tol = 10*eps()
StartDate = Date(2018, 7, 21)
x = Array{Date}(undef,1000)
for i in 1:1000
x[i] = StartDate +Dates.Day(2* (i-1))
end
function f(x::Date)
days_between = Dates.days(x - StartDate)
return log(days_between+1) + sqrt(days_between)
end
y = f.(x)
spline = Schumaker(x,y)
for i in 1:length(x)
@test abs(evaluate(spline, x[i]) - y[i]) < tol
end
# Evaluation with a Float64.
@test isa(evaluate(spline, 11.5), Real)
# Testing second derivatives
second_derivatives = evaluate.(spline, x,2)
@test maximum(second_derivatives) < tol
# Testing Integrals
function analytic_integral(lhs,rhs)
lhs_in_days = Dates.days(lhs - StartDate)
rhs_in_days = Dates.days(rhs - StartDate)
return (rhs_in_days+1)*log(rhs_in_days+1)-rhs_in_days + (2/3)*rhs_in_days^(3/2) - ((lhs_in_days+1)*log(lhs_in_days+1) - lhs_in_days + (2/3)*lhs_in_days^(3/2))
end
lhs = StartDate
rhs = StartDate + Dates.Month(16)
numerical_integral = evaluate_integral(spline, lhs,rhs)
analytical = analytic_integral(lhs,rhs)
@test abs( analytical - numerical_integral ) < 1
## Testing with only one date provided.
x = Array{Date}(undef, 1)
x[1] = Date(2018, 7, 21)
y = Array{Float64}(undef, 1)
y[1] = 0.0
spline = Schumaker(x,y)
@test abs(evaluate(spline, Date(2018, 7, 21))) < tol
@test abs(evaluate(spline, Date(2019, 7, 21))) < tol
@test abs(evaluate(spline, Date(2000, 7, 21))) < tol
## Testing with two dates provided.
x = Array{Date}(undef,2)
x[1] = Date(2018, 7, 21)
x[2] = Date(2018, 8, 21)
y = Array{Float64}(undef,2)
y[1] = 0.0
y[2] = 1.0
spline = Schumaker(x,y)
@test abs(evaluate(spline, Date(2018, 7, 21))) < tol
@test abs(evaluate(spline, Date(2018, 7, 30))) > tol
@test abs(evaluate(spline, Date(2018, 8, 21)) - y[2]) < tol
@test abs(evaluate(spline, Date(2019, 8, 21)) - y[2]) > tol
spline = Schumaker(x, y , extrapolation = (Constant,Constant))
@test abs(evaluate(spline, Date(2018, 8, 21)) - y[2]) < tol
@test abs(evaluate(spline, Date(2019, 8, 21)) - y[2]) < tol
## Testing with three dates provided.
x = Array{Date}(undef,3)
x[1] = Date(2018, 7, 21)
x[2] = Date(2018, 8, 21)
x[3] = Date(2018, 9, 21)
y = Array{Float64}(undef,3)
y[1] = 0.0
y[2] = 1.0
y[3] = 1.3
spline = Schumaker(x,y)
@test abs(evaluate(spline, x[2]) - y[2]) < tol
end
# ===== File: Test_with_Floats.jl =====
using Test
@testset "Testing with Floats" begin
using SchumakerSpline
tol = 10*eps()
x = collect(range(1, stop=6, length=1000))
y = log.(x) + sqrt.(x)
analytical_first_derivative(e) = 1/e + 0.5 * e^(-0.5)
spline = Schumaker(x,y)
for i in 1:length(x)
@test abs(evaluate(spline, x[i]) - y[i]) < tol
end
# Testing First derivative.
first_derivatives = evaluate.(spline, x, 1)
@test maximum(abs.(first_derivatives .- analytical_first_derivative.(x))) < 0.002
# Testing second derivatives
second_derivatives = evaluate.(spline, x, 2)
@test maximum(second_derivatives) < tol
# Test higher derivative
higher_derivatives = evaluate.(spline, x, 3)
    @test maximum(higher_derivatives) < tol
# Testing Integrals
analytic_integral(lhs,rhs) = rhs*log(rhs) - rhs + (2/3) * rhs^(3/2) - ( lhs*log(lhs) - lhs + (2/3) * lhs^(3/2) )
lhs = 2.0
rhs = 2.5
numerical_integral = evaluate_integral(spline, lhs,rhs)
@test abs(analytic_integral(lhs,rhs) - numerical_integral) < 0.01
lhs = 1.2
rhs = 4.3
numerical_integral = evaluate_integral(spline, lhs,rhs)
@test abs(analytic_integral(lhs,rhs) - numerical_integral) < 0.01
lhs = 0.8
rhs = 4.0
numerical_integral = evaluate_integral(spline, lhs,rhs)
@test abs(analytic_integral(lhs,rhs) - numerical_integral) < 0.03
# Testing creation of a spline with gradient information.
first_derivs = analytical_first_derivative.(x)
spline = Schumaker(x,y; gradients = first_derivs)
first_derivatives = evaluate.(spline, x, 1)
@test maximum(abs.(first_derivatives .- analytical_first_derivative.(x))) < tol
# Testing creation of a spline with only the gradients on the edges.
first_derivs = analytical_first_derivative.(x)
spline = Schumaker(x,y; left_gradient = first_derivs[1], right_gradient = first_derivs[length(first_derivs)])
first_derivatives = evaluate.(spline, x, 1)
gaps = abs.(first_derivatives .- analytical_first_derivative.(x))
@test gaps[1] < tol
@test gaps[length(gaps)] < tol
@test minimum(gaps[2:(length(gaps)-1)]) > 10* tol
# Testing the other syntax for evaluation.
@test abs(spline(1.4) - evaluate(spline, 1.4)) < eps()
@test abs(spline(1.5) - evaluate(spline, 1.5)) < eps()
@test abs(spline(1.6) - evaluate(spline, 1.6)) < eps()
end
#=
# should be two random roots and one optima.
x = collect(range(-10, stop=10, length=1000))
function random_function(a::Int)
vertex = (mod(a * 97, 89)- 45)/5
y = -(x .- vertex).^2 .+ 1
sp = Schumaker(x,y)
return sp, vertex
end
using Optim
sp, vertex = random_function(2)
@time optimafinder = find_optima(sp)
@time optimize(x -> evaluate(sp,x[1]), -5.0, 5.0 )
=#
#= Testing AAD. Not in the full test batch to avoid another dependency
x = collect(range(-10, stop=10, length=1000))
function random_function(a::Int)
vertex = (mod(a * 97, 89)- 45)/5
y = -(x .- vertex).^2 .+ 1
sp = Schumaker(x,y)
return sp, vertex
end
sp, vertex = random_function(2)
function spl(x::Array{<:Real,1})
return sum(evaluate.(Ref(sp), x))
end
using ForwardDiff
ForwardDiff.gradient(spl, [2,3])
=#
# ===== File: Test_with_Ints.jl =====
using Test
@testset "Testing with Ints" begin
using SchumakerSpline
tol = 10*eps()
x = [1,2,3,4,5,6,7,8,9,10,11,12]
y = log.(x) + sqrt.(x)
@test typeof(x) == Vector{Int}
spline = Schumaker(x,y)
for i in 1:length(x)
@test abs(evaluate(spline, x[i]) - y[i]) < tol
end
# Evaluation with a Float64.
@test isa(evaluate(spline, 11.5), Real)
# Testing second derivatives
xArray = range(1, stop=6, length=1000)
second_derivatives = evaluate.(spline, xArray,2)
@test maximum(second_derivatives) < tol
# Testing Integrals
analytic_integral(lhs,rhs) = rhs*log(rhs) - rhs + (2/3) * rhs^(3/2) - ( lhs*log(lhs) - lhs + (2/3) * lhs^(3/2) )
lhs = 2.0
rhs = 2.5
numerical_integral = evaluate_integral(spline, lhs,rhs)
@test abs(analytic_integral(lhs,rhs) - numerical_integral) < 0.01
lhs = 2.1
rhs = 2.11
numerical_integral = evaluate_integral(spline, lhs,rhs)
@test abs(analytic_integral(lhs,rhs) - numerical_integral) < 0.01
lhs = 1
rhs = 4
numerical_integral = evaluate_integral(spline, lhs,rhs)
@test abs(analytic_integral(lhs,rhs) - numerical_integral) < 0.1
end
# ===== File: runtests.jl =====
using SchumakerSpline
using Test
# Run tests
println("Test with Float64s")
include("Test_with_Floats.jl")
println("Test with Ints")
include("Test_with_Ints.jl")
println("Test with Dates")
include("Test_with_Dates.jl")
println("Test Intercepts")
include("test_intercepts.jl")
println("Test Splicing of Splines")
include("test_splice_splines.jl")
println("Test Plots")
include("test_plots.jl")
println("Test Extrapolation")
include("test_extrapolation.jl")
println("Test mapping to shape")
include("test_map_to_shape.jl")
println("Test Algebra")
include("test_algebra.jl")
println("Test 2d")
include("test_2d.jl")
# ===== File: test_2d.jl =====
using Test
@testset "Test two dimensional" begin
using SchumakerSpline
gridx = collect(1:10)
gridy = collect(1:10)
grid = gridx * gridy'
schum = Schumaker2d(gridx, gridy, grid; extrapolation = (Linear, Linear))
@test abs(schum(5,5) - 25) < 100 * eps()
@test abs(schum(5.5,5.5) - 30.25) < 100 * eps()
end
# ===== File: test_algebra.jl =====
using Test
@testset "Test Algebra" begin
using SchumakerSpline
tol = 100 * eps()
close(a,b) = abs(a-b) < tol
from = 0.5
to = 10
x1 = collect(range(from, stop=to, length=40))
y1 = (x1).^2
x2 = [0.5,0.75,0.8,0.93,0.9755,1.0,1.1,1.4,2.0]
y2 = sqrt.(x2)
s1 = Schumaker(x1,y1)
s2 = Schumaker(x2,y2)
con = 7.6
x = 0.9
@test close( (s1 +con)(x) , s1(x) + con)
@test close( (con + s1)(x) , s1(x) + con)
@test close( (s1 - con)(x) , s1(x) - con)
@test close( (con - s1)(x) , con - s1(x) )
@test close( (s1 *con)(x) , s1(x) * con)
@test close( (con * s1)(x) , s1(x) * con)
@test close( (s1 /con)(x) , s1(x) / con)
end
# ===== File: test_extrapolation.jl =====
using Test
@testset "Test Extrapolation" begin
using SchumakerSpline
from = 0.0
to = 10
x1 = collect(range(from, stop=to, length=40))
y1 = (x1).^2
s1 = Schumaker(x1,y1; extrapolation = (Constant, Curve))
#plot(s1)
@test abs(s1(x1[1]) - s1(x1[1]-0.5)) < eps()
@test abs(s1(x1[40]) - s1(x1[40]+0.5)) > 0.01
s2 = Schumaker(x1,y1; extrapolation = (Constant, Constant))
@test abs(s2(x1[40]) - s2(x1[40]+0.5)) < eps()
plot(s2)
s3 = Schumaker(x1,y1; extrapolation = (Constant, Linear))
@test abs((s3(x1[40]+0.5) - s3(x1[40])) - (s3(x1[40]+1.5) - s3(x1[40]+1.0))) < 2e-14
TopX = x1[40]
@test abs(s3(TopX - 10*eps()) + s3(TopX - 10*eps(),1)*0.5 - s3(TopX +0.5) ) < 1000*eps()
end
# ===== File: test_intercepts.jl =====
using Test
@testset "Intercepts" begin
using SchumakerSpline
from = 0.5
to = 10
x1 = collect(range(from, stop=to, length=40))
y1 = (x1).^2
x2 = [0.5,0.75,0.8,0.93,0.9755,1.0,1.1,1.4,2.0]
y2 = sqrt.(x2)
s1 = Schumaker(x1,y1)
s2 = Schumaker(x2,y2)
crossover_point = get_intersection_points(s1,s2)
@test abs(evaluate(s1, crossover_point[1]) - evaluate(s2, crossover_point[1])) < 100*eps()
y1 = (x1 .- 5).^2
x2 = [0.5,0.75,0.8,0.93,0.9755,1.0,1.1,1.4,2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
y2 = 1 .+ 0.5 .* x2
s1 = Schumaker(x1,y1)
s2 = Schumaker(x2,y2)
crossover_points = get_intersection_points(s1,s2)
@test abs(evaluate(s1, crossover_points[1]) - evaluate(s2, crossover_points[1])) < 100*eps()
@test abs(evaluate(s1, crossover_points[2]) - evaluate(s2, crossover_points[2])) < 100*eps()
y1 = (x1 .- 5).^2
x2 = [0.5,0.75,0.8,0.93,0.9755,1.0,1.1,1.4,2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
y2 = -0.5 .* x2
s1 = Schumaker(x1,y1)
s2 = Schumaker(x2,y2)
crossover_points = get_intersection_points(s1,s2)
@test length(crossover_points) == 0
# Testing Rootfinder and OptimaFinder
from = 0.0
to = 10.0
x = collect(range(from, stop=to, length=400))
# This should have no roots or optima.
y = log.(x) + sqrt.(x)
spline = Schumaker(x,y)
rootfinder = find_roots(spline)
optimafinder = find_optima(spline)
@test length(rootfinder.roots) == 1
@test length(optimafinder.optima) == 0
# But it has a point at which it has a value of four:
fourfinder = find_roots(spline; root_value = 4.0)
@test abs(evaluate(spline, fourfinder.roots[1]) - 4.0) < 1e-10
# and no points where it is negative four::
negfourfinder = find_roots(spline; root_value = -4.0)
@test length(negfourfinder.roots) == 0
fourfinder2 = find_roots(spline - 2.0; root_value = 2.0)
@test abs(fourfinder[:roots][1] - fourfinder2[:roots][1]) < eps()
# And if we strict domain to after 2.5 then we will find one root at 103ish
rootfinder22 = find_roots(spline; interval = (2.5,Inf))
@test spline(rootfinder22.roots[1]) < 1e-10
# This has a root but no optima:
y = y .-2.0
spline2 = Schumaker(x,y)
rootfinder = find_roots(spline2)
optimafinder = find_optima(spline2)
@test length(rootfinder.roots) == 1
@test abs(rootfinder.roots[1] - 1.878) < 0.001
@test length(optimafinder.optima) == 0
y = (x .- 3).^2 .+ 6 # Should be an optima at x = 3. But no roots.
spline3 = Schumaker(x,y)
rootfinder = find_roots(spline3)
optimafinder = find_optima(spline3)
@test length(rootfinder.roots) == 0
@test length(optimafinder.optima) == 1
@test abs(optimafinder.optima[1] - 3.0) < 1e-2
@test optimafinder.optima_types[1] == :Minimum
# This is a historical bug - It did not find the root because it happened in the interval right before 4.0
spline = Schumaker{Float64}([0.0, 0.166667, 0.25, 0.277597, 0.5, 0.581563, 0.75, 0.974241, 1.0, 95.1966, 100.0, 116.344, 200.0, 233.333], [1.92913 -9.4624 9.91755; 7.71653 -23.3254 8.39407; 40.1234 -101.466 6.50388; 0.617789 -3.15302 3.73426; 1.84611 -5.68404 3.06358; 0.432882 -2.23154 2.61226; 0.3396 -1.88818 2.24866;
25.7353 -51.7173 1.84233; 2.00801e-5 -0.183652 0.527206; 0.00772226 0.288784 -16.594; 0.00225409 0.0127389 -15.0287; 8.60438e-5 -0.0760628 -14.2184; 0.000216017 -0.0446771 -19.9793; 5.40043e-5 -0.0550539 -21.2285])
interval = (1e-14, 4.0)
root_value = 0.0
optima = find_roots(spline; root_value = root_value, interval = interval)
@test length(optima.roots) == 1
@test abs(optima.roots[1] - 3.875) < 0.005
spline = Schumaker{Float64}([0.0, 0.166667, 0.25, 0.277597, 0.5, 0.581563, 0.75, 0.913543, 1.0, 1.65249, 2.0, 2.33612, 3.0, 3.5334, 4.0, 4.66667], [-0.645627 2.21521 0.0; -2.58251 2.0 0.351267; -13.4282 1.56958 0.5; -0.206757 0.828427 0.533089; -0.617841 0.736461 0.707107; -0.144873 0.635674 0.763065;
-0.155837 0.58687 0.866025; -0.557609 0.535898 0.957836; -0.0193613 0.43948 1.0; -0.068257 0.414214 1.27851; -0.072796 0.366774 1.41421; -0.0186602 0.317837 1.52927; -0.0235395 0.293061 1.73205; -0.030761 0.267949 1.88167; -0.0108734 0.239243 2.0; -0.00271836 0.224745 2.15466])
root_value = 2.0
optima = find_roots(spline; root_value = root_value)
@test length(optima.roots) == 1
@test abs(optima.roots[1] - 4.0) < 1e-10
# This covers the case where there is an intercept but it comes after the last value of IntStarts but within the interval.
spline = Schumaker{Float64}([0.0, 0.166667, 0.25, 0.277597, 0.5, 0.581563, 0.75, 0.913543, 1.0, 1.75963, 2.0, 2.66667], [1.92913 -10.7562 11.8412; 7.71653 -28.5007 10.1021; 40.1234 -128.376 7.78064;
0.617789 -3.56736 4.2684; 1.84611 -6.92217 3.50557; 0.432882 -2.52187 2.95326; 0.465641 -2.46738 2.54076; 1.66614 -5.5315 2.1497; 0.0496917 -1.00572 1.68391; 0.496301 -1.87488 0.948612;
0.0929378 -0.847743 0.526628; 0.0232345 -0.618539 0.00277145])
interval = (1e-14, 4.0)
optima = find_roots(spline; interval = interval)
@test length(optima.roots) == 1
@test abs(spline(optima.roots[1])) < 1e-10
# In this case the root is in the last interval which is linear rather than quadratic.
spline = Schumaker{Float64}([-1.0e-10, 0.0, 0.02, 0.03, 0.0460546, 0.1, 0.170253, 0.3, 0.361175, 0.5, 0.598784, 0.75, 0.881918, 1.0, 1.27089, 1.5, 1.8286, 2.0, 2.66667, 4.0], [-0.0 0.0 1.74211; 66.2224 -229.519 34.2346; 264.889 -872.979 29.6707; 182.141 -599.117 20.9674; 16.1324 -56.256 11.3957; 6.4478 -23.5354 8.40789; 1.89036 -8.26419 6.78629; 4.79607 -16.6813 5.74585; 0.931297 -4.40443 4.74333; 1.12989 -4.72817 4.14983; 0.482179 -2.64836 3.69379; 0.577267 -2.76551 3.30434; 0.720484 -2.99668 2.94957; 0.162497 -1.37639 2.60576; 0.227166 -1.44474 2.24484; 0.162381 -1.19387 1.92576; 0.59688 -1.97635 1.55098; 0.0393714 -0.694514 1.22978; 0.00984285 -0.598087 0.784266; -0.0 -0.583443 0.0121747])
interval = (1e-14, 5.0)
optima = find_roots(spline; interval = interval)
@test length(optima.roots) == 1
@test abs(spline(optima.roots[1])) < 1e-10
# This spline is NOT continuous. It jumps the root. And hence no root is found.
spline = Schumaker{Float64}([-1.0e-10, 0.0, 0.02, 0.03, 0.0607165, 0.1, 0.207868, 0.3, 0.360524, 0.5, 0.584994, 0.75, 0.859153, 1.0, 1.06934, 1.5, 1.75, 2.0, 2.66667, 4.0],
[-0.0 0.0 11.2616; 113.819 -400.816 87.8368; 455.276 -1499.77 79.866; 98.1356 -338.854 64.9138; 60.0001 -210.615 54.598; 10.1658 -47.5069 46.4169; 13.9347 -57.0219 41.4107; 30.028 -103.46 36.2755; 5.6543 -26.5882 30.1237; 9.39851 -35.9132 26.5253; 2.49363 -14.6014 23.5408; 4.42393 -19.0774 21.1993; 2.65699 -13.3898 19.1697; 3.03898 -13.6262 17.3365; 0.0787707 -5.70898 16.4063; 0.989202 -7.68514 13.9623; -2.41678 -0.111457 12.1028; -0.132839 -5.68619 11.9239; -0.0332096 -6.00949 8.07405; -0.0 -6.05822 -0.023263])
interval = (1e-14, 5.0)
optima = find_roots(spline; interval = interval)
@test length(optima.roots) == 0
# There was an issue with this spline not getting 3 roots as two of them are within one interval.
spline = Schumaker{Float64}([-1.0e-10, 0.0, 0.0208333, 0.03125, 0.040147, 0.0625, 0.1023, 0.125, 0.203443, 0.25, 0.393202, 0.5, 0.740062, 1.0, 1.26141, 2.0, 3.0, 4.0, 5.33333, 8.0], [-0.0 0.0 4.79187; 63.3179 -152.462 47.9744; 253.272 -515.484 44.8255; 426.822 -843.087 39.4834; 67.6186 -148.651 32.0162; 22.7273 -60.4593 28.7272; 69.8655 -146.831 26.3569; 12.8026 -37.776 23.0599; 36.3453 -78.2209 20.1754; 8.38371 -25.2826 16.6125; 15.0731 -34.0978 13.1639; 5.82125 -16.024 9.69419; 4.96507 -11.9915 6.18291; 5.60305 -10.2219 3.40135; 0.701903 -1.91131 1.1121; 0.979196 -1.0424 0.0833198; -1.7432 0.749616 0.0201181; -0.283294 -1.67429 -0.973462; -0.0708235 -2.08624 -3.70948; -0.0 -2.22356 -9.77643])
interval = (1e-14, 5.0)
root_value = 0.0
optima = find_roots(spline; root_value = root_value, interval = interval)
@test length(optima.roots) == 3
@test abs(spline(optima.roots[1])) < 1e-10
@test abs(spline(optima.roots[2])) < 1e-10
@test abs(spline(optima.roots[3])) < 1e-10
# There was also a problem that it is getting roots outside of our interval of interest.
interval = (1e-14, 3.0)
root_value = 0.0
optima = find_roots(spline; root_value = root_value, interval = interval)
@test length(optima.roots) == 2
# There was also a problem that it is getting roots outside of our interval of interest.
interval = (1e-14, 2.9)
root_value = 0.0
optima = find_roots(spline; root_value = root_value, interval = interval)
@test length(optima.roots) == 1
x = [1,2,3]
y = [3,5,6]
    spline = Schumaker(x, y; extrapolation = (Constant,Linear))
interval = (1e-14, Inf)
    optima = find_roots(spline; root_value = 5, interval = interval)
@test length(optima.roots) == 1
end
# ===== File: test_map_to_shape.jl =====
using Test
@testset "Test mapping to a shape" begin
using SchumakerSpline
xvals = [1,2,3,4,5,6]
shape_map(x,upper,lower) = min(max(x,lower), upper)
# Increasing
yvals = [1.0, 2.0, 2.9, 3.9, 2.3, 1.4]
vals2 = reshape_values(xvals, yvals; increasing = true, concave = true, shape_map = shape_map)
@test all(abs.([1.0, 2.0, 2.9, 3.8, 3.8, 3.8] .- vals2) .< 10*eps()) # Changes
# Increasing and concave
yvals = [1.0, 2.0, 2.9, 3.9, 4.3, 4.4]
vals2 = reshape_values(xvals, yvals; increasing = true, concave = true, shape_map = shape_map)
@test all(abs.([1.0, 2.0, 2.9, 3.8, 4.3, 4.4] .- vals2) .< 10*eps()) # Changes
yvals = sqrt.(xvals)
vals2 = reshape_values(xvals, yvals; increasing = true, concave = true, shape_map = shape_map)
@test all(abs.(vals2 .- yvals) .< 10*eps()) # No changes as input already increasing concave
# Increasing and convex
yvals = [1.0, 2.0, 3.1, 3.9, 4.3, 4.4]
vals2 = reshape_values(xvals, yvals; increasing = true, concave = false, shape_map = shape_map)
@test all(abs.([1.0, 2.0, 3.1, 4.2, 5.3, 6.4] .- vals2) .< 10*eps()) #changes
yvals = (xvals) .^ 2
vals2 = reshape_values(xvals, yvals; increasing = true, concave = false, shape_map = shape_map)
@test all(abs.(vals2 .- yvals) .< 10*eps()) # No changes as input already increasing concave
# Decreasing
yvals = -[1.0, 2.0, 2.9, 3.9, 2.3, 1.4]
vals2 = reshape_values(xvals, yvals; increasing = false, concave = false, shape_map = shape_map)
@test all(abs.([-1.0, -2.0, -2.9, -3.8, -3.8, -3.8] .- vals2) .< 10*eps()) # Changes
# Decreasing and convex
yvals = -[1.0, 2.0, 2.9, 3.9, 4.3, 4.4]
vals2 = reshape_values(xvals, yvals; increasing = false, concave = false, shape_map = shape_map)
@test all(abs.(-[1.0, 2.0, 2.9, 3.8, 4.3, 4.4] .- vals2) .< 10*eps()) # Changes
yvals = -sqrt.(xvals)
vals2 = reshape_values(xvals, yvals; increasing = false, concave = false, shape_map = shape_map)
@test all(abs.(vals2 .- yvals) .< 10*eps()) # No changes as input already increasing concave
# Decreasing and concave
yvals = -[1.0, 2.0, 3.1, 3.9, 4.3, 4.4]
vals2 = reshape_values(xvals, yvals; increasing = false, concave = true, shape_map = shape_map)
@test all(abs.(-[1.0, 2.0, 3.1, 4.2, 5.3, 6.4] .- vals2) .< 10*eps()) #changes
yvals = -(xvals) .^ 2
vals2 = reshape_values(xvals, yvals; increasing = false, concave = true, shape_map = shape_map)
@test all(abs.(vals2 .- yvals) .< 10*eps()) # No changes as input is already decreasing and concave
end
| SchumakerSpline | https://github.com/s-baumann/SchumakerSpline.jl.git |
|
[
"MIT"
] | 1.4.4 | 998ed1bc7ed4524ac5130af59cc25fc674b8a59e | code | 1290 | using Test
@testset "Test Plotting" begin
using SchumakerSpline
x = Array{Union{Missing,Float64}}(collect(0.0:0.01:2.0))
y = Array{Union{Missing,Float64}}(sqrt.(x))
y[5] = missing
x[2] = missing
s1 = Schumaker{Float64}(x, y)
# This should plot something over [0,2]
plt = plot(s1)
# This should plot over [0,1] with only two points: an array input is used as the exact set of plot points.
plt = plot(s1, [0.0,1.0])
# Now the interval is a tuple, so grid_len intermediate points (200 by default) are put in.
plt = plot(s1, (0.0,1.0))
# And we can plot the derivatives too
plt = plot(s1, (0.0,1.0); derivs = true)
# This should add a second spline just below, with the same derivatives.
glt = plot(s1 - 10.0, [0.5,1.0]; plot_options = (label = "shifted",), deriv_plot_options = (label = "shifted deriv1",), deriv2_plot_options = (label = "shifted deriv2",), plt = plt)
# This should create a new plot containing only this spline.
qlt = plot(s1 + 0.2, (0.0,1.0); plot_options = (label = "shifted",))
ss = Array{Schumaker,1}([s1, s1+0.2, s1*0.8+0.2])
plt = plot(ss, [0.0,1.0])
plt = plot(ss)
plt2 = plot(0-2.0*s1 + 0.1, [0.0,1.0]; derivs = false, plot_options = (label = "new one",), plt = plt)
end
| SchumakerSpline | https://github.com/s-baumann/SchumakerSpline.jl.git |
|
[
"MIT"
] | 1.4.4 | 998ed1bc7ed4524ac5130af59cc25fc674b8a59e | code | 2199 | using Test
@testset "Test Splicing of Splines" begin
using SchumakerSpline
from = 0.5
to = 10
x1 = collect(range(from, stop=to, length=40))
y1 = (x1).^2
x2 = [0.5,0.75,0.8,0.93,0.9755,1.0,1.1,1.4,2.0]
y2 = sqrt.(x2)
left_spline = Schumaker(x1,y1)
right_spline = Schumaker(x2,y2)
crossover_point = get_intersection_points(left_spline,right_spline)
splice_point = crossover_point[1]
spliced = splice_splines(left_spline, right_spline, splice_point)
# Testing at splice point
@test abs(evaluate(spliced, splice_point) - evaluate(right_spline, splice_point)) < 100*eps()
# As this was a continuous splice
@test abs(evaluate(spliced, splice_point- 100*eps()) - evaluate(spliced, splice_point + 100*eps())) < 10000*eps()
# Testing in left spline territory.
@test abs(evaluate(spliced, splice_point-0.5) - evaluate(left_spline, splice_point-0.5)) < 100*eps()
@test abs(evaluate(spliced, splice_point-0.5) - evaluate(right_spline, splice_point-0.5)) > 0.1
# Testing solidly into right spline territory.
@test abs(evaluate(spliced, splice_point+0.5) - evaluate(left_spline, splice_point+0.5)) > 100*eps()
@test abs(evaluate(spliced, splice_point+0.5) - evaluate(right_spline, splice_point+0.5)) < 0.1
splice_point = 1.7
spliced = splice_splines(left_spline, right_spline, splice_point)
# Testing at splice point
@test abs(evaluate(spliced, splice_point) - evaluate(right_spline, splice_point)) < 100*eps()
# As this was NOT a continuous splice
@test abs(evaluate(spliced, splice_point- 100*eps()) - evaluate(spliced, splice_point + 100*eps())) > 0.1
# Testing in left spline territory.
@test abs(evaluate(spliced, splice_point-0.5) - evaluate(left_spline, splice_point-0.5)) < 100*eps()
@test abs(evaluate(spliced, splice_point-0.5) - evaluate(right_spline, splice_point-0.5)) > 0.1
# Testing solidly into right spline territory.
@test abs(evaluate(spliced, splice_point+0.5) - evaluate(left_spline, splice_point+0.5)) > 100*eps()
@test abs(evaluate(spliced, splice_point+0.5) - evaluate(right_spline, splice_point+0.5)) < 0.1
end
| SchumakerSpline | https://github.com/s-baumann/SchumakerSpline.jl.git |
|
[
"MIT"
] | 1.4.4 | 998ed1bc7ed4524ac5130af59cc25fc674b8a59e | docs | 1007 | # SchumakerSpline
| Build | Coverage | Documentation |
|-------|----------|---------------|
| [](https://github.com/s-baumann/SchumakerSpline.jl/actions) | [](https://codecov.io/gh/s-baumann/SchumakerSpline.jl) | [](https://s-baumann.github.io/SchumakerSpline.jl/dev/index.html) |
A Julia package to create a shape-preserving spline. The spline is guaranteed to be monotonic and concave or convex if the data are monotonic and concave or convex. It does not use any numerical optimisation, so it is fast, and it converges smoothly to a fixed point in economic dynamics problems such as value function iteration. This package has the same functionality as the R package [schumaker](https://cran.r-project.org/web/packages/schumaker/index.html).
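
A minimal usage sketch (names as exported by the package; keyword defaults may vary by version):

```julia
using SchumakerSpline

x = collect(range(0.0, stop = 2.0, length = 20))
y = sqrt.(x)              # monotonic and concave data
s = Schumaker(x, y)       # shape-preserving spline through the data

s(1.5)                    # splines are callable...
evaluate(s, 1.5)          # ...or use evaluate directly
```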
| SchumakerSpline | https://github.com/s-baumann/SchumakerSpline.jl.git |
|
[
"MIT"
] | 1.4.4 | 998ed1bc7ed4524ac5130af59cc25fc674b8a59e | docs | 524 | ```@meta
CurrentModule = SchumakerSpline
```
# Internal Functions
```@index
Pages = ["api.md"]
```
### Main Struct
```@docs
Schumaker
evaluate
evaluate_integral
```
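
A minimal construction-and-evaluation sketch, mirroring the test suite; the `evaluate_integral` call assumes a `(spline, from, to)` signature:

```julia
using SchumakerSpline

x = Array{Union{Missing,Float64}}(collect(0.0:0.1:2.0))
y = Array{Union{Missing,Float64}}(sqrt.(x))
y[3] = missing                   # missing entries are tolerated by the constructor
s = Schumaker{Float64}(x, y)

evaluate(s, 1.0)                 # point evaluation; s(1.0) is equivalent
evaluate_integral(s, 0.0, 1.0)   # assumed (spline, from, to) signature
```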
### Extrapolation Schemes
```@docs
Schumaker_ExtrapolationSchemes
```
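
A sketch of the extrapolation keyword, assuming the conventional meanings (`Constant` holds the boundary value, `Linear` continues at the boundary slope):

```julia
using SchumakerSpline

x = [1.0, 2.0, 3.0]
y = [3.0, 5.0, 6.0]
# Constant extrapolation below the data, linear extrapolation above it.
s = Schumaker(x, y; extrapolation = (Constant, Linear))

s(0.5)   # held at the boundary value (Constant)
s(4.0)   # continues at the boundary slope (Linear)
```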
### Working with Splines
```@docs
find_derivative_spline
find_roots
find_optima
get_crossover_in_interval
get_intersection_points
reshape_values
splice_splines
```
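
A sketch of the root-finding and splicing helpers, following the call patterns used in the test suite:

```julia
using SchumakerSpline

xs = [0.5, 1.0, 2.0, 4.0]
left  = Schumaker(xs, xs .^ 2)
right = Schumaker(xs, sqrt.(xs))

# Roots of the spline minus root_value inside an interval; returns a named tuple.
optima = find_roots(left; root_value = 4.0, interval = (0.5, 4.0))
optima.roots

# Crossings of two splines, and a new spline spliced at the first crossing.
crossings = get_intersection_points(left, right)
spliced = splice_splines(left, right, crossings[1])
```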
### Two dimensional splines
```@docs
Schumaker2d
```
### Plotting of splines
```@docs
plot
```
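
A plotting sketch, assuming a Plots.jl backend is available; as in the test suite, splines support arithmetic such as `s + 0.2`:

```julia
using SchumakerSpline

s = Schumaker([0.0, 1.0, 2.0], [0.0, 1.0, 1.4])
plt = plot(s, (0.0, 2.0); derivs = true)   # spline plus its first two derivatives
plot(s + 0.2, [0.0, 2.0]; plot_options = (label = "shifted",), plt = plt)
```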
| SchumakerSpline | https://github.com/s-baumann/SchumakerSpline.jl.git |