licenses (sequence, 1–3 items) | version (string, 677 values) | tree_hash (string, 40 chars) | path (string, 1 value) | type (string, 2 values) | size (string, 2–8 chars) | text (string, 25–67.1M chars) | package_name (string, 2–41 chars) | repo (string, 33–86 chars) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 8344 | """
ActivationContribution{L,C,M} <: AbstractMutableComp
ActivationContribution(l)
ActivationContribution(l, method)
Calculate neuron utility based on activations and gradients using `method`.
Designed to be used as `layerfun` argument to [`fluxvertex`](@ref).
Can be a performance bottleneck in cases with large activations. Use [`NeuronUtilityEvery`](@ref) to mitigate.
Default `method` is described in <https://arxiv.org/abs/1611.06440>.
In short, the first order Taylor approximation of the optimization problem "which neurons shall I remove to minimize the impact on the loss function?"
boils down to "the ones which minimize `abs(gradient * activation)`" (assuming parameter independence).
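# Examples
A minimal usage sketch (the resulting utility values depend on the data, so they are not shown here):
```julia
using NaiveNASflux, Flux

iv = denseinputvertex("in", 3)
v = fluxvertex(Dense(3 => 4), iv; layerfun=ActivationContribution)
g = CompGraph(iv, v)

# Taking a gradient runs the backward pass which updates the stored contribution
Flux.gradient(m -> sum(m(ones(Float32, 3, 8))), g)

NaiveNASflux.neuronutility(v) # 4-element vector with one utility value per output neuron
```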
"""
struct ActivationContribution{L,C,M} <: AbstractMutableComp
layer::L
contribution::C
method::M
end
# We use eps(Float32) here because we don't want parameters from new layers to have:
# 1) higher utility than parameters from existing layers
# 2) zero utility since that will often make the optimizer remove them completely
# eps(Float32) is typically smaller than the optimizer tolerance, but NaiveNASlib tries to rescale
ActivationContribution(l::AbstractMutableComp, method = Ewma(0.05f0)) = ActivationContribution(l, fill(eps(Float32), nout(l)), method)
ActivationContribution(l, method = Ewma(0.05f0)) = ActivationContribution(l, Float32[], method)
@layer :expand ActivationContribution
wrapped(m::ActivationContribution) = m.layer
# We do train contribution in some sense, but we don't want Flux to do it
# We could create a "fake" gradient in the rrule and let the optimizer rule update it for us
# (rather than using our own Ewma), but it is probably not desirable to mix the model parameter update
# strategy with the activation contribution strategy.
Flux.trainable(m::ActivationContribution) = (;layer = Flux.trainable(m.layer))
# Just passthrough when not taking gradients.
(m::ActivationContribution)(x...) = wrapped(m)(x...)
function ChainRulesCore.rrule(config::RuleConfig{>:HasReverseMode}, m::T, x...) where T <:ActivationContribution
act, back = rrule_via_ad(config, wrapped(m), x...)
function ActivationContribution_back(Δ)
if length(m.contribution) === 0
newcontribution = m.method(missing, act, Δ)
resize!(m.contribution, length(newcontribution))
copyto!(m.contribution, newcontribution)
else
copyto!(m.contribution, m.method(m.contribution, act, Δ))
end
δs = back(Δ)
Tangent{T}(layer=δs[1]), δs[2:end]...
end
return act, ActivationContribution_back
end
actdim(nd::Integer) = nd - 1
function NaiveNASlib.Δsize!(m::ActivationContribution, inputs::AbstractVector, outputs::AbstractVector; kwargs...)
if length(m.contribution) !== 0
# This tends to happen when we are measuring contribution for a concatenation and we have added an extra input edge
# TODO: Try to find another fix, perhaps we need to ensure that nout(v) if v wraps an ActivationContribution always return
# the length of m.contribution
outputs[outputs .> length(m.contribution)] .= -1
newcontribution = select(m.contribution, 1 => outputs; newfun = (args...) -> eps(eltype(m.contribution)))
resize!(m.contribution, length(newcontribution))
copyto!(m.contribution, newcontribution)
#else
# no need to select anything
end
NaiveNASlib.Δsize!(wrapped(m), inputs, outputs; kwargs...)
end
"""
l2_squeeze(x, dimskeep=1:ndims(x))
Return the l2 norm of `x` along all dimensions except those in `dimskeep` as a 1D array (singleton dimensions are removed).
"""
function l2_squeeze(x, dimskeep=1:ndims(x))
dims = filter(i -> i ∉ dimskeep, 1:ndims(x))
return sqrt.(dropdims(sum(x -> x^2, x, dims=dims), dims=Tuple(dims)))
end
l2_squeeze(z::Number, args...) = z
"""
mean_squeeze(f, x, dimskeep=1:ndims(x))
Return the mean value of `f.(x)` along all dimensions except those in `dimskeep` as a 1D array (singleton dimensions are removed).
"""
function mean_squeeze(f, x, dimskeep=1:ndims(x))
dims = filter(i -> i ∉ dimskeep, 1:ndims(x))
return dropdims(mean(f, x, dims=dims), dims=Tuple(dims))
end
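# For example (a small sketch): for a 3×4 matrix x, l2_squeeze(x, 1) returns a length 3 vector with
# the l2 norm of each row, while mean_squeeze(abs, x, 2) returns a length 4 vector with the mean
# absolute value of each column.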
# To peel the onion...
neuronutility(v::AbstractVertex) = neuronutility(base(v))
neuronutility(v::InputSizeVertex) = ones(nout(v))
neuronutility(v::CompVertex) = neuronutility(v.computation)
neuronutility(m::AbstractMutableComp) = neuronutility(wrapped(m))
function neuronutility(lm::LazyMutable)
forcemutation(lm)
neuronutility(wrapped(lm))
end
# Return missing to maintain API since previous versions used missing as sentinel value instead of empty vector
neuronutility(m::ActivationContribution) = isempty(m.contribution) ? missing : m.contribution
neuronutility(l) = neuronutility(layertype(l), l)
# Default: l2 norm of weights + l2 norm of bias. Not a very good metric, but should be better than random
# Maybe do something about state in recurrent layers as well, but CBA to do it right now
neuronutility(::FluxParLayer, l) = l2_squeeze(weights(l), outdim(l)) .+ l2_squeeze(bias(l))
function neuronutility(::FluxConvolutional{N}, l) where N
ngroups(l) == 1 && return l2_squeeze(weights(l), outdim(l)) .+ l2_squeeze(bias(l))
kernelsize = size(weights(l))[1:N]
weightgroups = reshape(weights(l), kernelsize..., nout(l) ÷ ngroups(l), nin(l)[])
wm = l2_squeeze(weightgroups, indim(l))
bm = l2_squeeze(bias(l))
(length(wm) == 1 || length(wm) == length(bm)) && return wm .+ bm
# use this to get insight on whether to repeat inner or outer:
# cc = DepthwiseConv(reshape(Float32[1 1 1 1;2 2 2 2], 1, 1, 4, 2), Float32[0,0,0,0,1,1,1,1])
# cc(fill(10f0, (1,1,4,1)))
return repeat(wm, length(bm) ÷ length(wm)) .+ bm
end
neuronutility(::FluxParNorm, l) = l.affine ? l2_squeeze(l.γ) .+ l2_squeeze(l.β) : missing
# Not possible to do anything since we don't know the size. Since this is not an error, implementors can however use it to fall back to other approaches
neuronutility(lt, l) = missing
neuronutility_safe(v) = neuronutility_safe(trait(v), v)
neuronutility_safe(t::DecoratingTrait, v) = neuronutility_safe(base(t), v)
neuronutility_safe(::Immutable, v) = 1
neuronutility_safe(::MutationSizeTrait, v) = clean_values(cpu(neuronutility(v)))
neuronutility_safe(m::AbstractMutableComp) = clean_values(cpu(neuronutility(m)))
clean_values(::Missing) = 1
clean_values(a::AbstractArray) = length(a) === 0 ? 1 : replace(a, NaN => -100, Inf => -100, -Inf => -100)
"""
neuronutilitytaylor(currval, act, grad)
Calculate the contribution of activations towards the loss according to <https://arxiv.org/abs/1611.06440>.
In short, the first order Taylor approximation of the optimization problem "which neurons shall I remove to minimize the impact on the loss function?"
boils down to "the ones which minimize `abs(gradient * activation)`" (assuming parameter independence).
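In terms of the implementation below this amounts to (roughly)

    utility[i] ≈ mean_over_batch(abs(mean_over_featuremap_dims(act[i] .* grad[i])))

where `act` are the activations and `grad` is the gradient of the loss w.r.t. those activations.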
"""
neuronutilitytaylor(currval, act, grad) = mean_squeeze(abs, (mean_squeeze(identity, act .* grad, (actdim(ndims(act)), ndims(act)))), 1)
# Kinda wished they had branded this better as 'taylor' can mean many things.
"""
Ewma{R<:Real, M}
Ewma(α::Real, method)
Exponential moving average of neuron utility calculated by `method`.
Parameter `α` acts as a forgetting factor, i.e. larger values mean faster convergence but a noisier estimate.
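# Examples
A small sketch using a stand-in `method` which simply returns the activations as the utility:
```julia
actonly(curr, act, grad) = act

m = Ewma(0.3, actonly)
m(missing, [1.0, 2.0, 3.0, 4.0], nothing)               # [1.0, 2.0, 3.0, 4.0] since there is no previous value
m([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], nothing)  # 0.3 .* old .+ 0.7 .* new == [3.8, 4.8, 5.8, 6.8]
```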
"""
struct Ewma{R<:Real, M}
α::R
method::M
function Ewma(α::R, method::M) where {R,M}
0 <= α <= 1 || error("α must be between 0 and 1, was $α")
new{R,M}(α, method)
end
end
Ewma(α) = Ewma(α, neuronutilitytaylor)
(m::Ewma)(currval, act, grad) = agg(m, currval, m.method(currval, act, grad))
function agg(m::Ewma, x, y)
α = convert(float(eltype(x)), m.α)
α .* x .+ (1 - α) .* y
end
agg(m::Ewma, ::Missing, y) = y
"""
NeuronUtilityEvery{N,T}
NeuronUtilityEvery(n::Int, method::T)
Calculate neuron utility using `method` every `n`th call.
Useful to reduce runtime overhead.
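# Examples
A sketch of how to plug it in through [`ActivationContribution`](@ref):
```julia
using NaiveNASflux, Flux

# Recompute the utility only every 4th backward pass
v = fluxvertex(Dense(3 => 4), denseinputvertex("in", 3);
               layerfun = l -> ActivationContribution(l, NeuronUtilityEvery(4)))
```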
"""
mutable struct NeuronUtilityEvery{N,T}
cnt::Int
method::T
NeuronUtilityEvery(N::Int, method::T) where T = new{N, T}(0, method)
end
NeuronUtilityEvery(n::Int) = NeuronUtilityEvery(n, Ewma(0.05))
function (m::NeuronUtilityEvery{N})(currval, act, grad) where N
ret = m.cnt % N == 0 ? m.method(currval, act, grad) : currval
m.cnt += 1
return ret
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 861 | using PrecompileTools
let
@setup_workload begin
iv1 = denseinputvertex("iv1", 1)
v1 = fluxvertex("v1", Dense(nout(iv1) => 1), iv1)
v2 = concat("v2", v1, v1; layerfun=ActivationContribution)
v3 = concat("v3", v2,v1,iv1)
v4 = "v4" >> v3 + v3
v5 = "v5" >> v4 + v4 + v4
v6 = fluxvertex("v6", Dense(nout(v5) => 1), v5; layerfun = ActivationContribution ∘ LazyMutable)
g1 = CompGraph(iv1, v6)
x1 = ones(Float32, 1, 1)
@compile_workload begin
iv1 = denseinputvertex("iv1", 1)
fluxvertex("v1", Dense(nout(iv1) => 1), iv1)
g1(x1)
Flux.@code_adjoint g1(x1)
#Optimisers.setup(Optimisers.Descent(0.1f0), g1)
#Flux.gradient((g,x) -> sum(g(x)), g1, x1)
Δnout!(v3 => relaxed(2))
end
end
end | NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 3335 |
select(pars::AbstractArray{T,N}, elements_per_dim...; newfun = randoutzeroin) where {T, N} = NaiveNASlib.parselect(pars, elements_per_dim...; newfun)
select(::Missing, args...;kwargs...) = missing
select(s::Number, args...;kwargs...) = s
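# A small sketch of the index convention used by select/parselect: positive indices select existing
# elements along the given dimension while -1 marks a position where a new element (created by
# `newfun`) shall be inserted. For a weight matrix w, something like
#   select(w, 1 => [1, 3], 2 => [1, -1, 2])
# keeps rows 1 and 3 and inserts a newly created column between columns 1 and 2.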
struct WeightParam end
struct BiasParam end
struct RecurrentWeightParam end
struct RecurrentState end
"""
neuroninsert(lt, partype)
Return a function which creates new parameters for layers of type `lt` to use for [`select`](@ref).
"""
neuroninsert(t, partype) = randoutzeroin
neuroninsert(t, parname::Symbol) = neuroninsert(t, Val(parname))
neuroninsert(t, name::Val) = randoutzeroin
neuroninsert(lt::FluxParNorm, t::Val) = norminsert(lt, t)
norminsert(::FluxParNorm, ::Union{Val{:β},Val{:μ}}) = (args...) -> 0
norminsert(::FluxParNorm, ::Union{Val{:γ},Val{:σ²}}) = (args...) -> 1
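# The values above make newly inserted neurons start out as an identity mapping: zero shift (β) and
# running mean (μ) together with unit scale (γ) and running variance (σ²) mean that an added channel
# initially passes its activations through (almost) unchanged.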
# Coupling between input and output weights when grouped make it difficult to do anything else?
neuroninsert(lt::FluxConvolutional, partype) = ngroups(lt) == 1 ? randoutzeroin : (args...) -> 0
randoutzeroin(T, d, s...) = _randoutzeroin(T,d,s)
_randoutzeroin(T, d, s) = 0
_randoutzeroin(T, d, s::NTuple{2, Int}) = d == indim(FluxDense()) ? 0 : randn(T, s) ./ prod(s)
_randoutzeroin(T, d, s::NTuple{N, Int}) where N = d == indim(FluxConv{N-2}()) ? 0 : randn(T, s) ./ prod(s)
"""
KernelSizeAligned(Δsize; pad)
KernelSizeAligned(Δs::Integer...;pad)
Strategy for changing kernel size of convolutional layers where filters remain phase aligned. In other words, the same
element indices are removed/added for all filters and only 'outer' elements are dropped or added.
Call with vertex as input to change weights.
### Examples
```jldoctest
julia> using NaiveNASflux, Flux
julia> cv = fluxvertex(Conv((3,3), 1=>1;pad=SamePad()), conv2dinputvertex("in", 1));
julia> cv(ones(Float32, 4,4,1,1)) |> size
(4, 4, 1, 1)
julia> layer(cv).weight |> size
(3, 3, 1, 1)
julia> cv |> KernelSizeAligned(-1, 1; pad=SamePad());
julia> cv(ones(Float32, 4,4,1,1)) |> size
(4, 4, 1, 1)
julia> layer(cv).weight |> size
(2, 4, 1, 1)
```
"""
struct KernelSizeAligned{T, P}
Δsize::T
pad::P
end
KernelSizeAligned(Δs::Integer...;pad = ntuple(i -> 0, length(Δs))) = KernelSizeAligned(Δs, pad)
(s::KernelSizeAligned)(l) = selectfilters(layertype(l), l, s)
(s::KernelSizeAligned)(v::AbstractVertex) = mutate_weights(v, s)
otherpars(s::KernelSizeAligned, l) = paddingfor(layertype(l), l, s)
paddingfor(lt, l, s) = ()
paddingfor(::FluxConvolutional{N}, l, s) where N = (;pad = Flux.calc_padding(typeof(l), s.pad, size(weights(l))[1:N] .+ s.Δsize, l.dilation, l.stride))
selectfilters(t, l, s) = ()
selectfilters(::FluxConvolutional, l, s) = selectfilters(s, weights(l))
function selectfilters(s::KernelSizeAligned, pars)
csize = size(pars)
mids = csize ./ 2 .+ 0.5
byfun(dim) = i -> abs(mids[dim] - i) * 1 / (1 + mean(abs.(selectdim(pars, dim, i))))
ps = Pair{Int, Vector{Int}}[]
for (dim, Δsize) in enumerate(s.Δsize)
if Δsize < 0
els = sort(partialsort(1:csize[dim], 1:csize[dim]+Δsize, by=byfun(dim)))
push!(ps, dim => els)
elseif Δsize > 0
els = -ones(Int, csize[dim] + Δsize)
offs = Δsize ÷ 2
els[offs+1:csize[dim]+offs] = 1:csize[dim]
push!(ps, dim => els)
end
end
return ps
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 3127 |
abstract type FluxLayer end
layertype(l) = NaiveNASlib.shapetrait(l)
layer(l) = l
# Types for layers with parameters (e.g. weights and biases) and with similar handling
# w.r.t. what the shape of the parameters means in terms of number of inputs and number of outputs
abstract type FluxParLayer <: FluxLayer end
abstract type Flux2D <: FluxParLayer end
struct GenericFlux2D <: Flux2D end
struct FluxDense <: Flux2D end
NaiveNASlib.shapetrait(::Dense) = FluxDense()
# Might be a Flux2D, but not exactly the same. Also want to leave the door open to when/if they accept 3D input too
# Type hierarchies are hard :/
abstract type FluxRecurrent <:FluxParLayer end
struct GenericFluxRecurrent <: FluxRecurrent end
struct FluxRnn <: FluxRecurrent end
struct FluxLstm <: FluxRecurrent end
struct FluxGru <: FluxRecurrent end
NaiveNASlib.shapetrait(l::Flux.Recur) = NaiveNASlib.shapetrait(l.cell)
NaiveNASlib.shapetrait(::Flux.RNNCell) = FluxRnn()
NaiveNASlib.shapetrait(::Flux.LSTMCell) = FluxLstm()
NaiveNASlib.shapetrait(::Flux.GRUCell) = FluxGru()
abstract type FluxConvolutional{N} <: FluxParLayer end
struct GenericFluxConvolutional{N} <: FluxConvolutional{N} end
# Groups here is an eyesore. It's just to avoid having to tag a breaking version for Flux 0.13 due
# to some functions needing to tell the number of groups from the layertype alone
struct FluxConv{N} <: FluxConvolutional{N}
groups::Int
end
FluxConv{N}() where N = FluxConv{N}(1)
struct FluxConvTranspose{N} <: FluxConvolutional{N}
groups::Int
end
FluxConvTranspose{N}() where N = FluxConvTranspose{N}(1)
struct FluxCrossCor{N} <: FluxConvolutional{N} end
NaiveNASlib.shapetrait(l::Conv{N}) where N = FluxConv{N}(l.groups)
NaiveNASlib.shapetrait(l::ConvTranspose{N}) where N = FluxConvTranspose{N}(l.groups)
NaiveNASlib.shapetrait(::CrossCor{N}) where N = FluxCrossCor{N}()
abstract type FluxTransparentLayer <: FluxLayer end
# Invariant layers with parameters, i.e. nin == nout always, and parameter selection must
# be performed
abstract type FluxParInvLayer <: FluxTransparentLayer end
struct FluxScale <: FluxParInvLayer end
struct FluxLayerNorm <: FluxParInvLayer end
abstract type FluxParNorm <: FluxParInvLayer end
struct FluxBatchNorm <: FluxParNorm end
struct FluxInstanceNorm <: FluxParNorm end
struct FluxGroupNorm <: FluxParNorm end
NaiveNASlib.shapetrait(::Flux.Scale) = FluxScale()
NaiveNASlib.shapetrait(::LayerNorm) = FluxLayerNorm()
NaiveNASlib.shapetrait(::BatchNorm) = FluxBatchNorm()
NaiveNASlib.shapetrait(::InstanceNorm) = FluxInstanceNorm()
NaiveNASlib.shapetrait(::GroupNorm) = FluxGroupNorm()
# Transparent layers, i.e. nin == nout always and there are no parameters
abstract type FluxNoParLayer <: FluxTransparentLayer end
struct FluxPoolLayer <: FluxNoParLayer end
struct FluxDropOut <: FluxNoParLayer end
layertype(l::MaxPool) = FluxPoolLayer()
layertype(l::MeanPool) = FluxPoolLayer()
layertype(l::Dropout) = FluxDropOut()
layertype(l::AlphaDropout) = FluxDropOut()
layertype(l::GlobalMaxPool) = FluxPoolLayer()
layertype(l::GlobalMeanPool) = FluxPoolLayer()
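# A quick sketch of how some Flux layers map to the traits above (assuming default constructor arguments):
#   layertype(Dense(2 => 3))       # FluxDense()
#   layertype(Conv((3,3), 1 => 2)) # FluxConv{2}(1)
#   layertype(BatchNorm(4))        # FluxBatchNorm()
#   layertype(MaxPool((2,2)))      # FluxPoolLayer()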
# Compositions? Might not have any common methods...
# MaxOut, Chain?
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 3100 | NaiveNASlib.nin(t::FluxLayer, l) = throw(ArgumentError("Not implemented for $t"))
NaiveNASlib.nout(t::FluxLayer, l) = throw(ArgumentError("Not implemented for $t"))
NaiveNASlib.nin(::FluxParLayer, l) = [size(weights(l), indim(l))]
NaiveNASlib.nin(::FluxConvolutional, l) = [size(weights(l), indim(l)) * ngroups(l)]
NaiveNASlib.nin(::FluxParInvLayer, l) = [nout(l)]
NaiveNASlib.nout(::FluxParLayer, l) = size(weights(l), outdim(l))
NaiveNASlib.nout(::FluxScale, l) = length(weights(l))
NaiveNASlib.nout(::FluxParInvLayer, l::LayerNorm) = nout(l.diag)
NaiveNASlib.nout(::FluxParNorm, l) = l.chs
NaiveNASlib.nout(::FluxRecurrent, l) = div(size(weights(l), outdim(l)), outscale(l))
outscale(l) = outscale(layertype(l))
outscale(::FluxRnn) = 1
outscale(::FluxLstm) = 4
outscale(::FluxGru) = 3
indim(l) = indim(layertype(l))
outdim(l) = outdim(layertype(l))
actdim(l) = actdim(layertype(l))
actrank(l) = actrank(layertype(l))
indim(t::FluxLayer) = throw(ArgumentError("Not implemented for $t"))
outdim(t::FluxLayer) = throw(ArgumentError("Not implemented for $t"))
actdim(t::FluxLayer) = throw(ArgumentError("Not implemented for $t"))
actrank(t::FluxLayer) = throw(ArgumentError("Not implemented for $t"))
indim(::Flux2D) = 2
outdim(::Flux2D) = 1
actdim(::Flux2D) = 1
actrank(::Flux2D) = 1
indim(::FluxScale) = 1
outdim(::FluxScale) = 1
actdim(::FluxScale) = 1
actrank(::FluxScale) = 1
indim(::FluxRecurrent) = 2
outdim(::FluxRecurrent) = 1
actdim(::FluxRecurrent) = 1
actrank(::FluxRecurrent) = 2
indim(::FluxConvolutional{N}) where N = 2+N
outdim(::FluxConvolutional{N}) where N = 1+N
actdim(::FluxConvolutional{N}) where N = 1+N
actrank(::FluxConvolutional{N}) where N = 1+N
indim(::Union{FluxConv{N}, FluxCrossCor{N}}) where N = 1+N
outdim(::Union{FluxConv{N}, FluxCrossCor{N}}) where N = 2+N
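# Roughly speaking, the numbers above are positions in the weight array of the wrapped layer:
# a Dense weight has size (nout, nin) so outdim == 1 and indim == 2, while a Conv/CrossCor weight
# has size (k..., nin ÷ groups, nout) so indim == 1+N and outdim == 2+N. ConvTranspose stores its
# weight as (k..., nout, nin ÷ groups), which is why the generic FluxConvolutional fallback has the
# two dimensions swapped.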
# Note: Absence of bias means that bias is a Bool (false), so beware!
weights(l) = weights(layertype(l), l)
bias(l) = bias(layertype(l), l)
weights(::FluxDense, l) = l.weight
bias(::FluxDense, l) = l.bias
weights(::FluxConvolutional, l) = l.weight
bias(::FluxConvolutional, l) = l.bias
weights(::FluxScale, l) = l.scale
bias(::FluxScale, l) = l.bias
weights(lt::FluxRecurrent, l::Flux.Recur) = weights(lt, l.cell)
bias(lt::FluxRecurrent, l::Flux.Recur) = bias(lt, l.cell)
weights(::FluxRecurrent, cell) = cell.Wi
bias(::FluxRecurrent, cell) = cell.b
hiddenweights(l) = hiddenweights(layertype(l), l)
hiddenweights(lt::FluxRecurrent, l::Flux.Recur) = hiddenweights(lt, l.cell)
hiddenweights(::FluxRecurrent, cell) = cell.Wh
hiddenstate(l) = hiddenstate(layertype(l), l)
hiddenstate(lt::FluxRecurrent, l::Flux.Recur) = hiddenstate(lt, l.cell)
hiddenstate(::FluxRecurrent, cell) = cell.state0
hiddenstate(::FluxLstm, cell::Flux.LSTMCell) = [h for h in cell.state0]
state(l) = state(layertype(l), l)
state(::FluxRecurrent, l) = l.state
state(::FluxLstm, l) = [h for h in l.state]
ngroups(l) = ngroups(layertype(l), l)
ngroups(lt, l) = 1
ngroups(lt::FluxConvolutional, l) = ngroups(lt)
ngroups(::FluxConvolutional) = 1
ngroups(lt::FluxConv) = lt.groups
ngroups(lt::FluxConvTranspose) = lt.groups
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 9305 |
"""
InputShapeVertex <: AbstractVertex
InputShapeVertex(v::AbstractVertex, t::FluxLayer)
Input type vertex which also has information about what type of layer the input is shaped for.
"""
struct InputShapeVertex{V<:AbstractVertex, L<:FluxLayer} <: AbstractVertex
base::V
t::L
end
@layer :expand InputShapeVertex
const inputshapemotivation = """
Providing the input type is not strictly necessary for the package to work and in many cases a normal `inputvertex`
will do.
One example of when it is useful is the [`concat`](@ref) function which needs to know the input type to
automatically determine which dimension to concatenate.
"""
"""
inputvertex(name, size, type::FluxLayer)
Return an immutable input type vertex with the given `name` and `size` and a `type` which can be used to
indicate what type of input is expected.
$inputshapemotivation
"""
NaiveNASlib.inputvertex(name, size, type::FluxLayer) = InputShapeVertex(inputvertex(name, size), type)
"""
convinputvertex(name, nchannel, ndim)
Return an input type vertex with the given `name` which promises convolution shaped input
with `nchannel` channels and `ndim` number of dimensions for feature maps (e.g. 2 for images)
suitable for `Flux`s convolution layers.
$inputshapemotivation
"""
convinputvertex(name, nchannel, ndim) = inputvertex(name, nchannel, GenericFluxConvolutional{ndim}())
doc_convninputvertex(n) = """
conv$(n)dinputvertex(name, nchannel)
Return an input type vertex with the given `name` which promises convolution shaped input
with `nchannel` channels suitable for `Flux`s convolution layers.
Equivalent to [`convinputvertex(name, nchannel, $(n))`](@ref).
$inputshapemotivation
"""
@doc doc_convninputvertex(1)
conv1dinputvertex(name, nchannel) = convinputvertex(name, nchannel, 1)
@doc doc_convninputvertex(2)
conv2dinputvertex(name, nchannel) = convinputvertex(name, nchannel, 2)
@doc doc_convninputvertex(3)
conv3dinputvertex(name, nchannel) = convinputvertex(name, nchannel, 3)
"""
denseinputvertex(name, size)
Return an input type vertex with the given `name` which promises 2D shaped input
with `size` number of features suitable for e.g. `Flux`s `Dense` layer.
$inputshapemotivation
"""
denseinputvertex(name, size) = inputvertex(name, size, GenericFlux2D())
"""
rnninputvertex(name, size)
Return an input type vertex with the given `name` which promises 2D shaped input
with `size` number of features suitable for `Flux`s recurrent layers.
$inputshapemotivation
"""
rnninputvertex(name, size) = inputvertex(name, size, GenericFluxRecurrent())
layertype(v::InputShapeVertex) = v.t
layer(v::InputShapeVertex) = LayerTypeWrapper(v.t) # so that layertype(layer(v)) works
NaiveNASlib.base(v::InputShapeVertex) = v.base
NaiveNASlib.name(v::InputShapeVertex) = name(base(v))
NaiveNASlib.nout(v::InputShapeVertex) = nout(base(v))
NaiveNASlib.nin(v::InputShapeVertex) = nin(base(v))
NaiveNASlib.outputs(v::InputShapeVertex) = outputs(base(v))
NaiveNASlib.inputs(::InputShapeVertex) = []
# Only to prevent stack overflow above
struct LayerTypeWrapper{L}
t::L
end
layertype(l::LayerTypeWrapper) = l.t
Base.show(io::IO, l::LayerTypeWrapper) = show(io, l.t)
"""
SizeNinNoutConnected <: NaiveNASlib.DecoratingTrait
SizeNinNoutConnected(t)
Trait for computations for which a change in output size results in a change in input size but which
is not fully `SizeTransparent`.
Example of this is grouped convolutions where output size must be an integer multiple of the input size.
Does not create any constraints or objectives, only signals that vertices after a
`SizeNinNoutConnected` might need to change size if the size of the `SizeNinNoutConnected` vertex changes.
"""
struct SizeNinNoutConnected{T <: NaiveNASlib.MutationTrait} <: NaiveNASlib.DecoratingTrait
base::T
end
NaiveNASlib.base(t::SizeNinNoutConnected) = t.base
NaiveNASlib.all_in_Δsize_graph(mode, ::SizeNinNoutConnected, args...) = NaiveNASlib.all_in_Δsize_graph(mode, SizeInvariant(), args...)
const doc_layerfun_and_traitfun = """
Keyword argument `layerfun` can be used to wrap the computation, e.g. in an [`ActivationContribution`](@ref).
Keyword argument `traitfun` can be used to wrap the `MutationTrait` of the vertex in a `DecoratingTrait`
"""
"""
fluxvertex(l, in::AbstractVertex; layerfun=LazyMutable, traitfun=validated())
Return a vertex which wraps the layer `l` and has input vertex `in`.
$doc_layerfun_and_traitfun
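# Examples
A small sketch:
```julia
using NaiveNASflux, Flux

v = fluxvertex(Dense(2 => 3), denseinputvertex("in", 2))
nout(v) # 3
```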
"""
fluxvertex(l, in::AbstractVertex; layerfun=LazyMutable, traitfun=validated()) = fluxvertex(layertype(l), l, in, layerfun, traitfun)
"""
fluxvertex(name::AbstractString, l, in::AbstractVertex; layerfun=LazyMutable, traitfun=validated())
Return a vertex with name `name` which wraps the layer `l` and has input vertex `in`.
Name is only used when displaying or logging and does not have to be unique (although it probably is a good idea).
$doc_layerfun_and_traitfun
"""
fluxvertex(name::AbstractString, l, in::AbstractVertex; layerfun=LazyMutable, traitfun=validated()) = fluxvertex(layertype(l), l, in, layerfun, traitfun ∘ named(name))
fluxvertex(::FluxParLayer, l, in::AbstractVertex, layerfun, traitfun) = absorbvertex(layerfun(MutableLayer(l)), in, traitdecoration = traitfun)
fluxvertex(::FluxConvolutional, l, in::AbstractVertex, layerfun, traitfun) = absorbvertex(layerfun(MutableLayer(l)), in; traitdecoration= ngroups(l) == 1 ? traitfun : traitfun ∘ SizeNinNoutConnected)
fluxvertex(::FluxParInvLayer, l, in::AbstractVertex, layerfun, traitfun) = invariantvertex(layerfun(MutableLayer(l)), in, traitdecoration=traitfun ∘ FixedSizeTrait)
fluxvertex(::FluxNoParLayer, l, in::AbstractVertex, layerfun, traitfun) = invariantvertex(layerfun(NoParams(l)), in, traitdecoration=traitfun)
"""
concat(v::AbstractVertex, vs::AbstractVertex...; traitfun=identity, layerfun=identity)
Return a vertex which concatenates input along the activation (e.g. channel if convolution, first dimension if dense) dimension.
Inputs must have compatible activation shapes or an exception will be thrown.
$doc_layerfun_and_traitfun
See also `NaiveNASlib.conc`.
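# Examples
A small sketch:
```julia
using NaiveNASflux, Flux

iv = denseinputvertex("in", 3)
v1 = fluxvertex(Dense(3 => 4), iv)
v2 = fluxvertex(Dense(3 => 2), iv)

v3 = concat(v1, v2) # concatenates along the feature dimension
nout(v3)            # 4 + 2 == 6
```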
"""
function concat(v::AbstractVertex, vs::AbstractVertex...; traitfun=identity, layerfun=identity)
allactdims = unique(mapreduce(actdim, vcat, [v, vs...]))
if length(allactdims) != 1
throw(DimensionMismatch("Can not concatenate activations with different shapes! Got: $(join(allactdims, ", ", " and "))"))
end
allactranks = unique(mapreduce(actrank, vcat, [v, vs...]))
if length(allactranks) != 1
throw(DimensionMismatch("Can not concatenate activations with different shapes! Got: $(join(allactranks, ", ", " and "))"))
end
conc(v, vs...; dims=allactdims[], traitdecoration=traitfun, outwrap=layerfun)
end
"""
concat(name::AbstractString, v::AbstractVertex, vs::AbstractVertex...; traitfun=identity, layerfun=identity)
Return a vertex with name `name` which concatenates input along the activation (e.g. channel if convolution, first dimension if dense) dimension.
Name is only used when displaying or logging and does not have to be unique (although it probably is a good idea).
Inputs must have compatible activation shapes or an exception will be thrown.
$doc_layerfun_and_traitfun
See also `NaiveNASlib.conc`.
"""
concat(name::AbstractString, v::AbstractVertex, vs::AbstractVertex...; traitfun = identity, layerfun=identity) = concat(v, vs..., traitfun=traitfun ∘ named(name), layerfun=layerfun)
"""
layer(v)
Return the computation wrapped inside `v` and inside any mutable wrappers.
# Examples
```jldoctest
julia> using NaiveNASflux, Flux
julia> layer(fluxvertex(Dense(2,3), inputvertex("in", 2)))
Dense(2 => 3) # 9 parameters
```
"""
layer(v::AbstractVertex) = layer(base(v))
layer(v::CompVertex) = layer(v.computation)
layer(::InputVertex) = nothing
layertype(v::AbstractVertex) = layertype(base(v))
layertype(v::CompVertex) = layertype(v.computation)
layertype(::InputVertex) = nothing
actdim(v::AbstractVertex) = actdim.(layer.(NaiveNASlib.findterminating(v, inputs)))
actrank(v::AbstractVertex) = actrank.(layer.(NaiveNASlib.findterminating(v, inputs)))
mutate_weights(v::AbstractVertex, w) = mutate_weights(base(v), w)
mutate_weights(v::CompVertex, w) = mutate_weights(v.computation, w)
"""
setlayer!(x, propval)
Set the properties `propval` to the layer wrapped in `x` where `propval` is a named tuple with fieldname->value pairs.
This typically means create a new layer with the given values and set the wrapped layer to it.
### Examples
```julia-repl
julia> v = fluxvertex(Dense(3, 4, relu), inputvertex("in", 3));
julia> layer(v)
Dense(3 => 4, relu) # 16 parameters
julia> NaiveNASflux.setlayer!(v, (;σ=tanh));
julia> layer(v)
Dense(3 => 4, tanh) # 16 parameters
```
"""
function setlayer!(x, propval) end
setlayer!(v::AbstractVertex, propval) = setlayer!(base(v), propval)
setlayer!(v::CompVertex, propval) = setlayer!(v.computation, propval)
setlayer!(m::AbstractMutableComp, propval) = setlayer!(wrapped(m), propval)
setlayer!(m::ResetLazyMutable, propval) = setlayer!(m.wrapped, propval)
setlayer!(m::MutationTriggered, propval) = setlayer!(m.wrapped, propval)
function setlayer!(m::MutableLayer, propval)
m.layer = setproperties(m.layer, propval)
end | NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 5621 | @testset "ChainRules" begin
@testset "nograd" begin
import NaiveNASflux.nograd
@test Flux.gradient(+, 1,2) == (1,1)
@test Flux.gradient(1,2) do x,y
nograd() do
x+y
end
end == (nothing,nothing)
end
import Optimisers
teststructs(g::CompGraph{<:Any, <:Tuple}, res, exp; seen=Base.IdSet()) = foreach(enumerate(outputs(g))) do (i, vo)
teststructs(vo, seen, res.outputs[i] ,exp)
end
teststructs(g::CompGraph{<:Any, <:AbstractVertex}, res, exp; seen=Base.IdSet()) = teststructs(g.outputs, seen, res.outputs ,exp)
function teststructs(v::AbstractVertex, seen, res, exp)
v in seen && return
push!(seen, v)
_teststructs(v, seen, res, exp, Symbol(name(v)))
end
function _teststructs(::NaiveNASflux.InputShapeVertex, seen, res, exp, name) end
function _teststructs(v::AbstractVertex, seen, res::RT, exp, name) where RT
if layertype(v) isa NaiveNASflux.FluxParLayer
@testset "Check structure for $(name) of type $(typeof(v))" begin
@test hasfield(RT, :base)
end
if hasfield(RT, :base)
_teststructs(base(v), seen, res.base, exp, name)
end
end
end
function _teststructs(v::CompVertex, seen, res::RT, exp, name) where RT
@testset "Check structure for $(name) of type $(typeof(v))" begin
@test hasfield(RT, :computation)
_teststructs(v.computation, res.computation, exp, name)
end
foreach(enumerate(inputs(v))) do (i, vi)
isnothing(res.inputs) && return
teststructs(vi, seen, res.inputs[i], exp)
end
end
function _teststructs(m::LazyMutable, res::RT, exp, name) where RT
@testset "Check structure for $(name) of type $(typeof(m))" begin
@test hasfield(RT, :mutable)
end
_teststructs(m.mutable, res.mutable, exp, name)
end
function _teststructs(m::ActivationContribution, res::RT, exp, name) where RT
@testset "Check structure for $(name) of type $(typeof(m))" begin
@test hasfield(RT, :layer)
end
_teststructs(m.layer, res.layer, exp, name)
end
function _teststructs(m::NaiveNASflux.MutableLayer, res::RT, exp, name) where RT
@testset "Check structure for $(name)" begin
@test hasfield(RT, :layer)
explayer = getindex(exp, name)
reslayer = res.layer
_teststructs(reslayer, explayer)
end
end
_teststructs(res, exp) = @test res == exp
_teststructs(res::Dense, exp::Dense) = _teststructs(Optimisers.trainable(res), Optimisers.trainable(exp))
function _teststructs(res::T, exp::T) where T <: Optimisers.Leaf
_teststructs(res.rule, exp.rule)
_teststructs(res.state, exp.state)
end
function _teststructs(nt1::T, nt2::T) where T <: NamedTuple
@testset "Check param $p" for p in keys(nt1)
_teststructs(getfield(nt1, p), getfield(nt2, p))
end
end
@testset "Model gradient $(lfun == identity ? "" : " with layerfun=$lfun")" for lfun in (
identity,
LazyMutable,
ActivationContribution,
LazyMutable ∘ ActivationContribution
)
chain = Chain(v1 = Dense(2,3), v2 = Dense(3, 4))
indata = reshape(collect(Float32, 1:6),2,3)
iv = denseinputvertex("in", 2)
v1 = fluxvertex("v1", chain[1], iv; layerfun=lfun)
v2 = fluxvertex("v2", chain[2], v1; layerfun=lfun)
graph = CompGraph(iv, v2)
ps = Flux.params(chain)
exp = Flux.gradient(() -> sum(chain(indata)), ps)
res = Flux.gradient(() -> sum(graph(indata)), ps)
for p in ps
@test exp[p] == res[p]
end
@test gradient(sum ∘ graph, indata) == gradient(sum ∘ chain, indata)
expex = Flux.gradient(c -> sum(c(indata)), chain)
resex = Flux.gradient(g -> sum(g(indata)), graph)
teststructs(graph, resex..., expex[1].layers)
@testset "Optimisers" begin
graphstate = Optimisers.setup(Optimisers.Adam(), graph)
chainstate = Optimisers.setup(Optimisers.Adam(), chain)
@testset "Setup state" begin
teststructs(graph, graphstate, chainstate.layers)
end
# TODO: Why deepcopy needed? fmap(copy, graph) does not seem to work?
graphstate, newgraph = Optimisers.update(graphstate, deepcopy(graph), resex...)
chainstate, newchain = Optimisers.update(chainstate, chain, expex...)
@testset "New state" begin
teststructs(newgraph, graphstate, chainstate.layers)
end
@testset "New model" begin
teststructs(newgraph, Optimisers.trainable(newgraph), Optimisers.trainable(newchain).layers)
end
end
@test Δnout!(v1, 1)
resinc = Flux.gradient(() -> sum(graph(indata)), Flux.params(graph))
for (p1, p2) in zip(Flux.params(chain[1]).order, Flux.params(v1).order)
if ndims(p1) == 1
@test exp[p1] == resinc[p2][1:end-1]
else
@test exp[p1] == resinc[p2][1:end-1,:]
end
end
for (p1, p2) in zip(Flux.params(chain[2]).order, Flux.params(v2).order)
if ndims(p1) == 1
@test exp[p1] == resinc[p2]
else
@test exp[p1] == resinc[p2][:, 1:end-1]
end
end
end
end | NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 139 |
@testset "Examples" begin
import Markdown: @md_str
include("examples/quicktutorial.jl")
include("examples/xorpruning.jl")
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 21766 | @testset "Mutable computation" begin
using NaiveNASflux: AbstractMutableComp, MutableLayer, LazyMutable, weights, bias, select, mutate, hiddenweights, hiddenstate, state, outscale
using Functors: fmap
inszero = pairs((insert = (lt, pn) -> (args...) -> 0,))
_nins(m) = [1:nin(m)[]]
@testset "Dense MutableLayer" begin
m = MutableLayer(Dense(2,3))
@test nin(m) == [2]
@test nout(m) == nout(m.layer) == 3
@test m(Float32[1.0, 2.0]) == m.layer(Float32[1.0, 2.0])
m.layer = Dense(3,4)
bias(m.layer)[1:end] = 1:4
@test nin(m) == [3]
@test nout(m) == nout(m.layer) == 4
@test m(Float32[1.0, 2.0, 3.0]) == m.layer(Float32[1.0, 2.0, 3.0])
@testset "Select inputs" begin
inds = [1,3]
Wexp, bexp = weights(m.layer)[:, inds], bias(m.layer)
NaiveNASlib.Δsize!(m, [inds], 1:nout(m))
assertlayer(m.layer, Wexp, bexp)
end
@testset "Select outputs" begin
inds = [1,2,4]
Wexp, bexp = weights(m.layer)[inds, :], bias(m.layer)[inds]
NaiveNASlib.Δsize!(m, _nins(m), inds)
assertlayer(m.layer, Wexp, bexp)
end
@testset "Insert inputs" begin
inds = [1,-1, 2]
Wexp, bexp = hcat(weights(m.layer)[:, 1], zeros(Float32, 3), weights(m.layer)[:, 2]), bias(m.layer)
NaiveNASlib.Δsize!(m, [inds], 1:nout(m); inszero...)
assertlayer(m.layer, Wexp, bexp)
end
@testset "Insert outputs" begin
inds = [-1, 1, -1, 3, -1]
Wexp = permutedims(hcat(zeros(Float32, 3), weights(m.layer)[1, :], zeros(Float32, 3), weights(m.layer)[3, :], zeros(Float32,3)))
bexp = Float32[0, bias(m.layer)[1], 0, bias(m.layer)[3], 0]
NaiveNASlib.Δsize!(m, _nins(m), inds; inszero...)
assertlayer(m.layer, Wexp, bexp)
end
@testset "No bias" begin
m = MutableLayer(Dense(rand(3,2), false))
@test bias(layer(m)) == false
@test nin(m) == [2]
@test nout(m) == 3
inds = [2,3]
Wexp = weights(layer(m))[inds, :]
NaiveNASlib.Δsize!(m,_nins(m), inds)
assertlayer(layer(m), Wexp, false)
end
end
@testset "Convolutional layers" begin
@testset "Conv MutableLayer" begin
m = MutableLayer(Conv((2,3),(4=>5)))
@test nin(m) == [4]
@test nout(m) == nout(m.layer) == 5
input = reshape(collect(Float32, 1:3*4*4), 3, 4, 4, 1)
@test m(input) == m.layer(input)
@testset "Select inputs" begin
inds = [1,3]
Wexp, bexp = weights(m.layer)[:,:,inds,:], bias(m.layer)
NaiveNASlib.Δsize!(m, [inds], 1:nout(m))
assertlayer(m.layer, Wexp, bexp)
end
@testset "Select outputs" begin
inds = [1,2,4]
Wexp, bexp = weights(m.layer)[:,:,:,inds], bias(m.layer)[inds]
NaiveNASlib.Δsize!(m, _nins(m), inds)
assertlayer(m.layer, Wexp, bexp)
end
@testset "Insert inputs" begin
inds = [1,-1, 2]
wsize = deleteat!(collect(size(weights(m.layer))), 3)
# Nothing beats working in four dimensions...
# Stack 3D arrays in a 4th dimension and then swap dims 3 and 4
Wexp = permutedims(cat(
weights(m.layer)[:,:,1,:],
zeros(Float32, wsize...),
weights(m.layer)[:,:,2,:], dims=4), [1,2,4,3])
bexp = bias(m.layer)
NaiveNASlib.Δsize!(m, [inds], 1:nout(m); inszero...)
assertlayer(m.layer, Wexp, bexp)
end
@testset "Insert outputs" begin
inds = [-1, 1, -1, 3, -1]
wsize = deleteat!(collect(size(weights(m.layer))), 4)
Wexp = cat(
zeros(Float32, wsize...),
weights(m.layer)[:,:,:,1],
zeros(Float32, wsize...),
weights(m.layer)[:,:,:,3],
zeros(Float32,wsize...), dims=4)
bexp = Float32[0, bias(m.layer)[1], 0, bias(m.layer)[3], 0]
NaiveNASlib.Δsize!(m, _nins(m), inds; inszero...)
assertlayer(m.layer, Wexp, bexp)
end
@testset "No bias" begin
m = MutableLayer(Conv(Flux.convfilter((2,3), 4=>5), false))
@test bias(layer(m)) == false
@test nin(m) == [4]
@test nout(m) == 5
inds = [2,3]
Wexp = weights(layer(m))[:,:,:,inds]
NaiveNASlib.Δsize!(m, _nins(m), inds)
assertlayer(layer(m), Wexp, false)
end
end
@testset "ConvTranspose MutableLayer" begin
m = MutableLayer(ConvTranspose((2,3),(4=>5)))
@test nin(m) == [4]
@test nout(m) == nout(m.layer) == 5
input = reshape(collect(Float32, 1:3*4*4), 3, 4, 4, 1)
@test m(input) == m.layer(input)
inputs = [1,3]
outputs = [1,2,4]
Wexp, bexp = weights(m.layer)[:,:,outputs,inputs], bias(m.layer)[outputs]
NaiveNASlib.Δsize!(m, [inputs], outputs)
assertlayer(m.layer, Wexp, bexp)
end
@testset "DepthwiseConv MutableLayer" begin
m = MutableLayer(DepthwiseConv((2,2),(3=>6*3)))
@test nin(m) == [3]
@test nout(m) == nout(m.layer) == 18
input = reshape(collect(Float32, 1:3*3*3), 3, 3, 3, 1)
@test m(input) == m.layer(input)
@testset "Select params" begin
wins = [1, 3]
wouts = [1, 2, 5, 6]
outputs = mapreduce(i -> wouts .+ (i-1) .* 6, vcat, wins)
Wexp = reshape(reshape(weights(m.layer), 2, 2, 6, 3)[:,:,wouts,wins], 2, 2, 1, :)
bexp = bias(m.layer)[outputs]
NaiveNASlib.Δsize!(m, [wins], outputs)
assertlayer(m.layer, Wexp, bexp)
@test size(m(ones(Float32, 3,3,2,2)))[3:4] == (8, 2)
end
@testset "Insert params" begin
inputs = [1, 2, -1]
outputs = [1, 2, -1, -1, -1, -1, 3, 4, -1, -1, -1, -1]
NaiveNASlib.Δsize!(m, [inputs], outputs)
@test nin(m) == [3]
@test nout(m) == 12
@test size(m(ones(Float32, 3,3,3,2)))[3:4] == (12, 2)
end
end
@testset "CrossCor MutableLayer" begin
m = MutableLayer(CrossCor((2,3),(4=>5)))
@test nin(m) == [4]
@test nout(m) == nout(m.layer) == 5
input = reshape(collect(Float32, 1:3*4*4), 3, 4, 4, 1)
@test m(input) == m.layer(input)
inputs = [1,3]
outputs = [1,2,4]
Wexp, bexp = weights(m.layer)[:,:,inputs, outputs], bias(m.layer)[outputs]
NaiveNASlib.Δsize!(m, [inputs], outputs)
assertlayer(m.layer, Wexp, bexp)
end
end
@testset "Scale MutableLayer" begin
m = MutableLayer(Flux.Scale(4))
@test nin(m) == [nout(m)] == [4]
weights(m.layer)[1:end] = 1:4
bias(m.layer)[1:end] = 1:4
@test m(Float32[1,2,3,4]) == m.layer(Float32[1,2,3,4])
@testset "Select params" begin
inds = [1,3]
Wexp, bexp = weights(m.layer)[inds], bias(m.layer)[inds]
NaiveNASlib.Δsize!(m, [inds], inds)
assertlayer(m.layer, Wexp, bexp)
end
@testset "Insert params" begin
inds = [-1, 2, -1]
Wexp, bexp = Float32[0, weights(m.layer)[2], 0], Float32[0, bias(m.layer)[2], 0]
NaiveNASlib.Δsize!(m, [inds], inds; inszero...)
assertlayer(m.layer, Wexp, bexp)
end
end
@testset "Normalization layers" begin
@testset "LayerNorm MutableLayer" begin
m = MutableLayer(LayerNorm(3; affine=true))
@test nin(m) == [nout(m)] == [3]
weights(m.layer.diag)[1:end] = 1:3
bias(m.layer.diag)[1:end] = 1:3
@test m(Float32[1,2,3]) == m.layer(Float32[1,2,3])
@testset "Select params" begin
inds = [1,3]
Wexp, bexp = weights(m.layer.diag)[inds], bias(m.layer.diag)[inds]
NaiveNASlib.Δsize!(m, [inds], inds)
@test typeof(layer(m)) <: LayerNorm
assertlayer(m.layer.diag, Wexp, bexp)
@test nin(m) == [nout(m)] == [2]
end
@testset "Insert params" begin
inds = [-1, 2, -1]
Wexp, bexp = Float32[0, weights(m.layer.diag)[2], 0], Float32[0, bias(m.layer.diag)[2], 0]
NaiveNASlib.Δsize!(m, [inds], inds; inszero...)
@test typeof(layer(m)) <: LayerNorm
assertlayer(m.layer.diag, Wexp, bexp)
@test nin(m) == [nout(m)] == [3]
end
end
function assertnorm(l, meanexp, varexp)
if l isa BatchNorm
@test vec(l.μ) == meanexp
@test vec(l.σ²) == varexp
end
@test l.β == meanexp
@test l.γ == varexp
end
setpar(x) = x
setpar(x::AbstractArray) = reshape(collect(Float32, 1:length(x)), size(x))
@testset "$lab MutableLayer" for (l, lab) in (
(BatchNorm, BatchNorm),
(InstanceNorm, InstanceNorm),
((n;kw...) -> GroupNorm(n,n; kw...), GroupNorm))
track_stats = l == BatchNorm ? (;track_stats = true) : ()
m = MutableLayer(l(5; affine=true, track_stats...))
l_orig = layer(m)
@test nin(m) == [nout(m)] == [5]
m.layer = Flux.fmap(setpar, layer(m))
@testset "Select params" begin
inds = [1,3,4]
expall = Float32.(inds)
NaiveNASlib.Δsize!(m, [inds], inds)
assertnorm(m.layer, inds, inds)
@test nin(m) == [nout(m)] == [3]
end
@testset "Insert params" begin
NaiveNASlib.Δsize!(m, [[-1, 2, -1]], [-1, 2, -1])
assertnorm(m.layer, [0, 3, 0], [1, 3, 1])
@test nin(m) == [nout(m)] == [3]
end
end
@testset "GroupNorm MutableLayer with groups" begin
@testset "Groups of 2" begin
m = MutableLayer(GroupNorm(6,3; affine=true))
m.layer = Flux.fmap(setpar, layer(m))
inds = [1,2,5,6]
NaiveNASlib.Δsize!(m, [inds], inds)
@test layer(m).β == inds
@test layer(m).γ == inds
end
@testset "Group size 8 to 9" begin
# Now when dimensions don't add up: size 8 becomes size 9
m = MutableLayer(GroupNorm(8,4; affine=true))
inds = [1,3,-1,-1,4,-1,7,-1,8]
NaiveNASlib.Δsize!(m, [inds], inds)
# Current alg for selecting which group to pick in this case is poor, don't wanna test it :)
@test length(layer(m).β) == 9
@test length(layer(m).γ) == 9
end
end
end
@testset "Recurrent layers" begin
function assertrecurrent(l, Wiexp, Whexp, bexp, hexp, sexp)
assertlayer(l, Wiexp, bexp)
@test hiddenweights(l) == Whexp
@test hiddenstate(l) == hexp
@test state(l) == sexp
end
function setparsrnn(l)
bias(l)[1:end] = collect(Float32, 1:nout(l)*outscale(l))
hiddenstate(l)[1:end] = collect(Float32, 1:nout(l))
state(l)[1:end] = collect(Float32, 1:nout(l))
end
function setparslstm(l)
bias(l)[1:end] = collect(Float32, 1:nout(l)*outscale(l))
foreach(h -> h[1:end] = collect(Float32, 1:nout(l)), hiddenstate(l))
foreach(h -> h[1:end] = collect(Float32, 1:nout(l)), state(l))
end
@testset "RNN MutableLayer" begin
m = MutableLayer(RNN(3, 4))
setparsrnn(layer(m))
@test nin(m) == [3]
@test nout(m) == nout(m.layer) == 4
@testset "Select inputs" begin
inds = [1, 3]
Wiexp = weights(layer(m))[:, inds]
Whexp = copy(hiddenweights(layer(m)))
bexp = copy(bias(layer(m)))
hexp = copy(hiddenstate(layer(m)))
sexp = copy(state(layer(m)))
NaiveNASlib.Δsize!(m, [inds], 1:nout(m))
assertrecurrent(layer(m), Wiexp, Whexp, bexp, hexp, sexp)
end
@testset "Insert outputs" begin
inds = [1,-1, 2]
Wiexp = permutedims(hcat(weights(layer(m))[1, :], zeros(Float32, 2), weights(layer(m))[2, :]))
wh = hiddenweights(layer(m))
Whexp = [wh[1, 1] 0 wh[1, 2]; zeros(Float32, 1, 3); wh[2, 1] 0 wh[2, 2]]
bexp = Float32[bias(layer(m))[1], 0, bias(layer(m))[2]]
hexp = Float32[hiddenstate(layer(m))[1], 0, hiddenstate(layer(m))[2]] |> hcat
sexp = Float32[state(layer(m))[1], 0, state(layer(m))[2]] |> hcat
NaiveNASlib.Δsize!(m, _nins(m), inds; inszero...)
assertrecurrent(layer(m), Wiexp, Whexp, bexp, hexp, sexp)
end
#Sanity check that the layer still seems to work after mutation
output = m(reshape(collect(Float32, 1:2*10), 2,10))
@test size(output) == (3, 10)
@test isnan.(output) == falses(size(output))
end
@testset "LSTM MutableLayer" begin
m = MutableLayer(LSTM(3, 4))
setparslstm(layer(m))
@test nin(m) == [3]
@test nout(m) == nout(m.layer) == 4
@testset "Select inputs" begin
inds = [1, 3]
Wiexp = weights(layer(m))[:, inds]
Whexp = copy(hiddenweights(layer(m)))
bexp = copy(bias(layer(m)))
hexp = copy(hiddenstate(layer(m)))
sexp = copy(state(layer(m)))
NaiveNASlib.Δsize!(m, [inds], 1:nout(m))
assertrecurrent(layer(m), Wiexp, Whexp, bexp, hexp, sexp)
end
@testset "Insert outputs" begin
inds = [1,-1, 2]
wi = weights(layer(m))
scalerange = (0:outscale(layer(m))-1) .* nout(layer(m))
Wiexp = permutedims(mapfoldl(offs -> hcat(wi[1+offs, :], zeros(Float32, 2), wi[2+offs, :]), hcat, scalerange))
wh = hiddenweights(layer(m))
Whexp = mapfoldl(offs -> [wh[1+offs, 1] 0 wh[1+offs, 2]; zeros(Float32, 1, 3); wh[2+offs, 1] 0 wh[2+offs, 2]], vcat, scalerange)
bexp = mapfoldl(offs -> Float32[bias(layer(m))[1+offs], 0, bias(layer(m))[2+offs]], vcat, scalerange)
hexp = map(hs -> Float32[hs[1], 0, hs[2]] |> hcat, hiddenstate(layer(m)))
sexp = map(hs -> Float32[hs[1], 0, hs[2]] |> hcat, state(layer(m)))
NaiveNASlib.Δsize!(m, _nins(m), inds; inszero...)
assertrecurrent(layer(m), Wiexp, Whexp, bexp, hexp, sexp)
end
#Sanity check that the layer still seems to work after mutation
output = m(reshape(collect(Float32, 1:2*10), 2,10))
@test size(output) == (3, 10)
@test isnan.(output) == falses(size(output))
end
@testset "GRU MutableLayer" begin
m = MutableLayer(GRU(3, 4))
setparsrnn(layer(m))
@test nin(m) == [3]
@test nout(m) == nout(m.layer) == 4
@testset "Select inputs" begin
inds = [1, 3]
Wiexp = weights(layer(m))[:, inds]
Whexp = copy(hiddenweights(layer(m)))
bexp = copy(bias(layer(m)))
hexp = copy(hiddenstate(layer(m)))
sexp = copy(state(layer(m)))
NaiveNASlib.Δsize!(m, [inds], 1:nout(m))
assertrecurrent(layer(m), Wiexp, Whexp, bexp, hexp, sexp)
end
@testset "Insert outputs" begin
inds = [1,-1, 2]
wi = weights(layer(m))
scalerange = (0:outscale(layer(m))-1) .* nout(layer(m))
Wiexp = permutedims(mapfoldl(offs -> hcat(wi[1+offs, :], zeros(Float32, 2), wi[2+offs, :]), hcat, scalerange))
wh = hiddenweights(layer(m))
Whexp = mapfoldl(offs -> [wh[1+offs, 1] 0 wh[1+offs, 2]; zeros(Float32, 1, 3); wh[2+offs, 1] 0 wh[2+offs, 2]], vcat, scalerange)
bexp = mapfoldl(offs -> Float32[bias(layer(m))[1+offs], 0, bias(layer(m))[2+offs]], vcat, scalerange)
hexp = Float32[hiddenstate(layer(m))[1], 0, hiddenstate(layer(m))[2]] |> hcat
sexp = Float32[state(layer(m))[1], 0, state(layer(m))[2]] |> hcat
NaiveNASlib.Δsize!(m, _nins(m), inds; inszero...)
assertrecurrent(layer(m), Wiexp, Whexp, bexp, hexp, sexp)
end
#Sanity check that the layer still seems to work after mutation
output = m(reshape(collect(Float32, 1:2*10), 2,10))
@test size(output) == (3, 10)
@test isnan.(output) == falses(size(output))
end
end
@testset "Copy MutableLayer with $label" for (label, cfun) in (
(deepcopy, deepcopy),
("fmap", g -> fmap(deepcopy, g))
)
m = MutableLayer(Dense(2,3))
cloned = cfun(m)
@test layer(cloned) !== layer(m)
@test cloned([1, 2]) == m([1, 2])
end
@testset "LazyMutable" begin
@testset "LazyMutable with Dense MutableLayer" begin
import NaiveNASflux: layertype
m = MutableLayer(Dense(3,4))
mlazy = LazyMutable(m)
Wexp = weights(layer(m))
bexp = bias(layer(m))
NaiveNASlib.Δsize!(mlazy, [[1, 3]], 1:nout(m))
assertlayer(layer(m), Wexp, bexp)
NaiveNASlib.Δsize!(mlazy, _nins(mlazy), [2, 4, -1]; inszero...)
assertlayer(layer(m), Wexp, bexp)
@test layer(m) == layer(mlazy)
@test layertype(m) == layertype(mlazy)
expected = mlazy(Float32[2,3])
@test nin(mlazy) == nin(m) == [2]
@test nout(mlazy) == nout(m) == 3
@test expected == m(Float32[2,3])
end
@testset "LazyMutable DepthwiseConv" begin
m = LazyMutable(MutableLayer(DepthwiseConv((2,2),(3=>6*3))))
@test nin(m) == [3]
@test nout(m) == 18
input = reshape(collect(Float32, 1:3*3*3), 3, 3, 3, 1)
@test m(input) == layer(m)(input)
wins = [1, 3]
wouts = [1, 2, 5, 6]
outs = mapreduce(i -> wouts .+ (i-1) .* 6, vcat, wins)
Wexp = reshape(reshape(weights(layer(m)), 2, 2, 6, 3)[:,:,wouts,wins], 2, 2, 1, :)
bexp = bias(layer(m))[outs]
NaiveNASlib.Δsize!(m, [wins], outs)
@test size(m(ones(Float32, 3,3,2,2)))[3:4] == (8, 2)
assertlayer(layer(m), Wexp, bexp)
end
@testset "LazyMutable reselect" begin
m = LazyMutable(MutableLayer(Dense(5,5)))
NaiveNASlib.Δsize!(m, [[-1, 1, 3, -1, 4]], 1:nout(m))
@test m.inputs == [[-1, 1, 3, -1, 4]]
NaiveNASlib.Δsize!(m, [[1,2,4,5]], 1:nout(m))
@test m.inputs == [[-1, 1,-1, 4]]
NaiveNASlib.Δsize!(m, _nins(m), [2, -1, 3, -1, 4])
@test m.outputs == [2, -1, 3, -1, 4]
@test m.inputs == [[-1, 1,-1, 4]]
NaiveNASlib.Δsize!(m, _nins(m), [1, 2, 5,-1])
@test m.outputs == [2, -1, 4, -1]
@test m.inputs == [[-1, 1,-1, 4]]
@test m(Float32[1,3,5,7]) == layer(m)(Float32[1,3,5,7])
end
@testset "Copy LazyMutable with $label" for (label, cfun) in (
(deepcopy, deepcopy),
("fmap", g -> fmap(deepcopy, g))
)
mlazy = LazyMutable(MutableLayer(Dense(2,3)))
cloned = cfun(mlazy)
@test layer(cloned) !== layer(mlazy)
@test cloned([1, 2]) == mlazy([1, 2])
end
@testset "Functor" begin
m = LazyMutable(MutableLayer(Dense(2,3)))
visitfun(x) = x
visitdense = false
function visitfun(l::AbstractArray{Float32})
visitdense = true
return l
end
NaiveNASlib.Δsize!(m, [[-1, -1, 1, 2]], 1:nout(m))
@test size(weights(layer(m))) == (3,2)
Flux.fmap(visitfun, m)
@test visitdense
@test size(weights(layer(m))) == (3,4)
end
@testset "Force mutation" begin
import NaiveNASflux: FluxDense
invertex = inputvertex("in", 3, FluxDense())
hlayer = fluxvertex("hlayer", Dense(3,4), invertex)
outlayer = fluxvertex("outlayer", Dense(4, 2), hlayer)
graph = CompGraph(invertex, outlayer)
Δnout!(hlayer, 2)
@test nout(layer(hlayer)) == 4
@test nin(layer(outlayer)) == [4]
NaiveNASflux.forcemutation(graph)
@test nout(layer(hlayer)) == 6
@test nin(layer(outlayer)) == [6]
end
end
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 15728 |
@testset "Neuron utility tests" begin
import NaiveNASflux: neuronutility_safe, neuronutility, layertype
import Flux: params
ml(l, lfun=LazyMutable; insize=nin(l)[]) = fluxvertex(l, inputvertex("in", insize, layertype(l)), layerfun = lfun)
function tr(l, data; loss=Flux.mse)
example = [(data, 1)];
Flux.train!((f,x,y) -> loss(l(x), y), l, example, Flux.setup(Descent(0.1), l))
end
function tr(l, output, inputs...)
example = [(inputs, output)];
Flux.train!((f,x,y) -> Flux.mse(l(x...), y), l, example, Flux.setup(Descent(0.1), l))
end
@testset "Utils" begin
actonly(curr, act, grad) = act
@testset "Ewma" begin
m = NaiveNASflux.Ewma(0.3, actonly)
@test m(missing, [1,2,3,4], :ignored) == [1,2,3,4]
@test m([1,2,3,4], [5,6,7,8], :ignored) ≈ [3.8, 4.8, 5.8, 6.8]
@test m(Float32[1,2,3,4], Float32[5,6,7,8], :ignored) ≈ Float32[3.8, 4.8, 5.8, 6.8]
end
@testset "NeuronUtilityEvery{3}" begin
m = NeuronUtilityEvery(3, actonly)
@test m(:old, :new, :ignored) == :new
@test m(:old, :new, :ignored) == :old
@test m(:old, :new, :ignored) == :old
@test m(:old, :new, :ignored) == :new
@test m(:old, :new, :ignored) == :old
@test m(:old, :new, :ignored) == :old
@test m(:old, :new, :ignored) == :new
@test m(:old, :new, :ignored) == :old
@test m(:old, :new, :ignored) == :old
end
end
@testset "Neuron utility Dense default" begin
l = ml(Dense(3,5))
@test size(neuronutility(l)) == (5,)
Δnout!(v -> 1, l => 3)
@test size(neuronutility(l)) == (8,)
Δnout!(v -> 1, l => -4)
@test size(neuronutility(l)) == (4,)
@test neuronutility(l) ≈ neuronutility_safe(l)
end
@testset "Neuron utility Dense default no bias" begin
l = ml(Dense(ones(5, 3), false))
@test size(neuronutility(l)) == (5,)
@test neuronutility(l) ≈ neuronutility_safe(l)
end
@testset "Neuron utility RNN default" begin
l = ml(RNN(3,5))
@test size(neuronutility(l)) == (5,)
@test neuronutility(l) ≈ neuronutility_safe(l)
end
@testset "Neuron utility Conv default" begin
l = ml(Conv((2,3), 4=>5))
@test size(neuronutility(l)) == (5,)
@test neuronutility(l) ≈ neuronutility_safe(l)
end
@testset "Neuron utility unkown default" begin
l = ml(MeanPool((2,2)); insize = 3)
@test ismissing(neuronutility(l))
@test neuronutility_safe(l) == 1
end
@testset "ActivationContribution no grad" begin
import NaiveNASflux: nograd
f(x) = nograd(() -> 2 .* x .^ 2)
l = ml(Dense(2,3), ActivationContribution)
@test neuronutility(l) == fill(eps(Float32), 3)
tr(l, ones(Float32, 2, 1), loss = f ∘ Flux.mse)
@test neuronutility(l) == fill(eps(Float32), 3)
@test length(params(l)) == length(params(layer(l)))
end
@testset "Functor and trainable" begin
import NaiveNASflux: weights, bias
l = ml(Dense(2,3), ActivationContribution)
neuronutility_org = neuronutility(l)
@test params(l) == params(layer(l))
l2 = fmap(x -> x isa AbstractArray ? fill(17, size(x)) : x, l)
@test unique(neuronutility(l2)) == unique(bias(layer(l2))) == unique(weights(layer(l2))) == [17]
@test neuronutility_org === neuronutility(l) == fill(eps(Float32), nout(l))
end
@testset "Neuron utility Dense act contrib" begin
l = ml(Dense(3,5), ActivationContribution)
@test neuronutility(l) == fill(eps(Float32), 5)
tr(l, ones(Float32, 3, 4))
@test size(neuronutility(l)) == (5,)
@test length(params(l)) == length(params(layer(l)))
end
@testset "Neuron utility Dense act contrib every 4" begin
l = ml(Dense(3,5), l -> ActivationContribution(l, NeuronUtilityEvery(4)))
@test neuronutility(l) == fill(eps(Float32), 5)
nvprev = copy(neuronutility(l))
tr(l, ones(Float32, 3, 4))
@test neuronutility(l) != nvprev
nvprev = copy(neuronutility(l))
tr(l, ones(Float32, 3, 4))
@test nvprev == neuronutility(l)
tr(l, ones(Float32, 3, 4))
tr(l, ones(Float32, 3, 4))
tr(l, ones(Float32, 3, 4))
@test nvprev != neuronutility(l)
end
@testset "Neuron utility RNN act contrib" begin
l = ml(RNN(3,5), ActivationContribution)
@test neuronutility(l) == fill(eps(Float32), 5)
tr(l, ones(Float32, 3, 8))
@test size(neuronutility(l)) == (5,)
@test length(params(l)) == length(params(layer(l)))
end
@testset "Neuron utility Conv act contrib" begin
l = ml(Conv((3,3), 2=>5, pad=(1,1)), ActivationContribution)
@test neuronutility(l) == fill(eps(Float32), 5)
tr(l, ones(Float32, 4,4,2,5))
@test size(neuronutility(l)) == (5,)
@test length(params(l)) == length(params(layer(l)))
end
@testset "Neuron utility MaxPool act contrib" begin
l = ml(MaxPool((3,3)), ActivationContribution, insize=2)
@test ismissing(neuronutility(l))
tr(l, ones(Float32, 4,4,2,5))
@test size(neuronutility(l)) == (2,)
end
@testset "Neuron utility GlobalMeanPool act contrib" begin
l = ml(GlobalMeanPool(), ActivationContribution, insize=2)
@test ismissing(neuronutility(l))
tr(l, ones(Float32, 4,4,2,5))
@test size(neuronutility(l)) == (2,)
end
@testset "Elem add ActivationContribution" begin
ac(l) = ActivationContribution(l)
v = ac >> ml(Dense(2,3)) + ml(Dense(4,3))
tr(v, [1 1 1]', [1 2 3]', [4 5 6]')
@test size(neuronutility(v)) == (3,)
g = CompGraph(vcat(inputs.(inputs(v))...), v)
@test size(g(ones(Float32, 2,2), ones(Float32, 4, 2))) == (nout(v), 2)
end
@testset "Concat ActivationContribution" begin
v = concat(ml(Dense(2,3)), ml(Dense(4,5)), layerfun=ActivationContribution)
tr(v,ones(nout(v), 1), [1 2 3]', [4 5 6 7 8]')
@test size(neuronutility(v)) == (nout(v),)
g = CompGraph(vcat(inputs.(inputs(v))...), v)
@test size(g(ones(Float32, 2,2), ones(Float32, 4, 2))) == (nout(v), 2)
end
@testset "Function ActivationContribution" begin
# Not really an absorbvertex, but ActivationContribution should work on stuff which is not Flux layers.
v = invariantvertex(ActivationContribution(x -> 2x), ml(Dense(2,3)))
tr(v, ones(nout(v), 1))
@test size(neuronutility(v)) == (nout(v),)
g = CompGraph(vcat(inputs.(inputs(v))...), v)
@test size(g(ones(Float32, 2,2))) == (nout(v), 2)
end
@testset "Mutate ActivationContribution" begin
l = ml(Dense(3,5), ActivationContribution ∘ LazyMutable)
Δnout!(v -> 1:nout(v), l, -1)
@test size(l(ones(Float32, 3,2))) == (4, 2)
@test size(neuronutility(l)) == (4,)
end
@testset "Mutate ActivationContribution MaxPool" begin
l1 = ml(Conv((3,3), 2=>5, pad=(1,1)), ActivationContribution ∘ LazyMutable)
l2 = fluxvertex(MaxPool((3,3), pad=(1,1)), l1, layerfun = ActivationContribution ∘ LazyMutable)
g = CompGraph(inputs(l1), l2)
# Mutate before activation contribution for l2 has been initialized
Δnout!(v -> 1:nout(v), l1 => 1)
@test size(neuronutility(l1)) == (6,)
@test ismissing(neuronutility(l2))
# This will initialize it
@test size(g(ones(Float32, 4,4,2,3))) == (2,2,6,3)
Δnout!(v -> 1:nout(v), l1 => -2)
tr(g, ones(Float32, 2,2,4,3), ones(Float32, 4,4,2,3))
@test size(neuronutility(l1)) == size(neuronutility(l2)) == (4,)
Δnin!(v -> 1:nout(v), l2 => -1)
tr(g, ones(Float32, 2,2,3,3), ones(Float32, 4,4,2,3))
@test size(neuronutility(l1)) == size(neuronutility(l2)) == (3,)
end
@testset "Add input edge to ActivationContribution concat" begin
v0 = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(nout(v0), 4), v0; layerfun=ActivationContribution)
v2 = fluxvertex("v2", Dense(nout(v0), 3), v0; layerfun=ActivationContribution)
v3 = concat("v3", v1; layerfun=ActivationContribution)
v4 = fluxvertex("v4", Dense(nout(v3), 2), v3; layerfun=ActivationContribution)
v5 = concat("v5", v4, v3, v3; layerfun=ActivationContribution)
g = CompGraph(v0, v5)
Flux.gradient(() -> sum(g(ones(Float32, nout(v0), 1))))
# make sure values have materialized so we don't accidentally have a scalar utility
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1)
@test create_edge!(v2, v3)
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1) + nout(v2)
@test length(NaiveNASlib.defaultutility(v5)) == nout(v5) == 2 * nout(v3) + nout(v4)
@test size(g(ones(Float32, nout(v0), 1))) == (nout(v5), 1)
end
@testset "Add input edge to ActivationContribution concat fail" begin
import NaiveNASlib: PostAlign, ΔSizeFailNoOp, FailAlignSizeNoOp
import NaiveNASflux: neuronutility
v0 = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(nout(v0), 4), v0; layerfun=ActivationContribution)
v2 = fluxvertex("v2", Dense(nout(v0), 3), v0; layerfun=ActivationContribution)
v3 = concat("v3", v1; layerfun=ActivationContribution)
v4 = fluxvertex("v4", Dense(nout(v3), 2), v3; layerfun=ActivationContribution)
v5 = concat("v5", v4, v3, v3; layerfun=ActivationContribution)
g = CompGraph(v0, v5)
Flux.gradient(() -> sum(g(ones(Float32, nout(v0), 1))))
# make sure values have materialized so we don't accidentally have a scalar utility
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1)
nvbefore_v3 = copy(neuronutility(v3))
nvbefore_v5 = copy(neuronutility(v5))
@test create_edge!(v2, v3; strategy=PostAlign(ΔSizeFailNoOp(), FailAlignSizeNoOp())) == false
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1)
@test length(NaiveNASlib.defaultutility(v5)) == nout(v5) == 2 * nout(v3) + nout(v4)
@test nvbefore_v3 == neuronutility(v3)
@test nvbefore_v5 == neuronutility(v5)
@test size(g(ones(Float32, nout(v0), 1))) == (nout(v5), 1)
end
@testset "Add input edge to ActivationContribution concat maze" begin
import NaiveNASlib: PostAlign
v0 = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(nout(v0), 4), v0; layerfun=ActivationContribution)
v2 = fluxvertex("v2", Dense(nout(v0), 3), v0; layerfun=ActivationContribution)
v3 = concat("v3", v1; layerfun=ActivationContribution)
v4 = concat("v4", v3; layerfun=ActivationContribution)
v5 = concat("v5", v3; layerfun=ActivationContribution)
v6 = concat("v6", v3,v4,v5; layerfun=ActivationContribution)
g = CompGraph(v0, v6)
Flux.gradient(() -> sum(g(ones(Float32, nout(v0), 1))))
# make sure values have materialized so we don't accidentally have a scalar utility
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1)
@test create_edge!(v2, v3; strategy=PostAlign())
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1) + nout(v2)
@test length(NaiveNASlib.defaultutility(v4)) == nout(v4) == nout(v3)
@test length(NaiveNASlib.defaultutility(v5)) == nout(v5) == nout(v3)
@test length(NaiveNASlib.defaultutility(v6)) == nout(v6) == nout(v3) + nout(v4) + nout(v5)
@test size(g(ones(Float32, nout(v0), 1))) == (nout(v6), 1)
end
@testset "Remove input edge to ActivationContribution concat" begin
v0 = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(nout(v0), 4), v0; layerfun=ActivationContribution)
v2 = fluxvertex("v2", Dense(nout(v0), 3), v0; layerfun=ActivationContribution)
v3 = concat("v3", v1, v2; layerfun=ActivationContribution)
v4 = fluxvertex("v4", Dense(nout(v3), 2), v3; layerfun=ActivationContribution)
v5 = concat("v5", v4, v3, v3; layerfun=ActivationContribution)
g = CompGraph(v0, v5)
Flux.gradient(() -> sum(g(ones(Float32, nout(v0), 1))))
# make sure values have materialized so we don't accidentally have a scalar utility
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1) + nout(v2)
@test remove_edge!(v2, v3)
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1)
@test length(NaiveNASlib.defaultutility(v5)) == nout(v5) == 2 * nout(v3) + nout(v4)
@test size(g(ones(Float32, nout(v0), 1))) == (nout(v5), 1)
end
@testset "Remove input edge to ActivationContribution concat fail" begin
import NaiveNASlib: PostAlign, ΔSizeFailNoOp, FailAlignSizeNoOp
import NaiveNASflux: neuronutility
v0 = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(nout(v0), 4), v0; layerfun=ActivationContribution)
v2 = fluxvertex("v2", Dense(nout(v0), 3), v0; layerfun=ActivationContribution)
v3 = concat("v3", v1, v2; layerfun=ActivationContribution)
v4 = fluxvertex("v4", Dense(nout(v3), 2), v3; layerfun=ActivationContribution)
v5 = concat("v5", v4, v3, v3; layerfun=ActivationContribution)
g = CompGraph(v0, v5)
Flux.gradient(() -> sum(g(ones(Float32, nout(v0), 1))))
# make sure values have materialized so we don't accidentally have a scalar utility
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1) + nout(v2)
nvbefore_v3 = copy(neuronutility(v3))
nvbefore_v5 = copy(neuronutility(v5))
@test remove_edge!(v2, v3; strategy= PostAlign(ΔSizeFailNoOp(), FailAlignSizeNoOp())) == false
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1) + nout(v2)
@test length(NaiveNASlib.defaultutility(v5)) == nout(v5) == 2 * nout(v3) + nout(v4)
@test nvbefore_v3 == neuronutility(v3)
@test nvbefore_v5 == neuronutility(v5)
@test size(g(ones(Float32, nout(v0), 1))) == (nout(v5), 1)
end
@testset "Remove input edge to ActivationContribution concat maze" begin
import NaiveNASlib: PostAlign
v0 = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(nout(v0), 4), v0; layerfun=ActivationContribution)
v2 = fluxvertex("v2", Dense(nout(v0), 3), v0; layerfun=ActivationContribution)
v3 = concat("v3", v1, v2; layerfun=ActivationContribution)
v4 = concat("v4", v3; layerfun=ActivationContribution)
v5 = concat("v5", v3; layerfun=ActivationContribution)
v6 = concat("v6", v3,v4,v5; layerfun=ActivationContribution)
g = CompGraph(v0, v6)
Flux.gradient(() -> sum(g(ones(Float32, nout(v0), 1))))
# make sure values have materialized so we don't accidentally have a scalar utility
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1) + nout(v2)
@test remove_edge!(v2, v3; strategy=PostAlign())
@test length(NaiveNASlib.defaultutility(v3)) == nout(v3) == nout(v1)
@test length(NaiveNASlib.defaultutility(v4)) == nout(v4) == nout(v3)
@test length(NaiveNASlib.defaultutility(v5)) == nout(v5) == nout(v3)
@test length(NaiveNASlib.defaultutility(v6)) == nout(v6) == nout(v3) + nout(v4) + nout(v5)
@test size(g(ones(Float32, nout(v0), 1))) == (nout(v6), 1)
end
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 788 | using NaiveNASflux, Flux, Test
using NaiveNASlib.Advanced, NaiveNASlib.Extend
function assertlayer(l, Wexp, bexp)
@test size(Wexp) == size(weights(l))
@test size(bexp) == size(bias(l))
@test Wexp == weights(l)
@test bexp == bias(l)
end
@testset "NaiveNASflux.jl" begin
@info "Testing util"
include("util.jl")
@info "Testing select"
include("select.jl")
@info "Testing mutable"
include("mutable.jl")
@info "Testing vertex"
include("vertex.jl")
@info "Testing chainrules"
include("chainrules.jl")
@info "Testing neuronutility"
include("neuronutility.jl")
@info "Testing examples"
include("examples.jl")
if Int !== Int32
import Documenter
Documenter.doctest(NaiveNASflux)
end
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 2196 | @testset "Select" begin
@testset "KernelSizeAligned" begin
import NaiveNASflux: selectfilters
pars = reshape(1:2*3*4*5, 5,4,3,2)
ps = selectfilters(KernelSizeAligned(2, -1), pars)
@test ps[1] == (1 => [-1, 1, 2, 3, 4, 5, -1])
@test ps[2] == (2 => [2,3,4])
end
@testset "KernelSizeAligned MutableLayer $convtype" for convtype in (Conv, ConvTranspose, DepthwiseConv)
import NaiveNASflux: weights
m = fluxvertex(convtype((3,4), 5=>5, pad=(1,1,1,2)), inputvertex("in", 5), layerfun=identity)
indata = ones(Float32, 5,5,5,2)
@test size(weights(layer(m)))[1:2] == (3,4)
@test size(m(indata)) == size(indata)
KernelSizeAligned((-1, 2), (1,0,3,2))(m)
@test size(weights(layer(m)))[1:2] == (2,6)
@test size(m(indata)) == size(indata)
KernelSizeAligned((1, -1), SamePad())(m)
@test size(weights(layer(m)))[1:2] == (3,5)
@test size(m(indata)) == size(indata)
end
@testset "KernelSizeAligned LazyMutable" begin
import NaiveNASflux: weights
m = fluxvertex(Conv((3,4), 5=>6, pad=(1,1,1,2)), inputvertex("in", 5), layerfun=LazyMutable)
indata = ones(Float32, 5,5,5,2)
@test size(weights(layer(m))) == (3,4,5,6)
@test size(m(indata))[1:2] == size(indata)[1:2]
KernelSizeAligned((-1, 2), (1,0,3,2))(m)
@test size(m(indata)) == (size(indata,1), size(indata,2), 6, size(indata,4))
@test size(weights(layer(m))) == (2,6,5,6)
KernelSizeAligned((2, -1), (1,2,2,2))(m)
NaiveNASlib.applyΔsize!(NaiveNASlib.NeuronIndices(), m, [1:nin(m)[]], [1,3,5])
@test size(m(indata)) == (size(indata,1), size(indata,2), 3, size(indata,4))
@test size(weights(layer(m))) == (4,5,5,3)
end
@testset "KernelSizeAligned Dense is Noop with layerfun $lfun" for lfun in (identity, LazyMutable)
m = fluxvertex(Dense(3,4), inputvertex("in", 3), layerfun = lfun)
@test nin(m) == [3]
indata = ones(Float32, nin(m)[], 2)
@test size(m(indata)) == (4,2)
KernelSizeAligned((-1,-1), (1,1))(m)
@test size(m(indata)) == (4,2)
end
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 3305 | @testset "Utils" begin
struct BogusLayer <: NaiveNASflux.FluxLayer end
@testset "Sizes" begin
@test nin(Dense(3,4)) == [3]
@test nout(Dense(3,4)) == 4
@test nin(Conv((2,), 3=>4)) == [3]
@test nout(Conv((2,), 3=>4)) == 4
@test nin(Conv((1,2), 3=>6)) == [3]
@test nout(Conv((1,2), 3=>6)) == 6
@test nin(Conv((1,2,3), 4=>5)) == [4]
@test nout(Conv((1,2,3), 4=>5)) == 5
@test nin(ConvTranspose((2,), 3=>4)) == [3]
@test nout(ConvTranspose((2,), 3=>4)) == 4
@test nin(ConvTranspose((1,2), 3=>6)) == [3]
@test nout(ConvTranspose((1,2), 3=>6)) == 6
@test nin(ConvTranspose((1,2,3), 4=>5)) == [4]
@test nout(ConvTranspose((1,2,3), 4=>5)) == 5
@test nin(DepthwiseConv((2,), 3=>4*3)) == [3]
@test nout(DepthwiseConv((2,), 3=>4*3)) == 12
@test nin(DepthwiseConv((1,2), 3=>6*3)) == [3]
@test nout(DepthwiseConv((1,2), 3=>6*3)) == 18
@test nin(DepthwiseConv((1,2,3), 4=>5*4)) == [4]
@test nout(DepthwiseConv((1,2,3), 4=>5*4)) == 20
@test nin(CrossCor((2,), 3=>4)) == [3]
@test nout(CrossCor((2,), 3=>4)) == 4
@test nin(CrossCor((1,2), 3=>6)) == [3]
@test nout(CrossCor((1,2), 3=>6)) == 6
@test nin(CrossCor((1,2,3), 4=>5)) == [4]
@test nout(CrossCor((1,2,3), 4=>5)) == 5
@test nin(Flux.Scale(3)) == [nout(Flux.Scale(3))] == [3]
@test nin(LayerNorm(3)) == [nout(LayerNorm(3))] == [3]
@test nin(BatchNorm(3)) == [nout(BatchNorm(3))] == [3]
@test nin(InstanceNorm(3)) == [nout(InstanceNorm(3))] == [3]
@test nin(GroupNorm(3,1)) == [nout(GroupNorm(3,1))] == [3]
@test nin(RNN(3,4)) == [3]
@test nout(RNN(3,4)) == 4
@test nin(LSTM(3,4)) == [3]
@test nout(LSTM(3,4)) == 4
@test nin(GRU(3,4)) == [3]
@test nout(GRU(3,4)) == 4
end
@testset "Dims" begin
import NaiveNASflux: actdim, indim, outdim
import NaiveNASflux: GenericFlux2D, GenericFluxConvolutional, GenericFluxRecurrent
@test actdim(Dense(3,4)) == actdim(GenericFlux2D()) == 1
@test actdim(GenericFluxConvolutional{2}()) == 3
@test actdim(Conv((1,2), 3=>6)) == 3
@test actdim(ConvTranspose((1,2), 3=>6)) == 3
@test actdim(DepthwiseConv((1,2), 3=>6)) == 3
@test actdim(CrossCor((1,2), 3=>6)) == 3
@test actdim(Flux.Scale(1)) == indim(Flux.Scale(2)) == outdim(Flux.Scale(3)) == 1
@test actdim(GenericFluxRecurrent()) == 1
@test actdim(RNN(3,4)) == 1
@test actdim(LSTM(3,4)) == 1
@test actdim(GRU(3,4)) == 1
@test_throws ArgumentError actdim(BogusLayer())
@test_throws ArgumentError indim(BogusLayer())
@test_throws ArgumentError outdim(BogusLayer())
end
@testset "ngroups" begin
import NaiveNASflux: ngroups
@test ngroups(DepthwiseConv((2,), 3 => 9)) == ngroups(Conv((2,), 3 => 9; groups=3)) == ngroups(ConvTranspose((2,), 3 => 9; groups=3)) == 3
@test ngroups(Conv((3,3), 10 => 30; groups=5)) == ngroups(ConvTranspose((3,3), 10 => 30; groups=5)) == 5
@test ngroups(Conv((3,3), 10 => 30; groups=2)) == ngroups(ConvTranspose((3,3), 10 => 30; groups=2)) == 2
end
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 29404 |
@testset "InputShapeVertex" begin
import NaiveNASflux: FluxDense, FluxConv, FluxRnn
import NaiveNASflux: actrank, actdim
v = inputvertex("in", 3, FluxDense())
@test layertype(v) == FluxDense()
@test name(v) == "in"
@test nout(v) == 3
@testset "Copy with $label" for (label, cfun) in (
(deepcopy, deepcopy),
("fmap", g -> fmap(identity, g))
)
c = cfun(v)
@test layertype(c) == FluxDense()
@test name(c) == "in"
@test nout(c) == 3
end
@test nout(inputvertex("in", 4, FluxDense())) == nout(denseinputvertex("in", 4))
@test nout(inputvertex("in", 3, FluxConv{1}())) == nout(conv1dinputvertex("in", 3))
@test nout(inputvertex("in", 3, FluxConv{2}())) == nout(conv2dinputvertex("in", 3))
@test nout(inputvertex("in", 3, FluxConv{3}())) == nout(conv3dinputvertex("in", 3))
@test nout(inputvertex("in", 2, FluxRnn())) == nout(rnninputvertex("in", 2))
@test actdim(inputvertex("in", 4, FluxDense())) == actdim(denseinputvertex("in", 4))
@test actdim(inputvertex("in", 3, FluxConv{2}())) == actdim(convinputvertex("in", 3, 2))
@test actdim(inputvertex("in", 3, FluxConv{1}())) == actdim(conv1dinputvertex("in", 3))
@test actdim(inputvertex("in", 3, FluxConv{2}())) == actdim(conv2dinputvertex("in", 3))
@test actdim(inputvertex("in", 3, FluxConv{3}())) == actdim(conv3dinputvertex("in", 3))
@test actdim(inputvertex("in", 2, FluxRnn())) == actdim(rnninputvertex("in", 2))
@test actrank(inputvertex("in", 4, FluxDense())) == actrank(denseinputvertex("in", 4))
@test actrank(inputvertex("in", 3, FluxConv{2}())) == actrank(convinputvertex("in", 3, 2))
@test actrank(inputvertex("in", 3, FluxConv{1}())) == actrank(conv1dinputvertex("in", 3))
@test actrank(inputvertex("in", 3, FluxConv{2}())) == actrank(conv2dinputvertex("in", 3))
@test actrank(inputvertex("in", 3, FluxConv{3}())) == actrank(conv3dinputvertex("in", 3))
@test actrank(inputvertex("in", 2, FluxRnn())) == actrank(rnninputvertex("in", 2))
end
@testset "$pt Vertex" for (pt, arg) in (
(MaxPool, ((4,4),)),
(MeanPool, ((4,4),)),
(GlobalMaxPool, ()),
(GlobalMeanPool, ()))
using Random
l = pt(arg...)
v = fluxvertex(l, conv2dinputvertex("in", 3))
@test NaiveNASflux.layertype(v) isa NaiveNASflux.FluxPoolLayer
indata = randn(MersenneTwister(1), Float32, 6,6,3,2)
@test l(indata) == v(indata)
end
@testset "$dt Vertex" for dt in (Dropout, AlphaDropout)
using Random
l = dt(0.4)
v = fluxvertex(l, conv2dinputvertex("in", 3))
@test NaiveNASflux.layertype(v) isa NaiveNASflux.FluxDropOut
indata = randn(MersenneTwister(1), Float32, 3,2)
@test l(indata) == v(indata)
end
@testset "Size mutations" begin
import NaiveNASflux: weights, bias, layertype
@testset "Dense to Dense" begin
inpt = inputvertex("in", 4)
dl1 = Dense(4, 5)
dl2 = Dense(5, 3)
bias(dl1)[1:end] = 1:5
bias(dl2)[1:end] = 1:3
dense1 = fluxvertex(dl1, inpt)
dense2 = fluxvertex(dl2, dense1)
@test inputs(dense2) == [dense1]
@test outputs(dense1) == [dense2]
@test inputs(dense1) == [inpt]
@test outputs(inpt) == [dense1]
@test layer(dense1) == dl1
@test layer(dense2) == dl2
@test layertype(dense1) == FluxDense()
@test layertype(dense2) == FluxDense()
@test [nout(inpt)] == nin(dense1) == [4]
@test [nout(dense1)] == nin(dense2) == [5]
@test nout(dense2) == 3
inds = Bool[1, 1, 0, 1, 1]
NaiveNASlib.Δsize!(dense1) do v
v == dense1 || return 1
return inds .- 0.5
end
@test [nout(inpt)] == nin(dense1) == [4]
@test [nout(dense1)] == nin(dense2) == [4]
@test nout(dense2) == 3
W2exp, b2exp = weights(dl2)[:, inds], bias(dl2)
W1exp, b1exp = weights(dl1)[inds, :], bias(dl1)[inds]
@test size(CompGraph(inpt, dense2)(collect(Float32, 1:nout(inpt)))) == (3,)
assertlayer(layer(dense2), W2exp, b2exp)
assertlayer(layer(dense1), W1exp, b1exp)
end
@testset "Invariant parametric layer" begin
inpt = inputvertex("in", 3)
cv = fluxvertex(Conv((1,1), nout(inpt) => 4), inpt)
# layerfun = identity or else LazyMutable will take care of the sizes for us
bv = fluxvertex(BatchNorm(nout(cv)), cv; layerfun=identity)
@test nin(bv) == [nout(cv)] == [4]
Δnin!(v -> 1, bv, -1)
@test nin(bv) == [nout(cv)] == [3]
# Lets go out of our way to make it size inconsistent to verify that nin/nout is the size of the BatchNorm parameters.
NaiveNASlib.applyΔsize!(NaiveNASlib.NeuronIndices(), bv, [1:2], 1:2)
@test [nout(bv)] == nin(bv) == [2]
@test nout(cv) == 3
end
@testset "Invariant non-parametric layer" begin
inpt = inputvertex("in", 3)
cv = fluxvertex(Conv((1,1), nout(inpt) => 4), inpt)
bv = fluxvertex(MeanPool((2,2)), cv)
@test nin(bv) == [nout(cv)] == [4]
Δnin!(v -> 1, bv, -1)
@test nin(bv) == [nout(cv)] == [3]
end
@testset "DepthwiseConv" begin
import NaiveNASflux: outdim, wrapped, FluxConv
lazymutable(v::AbstractVertex) = lazymutable(base(v))
lazymutable(v::CompVertex) = lazymutable(v.computation)
lazymutable(m::NaiveNASflux.AbstractMutableComp) = lazymutable(wrapped(m))
lazymutable(m::LazyMutable) = m
lazyouts(v) = lazymutable(v).outputs
lazyins(v) = lazymutable(v).inputs
@testset "Depthwise single layer" begin
import NaiveNASflux: neuronutility
# just to check that I have understood the wiring of the weight
@testset "4 inputs times 2" begin
inpt = inputvertex("in", 4, FluxConv{2}())
dc = fluxvertex("dc", DepthwiseConv(reshape(Float32[10 10 10 10;20 20 20 20], 1, 1, 4, 2), Float32[0,0,0,0,1,1,1,1]), inpt)
@test neuronutility(dc) == [20, 40, 20, 40, 21, 41, 21, 41]
@test reshape(dc(fill(1f0, (1,1,4,1))), :) == [10, 20, 10, 20, 11, 21, 11, 21]
@test Δnout!( dc => -4)
@test lazyouts(dc) == [2, 4, 6, 8]
@test reshape(dc(fill(1f0, (1,1,4,1))), :) == [20, 20, 21, 21]
# pass insert function so we get zeros instead of randn
@test Δsize!(WithKwargs(ΔNout(dc, 4); insert =(args...) -> (args...) -> 0))
@test lazyouts(dc) == [ 1,-1, 2,-1, 3,-1, 4,-1]
@test reshape(dc(fill(1f0, (1,1,4,1))), :) == [20, 0, 20, 0, 21, 0, 21, 0]
end
@testset "2 inputs times 3" begin
inpt = inputvertex("in", 2, FluxConv{2}())
dc = fluxvertex("dc", DepthwiseConv(reshape(Float32[10 10;20 20;30 30], 1, 1, 2, 3), Float32[0,0,1,1,2,2]), inpt)
@test reshape(dc(fill(1f0, (1,1,2,1))), :) == [10, 20, 31, 11, 22, 32]
@test Δnout!(dc => -2)
@test lazyouts(dc) == [2,3,5,6]
@test reshape(dc(fill(1f0, (1,1,2,1))), :) == [20, 31, 22, 32]
# pass insert function so we get zeros instead of randn
@test Δsize!(WithKwargs(ΔNout(dc, 4); insert =(args...) -> (args...) -> 0))
@test lazyouts(dc) == [ 1, 2, -1,-1, 3, 4, -1,-1]
@test reshape(dc(fill(1f0, (1,1,2,1))), :) == [20, 31, 0, 0, 22, 32, 0, 0]
end
@testset "1 input times 5" begin
inpt = inputvertex("in", 1, FluxConv{2}())
dc = fluxvertex("dc", DepthwiseConv(reshape(Float32.(10:10:50), 1, 1, 1, 5), Float32.(1:5)), inpt)
@test reshape(dc(fill(1f0, (1,1,1,1))), :) == [11, 22, 33, 44, 55]
@test Δnout!(dc=>-2)
@test lazyouts(dc) == 3:5
@test reshape(dc(fill(1f0, (1,1,1,1))), :) == [33, 44, 55]
# pass insert function so we get zeros instead of randn
@test Δsize!(WithKwargs(ΔNout(dc, 3); insert =(args...) -> (args...) -> 0))
@test lazyouts(dc) == vcat(1:3, fill(-1, 3))
@test reshape(dc(fill(1f0, (1,1,1,1))), :) == [33, 44, 55, 0, 0, 0]
end
@testset "3 inputs times 7" begin
inpt = inputvertex("in", 3, FluxConv{2}())
dc = fluxvertex("dc", DepthwiseConv(reshape(repeat(Float32.(10:10:70), 3), 1,1,3,7), Float32.(1:21)), inpt)
@test reshape(dc(fill(10f0, (1,1,3,1))), :) == repeat(100:100:700, 3) .+ (1:21)
@test Δnout!(dc => -9) do v
v == dc || return 1
val = ones(nout(v))
val[[2,13,14]] .= -10
val[1] = 10
return val
end
# TODO: Examine failure and expected result more carefully when not tired af
@test lazyouts(dc) == [1,3,4,5,8,10,11,12,15,17,18,19]
@test reshape(dc(fill(10f0, (1,1,3,1))), :) == [101,303,404,505,108,310,411,512,115,317,418,519]
# pass insert function so we get zeros instead of randn
@test Δsize!(WithKwargs(ΔNout(dc, 6); insert =(args...) -> (args...) -> 0))
@test lazyouts(dc) == vcat(1:4, -1,-1, 5:8, -1,-1, 9:12, -1,-1)
@test reshape(dc(fill(10f0, (1,1,3,1))), :) == [101,303,404,505,0, 0, 108,310,411,512,0 ,0, 115,317,418,519,0, 0]
end
@testset "GroupedConvAllowNinChangeStrategy" begin
import NaiveNASflux: GroupedConvAllowNinChangeStrategy
import NaiveNASlib: ΔNout
inpt = inputvertex("in", 2, FluxConv{2}())
dc = fluxvertex("dc", DepthwiseConv((1,1), nout(inpt) => 3*nout(inpt)), inpt)
# Get output multiplier == 4 (nout = 4 * nin) by adding one more outgroup (4 = 3 + 1)
okstrat = GroupedConvAllowNinChangeStrategy([1], [4], ΔNout(dc => 2))
@test Δsize!(okstrat, dc)
@test nout(dc) == 8
@test nin(dc) == [2]
failstrat = GroupedConvAllowNinChangeStrategy([10], [0], ΔNout(dc => 2))
@test @test_logs (:warn, r"Could not change nout of dc") match_mode=:any Δsize!(failstrat, dc) == false
end
@testset "GroupedConvSimpleΔSizeStrategy" begin
using NaiveNASflux: GroupedConvSimpleΔSizeStrategy
using NaiveNASlib: ΔNout
inpt = inputvertex("in", 2, FluxConv{2}())
dc = fluxvertex("dc", DepthwiseConv((1,1), nout(inpt) => 3*nout(inpt)), inpt)
okstrat = GroupedConvSimpleΔSizeStrategy(4, ΔNout(dc => 2))
@test Δsize!(okstrat, dc)
@test nout(dc) == 8
@test nin(dc) == [2]
# We tested complete failure above, so lets make the relaxation work here
failstrat = GroupedConvSimpleΔSizeStrategy(5, ΔNout(dc => 3))
@test_logs (:warn, r"Could not change nout of dc") Δsize!(failstrat, dc)
@test nout(dc) == 10
@test nin(dc) == [2]
end
end
@testset "DepthwiseConv groupsize 2 into groupsize 1" begin
inpt = inputvertex("in", 4, FluxConv{2}())
dc1 = fluxvertex("dc1", DepthwiseConv((2,2), nout(inpt) => 2 * nout(inpt)), inpt)
dc2 = fluxvertex("dc2", DepthwiseConv((2,2), nout(dc1) => nout(dc1)), dc1)
@test @test_logs (:warn, r"Could not change nout of") Δnout!(v -> 1, dc1 => 2)
@test [nout(dc1)] == nin(dc2) == [nout(dc2) ]== [12]
@test lazyins(dc1) == [1:nout(inpt)]
@test [lazyouts(dc1)] == lazyins(dc2) == [[1, 2, -1, 3, 4, -1, 5, 6, -1, 7, 8, -1]]
# Add a deterministic utility function which wants to do non-contiguous selection across groups
@test @test_logs (:warn, r"Could not change nout of") Δnout!(dc1 => -2) do v
util = repeat([1, 2], nout(v) ÷ 2)
util[3] = -10 # don't want group 3 to make testcase deterministic
return util
end
@test [nout(dc1)] == nin(dc2) == [nout(dc2)] == [8]
@test lazyins(dc1) == [1:nout(inpt)]
@test [lazyouts(dc1)] == lazyins(dc2) == [1:8]
# Test that we actually succeeded in making a valid model
y1 = dc1(ones(Float32, 3,3, nout(inpt), 2))
@test size(y1)[end-1] == nout(dc1)
y2 = dc2(y1)
@test size(y2)[end-1] == nout(dc2)
end
@testset "DepthwiseConv groupsize 3 into groupsize 5" begin
inpt = inputvertex("in", 4, FluxConv{2}())
dc1 = fluxvertex("dc1", DepthwiseConv((2,2), nout(inpt) => 3 * nout(inpt)), inpt)
dc2 = fluxvertex("dc2", DepthwiseConv((2,2), nout(dc1) => 5 * nout(dc1)), dc1)
dc3 = fluxvertex("dc3", DepthwiseConv((2,2), nout(dc2) => nout(dc2)), dc2)
# TODO: Check compgraph output pre and post?
# Need to insert zeros then?
@test @test_logs (:warn, r"Could not change nout of") Δnout!(v -> 1, dc1 => 2)
@test [nout(dc1)] == nin(dc2) == [16]
@test [nout(dc2)] == nin(dc3) == [96] # TODO: Why so big??
# Add a deterministic utility function which wants to do non-contiguous selection across groups
@test @test_logs (:warn, r"Could not change nout of") Δnout!(dc1 => -2) do v
util = repeat([1, 2], nout(v) ÷ 2)
util[1] = -10 # don't want group 1 to make testcase deterministic
return util
end
@test [nout(dc1)] == nin(dc2) == [12]
@test [nout(dc2)] == nin(dc3) == [96]
@test lazyins(dc1) == [1:nout(inpt)]
# NaiveNASlib might not pick non-new indices (i.e not -1) due to our artificial weight function above
# default neuron utility function would give zero utility to new neurons
@test [lazyouts(dc1)] == lazyins(dc2) == [[2, 3, -1, 5, 6, -1, 8, 9, -1, 11, 12, -1]]
# All neurons had a positive utility, so NaiveNASlib should inrease to next valid size
@test [lazyouts(dc2)] == lazyins(dc3)
# Test that we actually succeeded in making a valid model
y1 = dc1(ones(Float32,5,5, nout(inpt), 2))
@test size(y1)[end-1] == nout(dc1)
y2 = dc2(y1)
@test size(y2)[end-1] == nout(dc2)
y3 = dc3(y2)
@test size(y3)[end-1] == nout(dc3)
end
@testset "Depthwise conv change input size from Conv" begin
import NaiveNASflux: weights
inpt = inputvertex("in", 4, FluxConv{2}())
v1 = fluxvertex("v1", Conv((1,1), nout(inpt) => 3), inpt)
v2 = fluxvertex("v2", DepthwiseConv((1,1), nout(v1) => 2 * nout(v1)), v1)
v3 = fluxvertex("v3", Conv((1,1), nout(v2) => 3), v2)
graph = CompGraph(inpt, v3)
indata = randn(Float32, 1,1,4,1)
expout = graph(indata)
@test Δnout!(v -> 1, v1 => 1, v2 => 2)
@test [nout(v1)] == nin(v2) == [4]
@test [nout(v2)] == nin(v3) == [8]
@test lazyouts(v2) == [1, 2, 3, 4, 5, 6, -1, -1]
# Approx here due to MAC OS CI getting differences in last decimals
@test graph(indata) ≈ expout
@test Δnout!(v2 => -2) do v
v == v2 || return 1
return vcat(1:6, -10, -10) # Prefer to drop the last two inds
end
@test [nout(v1)] == nin(v2) == [3]
@test [nout(v2)] == nin(v3) == [6]
@test lazyouts(v2) == 1:6
# Approx here due to MAC OS CI getting differences in last decimals
@test graph(indata) ≈ expout
@test size(v2(ones(Float32, 1,1,nout(v1),1))) == (1,1,nin(v3)[],1)
end
@testset "Depthwise conv change output size" begin
import NaiveNASflux: weights
inpt = inputvertex("in", 4, FluxConv{2}())
v1 = fluxvertex("v1", Conv((1,1), nout(inpt) => 3), inpt)
v2 = fluxvertex("v2", DepthwiseConv((1,1), nout(v1) => 2 * nout(v1)), v1)
v3 = fluxvertex("v3", Conv((1,1), nout(v2) => 3), v2)
graph = CompGraph(inpt, v3)
indata = randn(Float32, 1,1,4,1)
expout = graph(indata)
@test Δnout!(v -> 1, v2 => 3 * nin(v1)[])
@test [nout(v1)] == nin(v2) == [3]
@test [nout(v2)] == nin(v3) == [18]
@test lazyouts(v2) == [1, 2, -1, -1, -1, -1, 3, 4, -1, -1, -1, -1, 5, 6, -1, -1, -1, -1]
# Approx here due to MAC OS CI getting differences in last decimals
@test graph(indata) ≈ expout
@test Δnout!(v2 => -6) do v
v == v2 || return 1
repeat(vcat(ones(4), -1, -1), 3)
end
@test [nout(v1)] == nin(v2) == [3]
@test [nout(v2)] == nin(v3) == [12]
@test lazyouts(v2) == [1, 2, 3, 4, 7, 8, 9, 10, 13, 14, 15, 16]
# Approx here due to MAC OS CI getting differences in last decimals
@test graph(indata) ≈ expout
end
end
@testset "Concatenate activations" begin
function testgraph(layerfun, nin1, nin2)
vfun = (v, s) -> fluxvertex(layerfun(nout(v), s), v)
return testgraph_vfun(vfun, nin1, nin2)
end
function testgraph_vfun(vertexfun, nin1::Integer, nin2::Integer)
in1 = inputvertex("in1", nin1)
in2 = inputvertex("in2", nin2)
return testgraph_vfun(vertexfun, in1, in2)
end
function testgraph_vfun(vertexfun, in1, in2)
l1 = vertexfun(in1, 3)
l2 = vertexfun(in2, 7)
joined = concat(l1, l2)
l3 = vertexfun(joined, 9)
@test nout(joined) == nout(l1) + nout(l2) == 10
return CompGraph([in1, in2], l3)
end
@testset "Concatenate Dense" begin
nin1 = 2
nin2 = 5
@test size(testgraph(Dense, nin1, nin2)(ones(Float32, nin1), ones(Float32, nin2))) == (9,)
end
@testset "Concatenate $rnntype" for rnntype in (RNN, GRU, LSTM)
nin1 = 2
nin2 = 5
indata1 = reshape(collect(Float32, 1:nin1*4), nin1, 4)
indata2 = reshape(collect(Float32, 1:nin2*4), nin2, 4)
@test size(testgraph(rnntype, nin1, nin2)(indata1, indata2)) == (9,4)
end
@testset "Concatenate $convtype" for convtype in (Conv, ConvTranspose, CrossCor)
nin1 = 2
nin2 = 5
indata1 = reshape(collect(Float32, 1:nin1*4*4), 4, 4, nin1, 1)
indata2 = reshape(collect(Float32, 1:nin2*4*4), 4, 4, nin2, 1)
convfun = (nin,nout) -> convtype((3,3), nin=>nout, pad = 1)
@test size(testgraph(convfun, nin1, nin2)(indata1, indata2)) == (4,4,9,1)
end
@testset "Concatenate Pooled Conv" begin
nin1 = 2
nin2 = 5
indata1 = reshape(collect(Float32, 1:nin1*4*4), 4, 4, nin1, 1)
indata2 = reshape(collect(Float32, 1:nin2*4*4), 4, 4, nin2, 1)
function vfun(v, s)
cv = fluxvertex(Conv((3,3), nout(v)=>s, pad = (1,1)), v)
return fluxvertex(MaxPool((3,3), pad=(1,1), stride=(1,1)), cv)
end
@test size(testgraph_vfun(vfun, nin1, nin2)(indata1, indata2)) == (4,4,9,1)
end
@testset "Concatenate InputShapeVertex" begin
nin1 = 6
nin2 = 4
in1 = inputvertex("in1", nin1, FluxDense())
in2 = inputvertex("in2", nin2, FluxDense())
@test size(testgraph_vfun((v,s) -> v, in1, in2)(ones(Float32, nin1), ones(Float32, nin2))) == (10,)
end
@testset "Concatenate BatchNorm only" begin
nin1 = 3
nin2 = 7
indata1 = reshape(collect(Float32, 1:nin1*4*4), 4, 4, nin1, 1)
indata2 = reshape(collect(Float32, 1:nin2*4*4), 4, 4, nin2, 1)
in1 = conv2dinputvertex("in1", nin1)
in2 = conv2dinputvertex("in2", nin2)
vfun(v,s) = fluxvertex(BatchNorm(nout(v)), v)
@test size(testgraph_vfun(vfun, in1, in2)(indata1, indata2)) == (4,4,10,1)
end
@testset "Concatenate elementwise and concatenated" begin
import NaiveNASflux: FluxDense
in1 = denseinputvertex("in1", 3)
in2 = denseinputvertex("in2", 3)
v1 = "v1" >> in1 + in2
v2 = concat("v2", in1, in2)
v3 = concat("v3", v1, v1)
v4 = "v4" >> v2 + v3
v5 = "v5" >> v2 + v3
v6 = concat("v6", v4, v5, in1)
@test nin(v6) == [nout(v4), nout(v5), nout(in1)] == [6, 6, 3]
@test nout(v6) == 15
end
@testset "Concatentate dimension mismatch fail" begin
d1 = fluxvertex(Dense(2,3), inputvertex("in1", 2))
c1 = fluxvertex(Conv((3,3), 4=>5), inputvertex("in2", 4))
r1 = fluxvertex(RNN(6,7), inputvertex("in3", 6))
@test_throws DimensionMismatch concat(d1, c1)
@test_throws DimensionMismatch concat(r1, c1)
@test_throws DimensionMismatch concat(d1, r1)
end
@testset "Concat with name" begin
d1 = fluxvertex(Dense(2,3), inputvertex("in1", 2))
d2 = fluxvertex(Dense(2,5), inputvertex("in1", 2))
c = concat("c", d1, d2)
@test name(c) == "c"
@test nout(c) == 8
@test nin(c) == [3, 5]
end
end
@testset "Tricky structures" begin
import NaiveNASflux: NoParams
mutable struct Probe
activation
end
function (p::Probe)(x)
p.activation = x
return x
end
function probe(in)
p = Probe(nothing)
return invariantvertex(NoParams(p), in), p
end
conv3x3(inpt::AbstractVertex, nch::Integer) = fluxvertex(Conv((3,3), nout(inpt)=>nch, pad=(1,1)), inpt)
batchnorm(inpt) = fluxvertex(BatchNorm(nout(inpt)), inpt)
mmaxpool(inpt) = fluxvertex(MaxPool((2,2)), inpt)
@testset "Residual Conv block" begin
inpt = inputvertex("in", 3)
conv1 = conv3x3(inpt, 5)
pv1, p1 = probe(conv1)
bn1 = batchnorm(pv1)
conv2 = conv3x3(bn1, 5)
pv2, p2 = probe(conv2)
bn2 = batchnorm(pv2)
add = bn2 + bn1
mp = mmaxpool(add)
out = conv3x3(mp, 3)
graph = CompGraph([inpt], [out])
# Test that the graph works to begin with
indata = reshape(collect(Float32, 1:3*4*4), 4, 4, 3, 1)
@test size(graph(indata)) == (2, 2, 3, 1)
@test size(p1.activation) == (4, 4, 5, 1)
@test size(p2.activation) == (4, 4, 5, 1)
Δnin!(v -> 1:nout(v), out => -1)
@test size(graph(indata)) == (2, 2, 3, 1)
@test size(p1.activation) == (4, 4, 4, 1)
@test size(p2.activation) == (4, 4, 4, 1)
end
@testset "Residual fork Conv block" begin
inpt = inputvertex("in", 3)
conv1 = conv3x3(inpt, 5)
pv1, p1 = probe(conv1)
bn1 = batchnorm(pv1)
conv1a = conv3x3(bn1, 3)
pv1a, p1a = probe(conv1a)
bn1a = batchnorm(pv1a)
conv1b = conv3x3(bn1, 2)
pv1b, p1b = probe(conv1b)
bn1b = batchnorm(pv1b)
mv = concat(bn1a, bn1b)
add = mv + bn1
mp = mmaxpool(add)
out = conv3x3(mp, 3)
graph = CompGraph([inpt], [out])
# Test that the graph works to begin with
indata = reshape(collect(Float32, 1:3*4*4), 4, 4, 3, 1)
@test size(graph(indata)) == (2, 2, 3, 1)
@test size(p1.activation) == (4, 4, 5, 1)
@test size(p1a.activation) == (4, 4, 3, 1)
@test size(p1b.activation) == (4, 4, 2, 1)
Δnin!(v -> 1:nout(v), out => -1)
@test size(graph(indata)) == (2, 2, 3, 1)
@test size(p1.activation) == (4, 4, 4, 1)
@test size(p1a.activation) == (4, 4, 2, 1)
@test size(p1b.activation) == (4, 4, 2, 1)
end
rnnvertex(inpt, outsize) = fluxvertex("rnn", RNN(nout(inpt), outsize), inpt)
densevertex(inpt, outsize) = fluxvertex("dense", Dense(nout(inpt), outsize), inpt)
@testset "RNN to Dense" begin
inpt = inputvertex("in", 4)
rnn = rnnvertex(inpt, 5)
pv, p = probe(rnn)
dnn = densevertex(pv, 3)
graph = CompGraph([inpt], [dnn])
indata = [collect(Float32, 1:nin(rnn)[]) for i =1:10]
@test size(hcat(graph.(indata)...)) == (3,10)
@test size(p.activation) == (5,)
Δnin!(v -> 1:nout(v), dnn, 1)
@test size(hcat(graph.(indata)...)) == (3,10)
@test size(p.activation) == (6,)
Δnout!(v -> 1:nout(v), rnn, -2)
@test size(hcat(graph.(indata)...)) == (3,10)
@test size(p.activation) == (4,)
end
end
end
@testset "functor" begin
import Functors: functor, fmap
import Flux: params
import NaiveNASflux: weights, bias, FluxDense
inpt = inputvertex("in", 2, FluxDense())
v1 = fluxvertex(Dense(2, 3), inpt)
v2 = fluxvertex(Dense(3, 4), v1)
v3 = concat(v2, v1)
v4 = fluxvertex(Dense(nout(v3), 2), v3)
g1 = CompGraph(inpt, v4)
pars1 = params(g1).order
@test pars1[5] == weights(layer(v1))
@test pars1[6] == bias(layer(v1))
@test pars1[3] == weights(layer(v2))
@test pars1[4] == bias(layer(v2))
@test pars1[1] == weights(layer(v4))
@test pars1[2] == bias(layer(v4))
# Basically what Flux.gpu does, except the function is CuArrays.cu(x) instead of 2 .* x
testfun(x) = x
testfun(x::AbstractArray) = 2 .* x
g2 = fmap(testfun, g1)
pars2 = params(g2).order.data
@test pars2 == 2 .* pars1
indata = randn(nout(inpt), 1)
g3 = Flux.f64(g1)
@test g3(indata) ≈ g1(Float32.(indata)) rtol=1e-6
end
@testset "Trainable insert values" begin
using Random
import NaiveNASflux: weights, bias
import Flux: params, Descent, train!, mse
@testset "Dense-Dense-Dense" begin
Random.seed!(0)
iv = denseinputvertex("in", 3)
v1 = fluxvertex("v1", Dense(3,3), iv)
v2 = fluxvertex("v2", Dense(3,4), v1)
v3 = fluxvertex("v3", Dense(4,2), v2)
g = CompGraph(iv, v3)
indata = randn(Float32, 3,4)
expectedout = g(indata)
Δnout!(v -> 1, v1 => 2)
NaiveNASflux.forcemutation(g)
@test g(indata) ≈ expectedout
train!((g,x,y) -> mse(g(x), y), g, [(randn(Float32, nin(v1)[],8), randn(Float32, nout(v3) ,8))], Descent(0.5))
@test minimum(abs.(weights(layer(v1)))) > 0
@test minimum(abs.(weights(layer(v2)))) > 0
@test minimum(abs.(weights(layer(v3)))) > 0
end
@testset "Conv-Bn-Conv" begin
import NaiveNASflux: FluxConv
Random.seed!(0)
iv = inputvertex("in", 2, FluxConv{2}())
v1 = fluxvertex("v1", Conv((1,1), 2 => 2), iv)
v2 = fluxvertex("v2", BatchNorm(2), v1)
v3 = fluxvertex("v3", Conv((1,1), 2 => 2), v2)
g = CompGraph(iv, v3)
indata = randn(Float32, 2,2,2,8)
expectedout = g(indata)
Δnout!(v -> 1, v1 => 2)
NaiveNASflux.forcemutation(g)
@test g(indata) == expectedout
train!((g,x,y) -> mse(g(x), y), g, [(randn(Float32,2,2,2,8), randn(Float32,2,2,2,8))], Descent(0.5))
@test minimum(abs.(weights(layer(v1)))) > 0
@test minimum(abs.(weights(layer(v3)))) > 0
end
@testset "Conv-Conv-Conv" begin
import NaiveNASflux: FluxConv
Random.seed!(0)
iv = inputvertex("in", 2, FluxConv{2}())
v1 = fluxvertex("v1", Conv((1,1), 2 => 2), iv; layerfun=ActivationContribution ∘ LazyMutable)
v2 = fluxvertex("v2", Conv((1,1), 2 => 2), v1; layerfun=ActivationContribution ∘ LazyMutable)
v3 = fluxvertex("v3", Conv((1,1), 2 => 2), v2; layerfun=ActivationContribution ∘ LazyMutable)
g = CompGraph(iv, v3)
indata = randn(Float32, 2,2,2,8)
expectedout = g(indata)
Δnout!(v->1, v1 => 2, v2 => 1)
NaiveNASflux.forcemutation(g)
@test g(indata) == expectedout
train!((g,x,y) -> mse(g(x), y), g, [(randn(Float32,2,2,2,8), randn(Float32,2,2,2,8))], Descent(0.5))
@test minimum(abs.(weights(layer(v1)))) > 0
@test minimum(abs.(weights(layer(v2)))) > 0
@test minimum(abs.(weights(layer(v3)))) > 0
end
end
@testset "setlayer" begin
v = fluxvertex(Dense(3, 4, Flux.relu), inputvertex("in", 3))
NaiveNASflux.setlayer!(v, (;σ=tanh))
@test layer(v).σ == tanh
end
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 2735 | md"""
# Quick Tutorial
Check out the basic usage of [NaiveNASlib](https://github.com/DrChainsaw/NaiveNASlib.jl) for less verbose examples.
Here is a quick rundown of some common operations.
"""
@testset "Quick tutorial" begin #src
using NaiveNASflux, Flux, Test
# Create an input vertex which tells its output vertices that they can expect 2D convolutional input (i.e 4D arrays).
invertex = conv2dinputvertex("in", 3)
# Vertex type for Flux-layers is automatically inferred through [`fluxvertex`](@ref).
conv = fluxvertex(Conv((3,3), 3 => 5, pad=(1,1)), invertex)
batchnorm = fluxvertex(BatchNorm(nout(conv), relu), conv)
# Explore the graph.
@test inputs(conv) == [invertex]
@test outputs(conv) == [batchnorm]
@test nin(conv) == [3]
@test nout(conv) == 5
@test layer(conv) isa Flux.Conv
@test layer(batchnorm) isa Flux.BatchNorm
# Naming vertices is a good idea for debugging and logging purposes.
namedconv = fluxvertex("namedconv", Conv((5,5), 3=>7, pad=(2,2)), invertex)
@test name(namedconv) == "namedconv"
# Concatenate activations. Dimension is automatically inferred.
conc = concat("conc", namedconv, batchnorm)
@test nout(conc) == nout(namedconv) + nout(batchnorm)
# No problem to combine with convenience functions from NaiveNASlib.
residualconv = fluxvertex("residualconv", Conv((3,3), nout(conc) => nout(conc), pad=(1,1)), conc)
add = "add" >> conc + residualconv
@test name(add) == "add"
@test inputs(add) == [conc, residualconv]
# Computation graph for evaluation. It is basically a more general version of `Flux.Chain`.
graph = CompGraph(invertex, add)
# Access the vertices of the graph.
@test vertices(graph) == [invertex, namedconv, conv, batchnorm, conc, residualconv, add]
# `CompGraph`s can be evaluated just like any function.
x = ones(Float32, 7, 7, nout(invertex), 2)
@test size(graph(x)) == (7, 7, nout(add), 2) == (7 ,7, 12 ,2)
# Mutate number of neurons.
@test nout(add) == nout(residualconv) == nout(conv) + nout(namedconv) == 12
Δnout!(add => -3)
@test nout(add) == nout(residualconv) == nout(conv) + nout(namedconv) == 9
# Remove a layer.
@test nvertices(graph) == 7
remove!(batchnorm)
@test nvertices(graph) == 6
# Add a layer.
insert!(residualconv, v -> fluxvertex(BatchNorm(nout(v), relu), v))
@test nvertices(graph) == 7
# Change kernel size (and supply new padding).
namedconv |> KernelSizeAligned(-2,-2; pad=SamePad())
# Note: Parameters not changed yet...
@test size(NaiveNASflux.weights(layer(namedconv))) == (5, 5, 3, 7)
# ... because mutations are lazy by default so that no new parameters are created until the graph is evaluated.
@test size(graph(x)) == (7, 7, nout(add), 2) == (7, 7, 9, 2)
@test size(NaiveNASflux.weights(layer(namedconv))) == (3, 3, 3, 4)
end #src
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | code | 2904 | md"""
# Model Pruning Example
While NaiveNASflux does not come with any built in search policies, it is still possible to do some cool stuff with it.
Below is a very simple example of parameter pruning.
First we need some boilerplate to create the model and do the training:
"""
@testset "Pruning xor example" begin #src
using NaiveNASflux, Flux, Test
using Flux: train!, mse
import Random
Random.seed!(0)
niters = 50
# To cut down on the verbosity, start by making a helper function for creating a Dense layer as a graph vertex.
# The keyword argument `layerfun=`[`ActivationContribution`](@ref) will wrap the layer and compute an activity
# based neuron utility metric for it while the model trains.
densevertex(in, outsize, act) = fluxvertex(Dense(nout(in),outsize, act), in, layerfun=ActivationContribution)
# Ok, let's create the model and train it. We overparameterize quite heavily to avoid sporadic test failures :)
invertex = denseinputvertex("input", 2)
layer1 = densevertex(invertex, 32, relu)
layer2 = densevertex(layer1, 1, sigmoid)
original = CompGraph(invertex, layer2)
## Training params, nothing to see here
loss(f, x, y) = mse(f(x), y)
## Training data: xor truth table: y = xor(x) just so we don't need to download a dataset.
x = Float32[0 0 1 1;
0 1 0 1]
y = Float32[0 1 1 0]
trainiter = Iterators.repeated((x,y), niters)
## Train the model
train!(loss, original, trainiter, Flux.setup(Adam(0.1), original))
@test loss(original, x, y) < 0.001
# With that out of the way, let's try three different ways to prune the hidden layer (vertex nr 2 in the graph).
# To make the examples easier to compare, let's decide up front that we want to remove half of the hidden layer neurons
# and try out three different ways of selecting which ones to remove.
nprune = 16
# Prune the neurons with lowest utility according to the metric in [`ActivationContribution`](@ref).
# This is the default if no utility function is provided.
pruned_least = deepcopy(original)
Δnout!(pruned_least[2] => -nprune)
# Prune the neurons with highest utility according to the metric in [`ActivationContribution`](@ref).
# This is obviously not a good idea if you want to preserve the accuracy.
pruned_most = deepcopy(original)
Δnout!(pruned_most[2] => -nprune) do v
vals = NaiveNASlib.defaultutility(v)
return 2*sum(vals) .- vals # Ensure all values are still > 0, even for last vertex
end
# Prune randomly selected neurons by giving random utility.
pruned_random = deepcopy(original)
Δnout!(v -> rand(nout(v)), pruned_random[2] => -nprune)
# Free lunch anyone?
@test loss(pruned_most, x, y) >
loss(pruned_random, x, y) >
loss(pruned_least, x, y) >=
loss(original, x, y)
# The metric calculated by [`ActivationContribution`](@ref) is actually quite good in this case.
@test loss(pruned_least, x, y) ≈ loss(original, x, y) atol = 1e-5
end #src
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | docs | 1450 | # NaiveNASflux
[](https://DrChainsaw.github.io/NaiveNASflux.jl/stable)
[](https://DrChainsaw.github.io/NaiveNASflux.jl/dev)
[](https://github.com/DrChainsaw/NaiveNASflux.jl/actions)
[](https://ci.appveyor.com/project/DrChainsaw/NaiveNASflux-jl)
[](https://codecov.io/gh/DrChainsaw/NaiveNASflux.jl)
NaiveNASflux uses [NaiveNASlib](https://github.com/DrChainsaw/NaiveNASlib.jl) to enable mutation operations of arbitrary [Flux](https://github.com/FluxML/Flux.jl) computation graphs. It is designed with Neural Architecture Search (NAS) in mind, but can be used for any purpose where doing changes to a model is desired.
Note that NaiveNASflux does not have any functionality to search for a model architecture. Check out [NaiveGAflux](https://github.com/DrChainsaw/NaiveGAflux.jl) for a simple proof of concept.
## Basic Usage
```julia
]add NaiveNASflux
```
See [documentation](https://DrChainsaw.github.io/NaiveNASflux.jl/stable) for usage instructions.
## Contributing
All contributions are welcome. Please file an issue before creating a PR.
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | docs | 864 | # Introduction
NaiveNASflux is an extension of [NaiveNASlib](https://github.com/DrChainsaw/NaiveNASlib.jl) which adds primitives for [Flux](https://github.com/FluxML/Flux.jl) layers so that they can be used in a computation graph which NaiveNASlib can modify. Apart from this, it adds very little new functionality.
## Reading Guideline
Because NaiveNASflux is essentially glue between Flux and NaiveNASlib, most of the things one can use NaiveNASflux for are described in the [documentation for NaiveNASlib](https://DrChainsaw.github.io/NaiveNASlib.jl/stable).
The [Quick Tutorial](@ref) gives an overview of some basic operations while the [Model Pruning Example](@ref) shows simple usage without bringing in full-fledged neural architecture search.
The API reference is split up into categories in an attempt to make it easy to answer "how do I achieve X?"-type questions. | NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | docs | 276 | # Vertex Creation
The functions added by NaiveNASflux are basically Flux-tailored convenience wrappers around those exported by NaiveNASlib.
```@docs
denseinputvertex
rnninputvertex
conv1dinputvertex
conv2dinputvertex
conv3dinputvertex
convinputvertex
fluxvertex
concat
```
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | docs | 285 | # Layer Wrappers
NaiveNASflux wraps Flux layers in mutable wrapper types by default so that the vertex operations can be mutated without having to recreate the whole model. Additional wrappers which might be useful are described here.
```@docs
ActivationContribution
LazyMutable
```
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 2.0.12 | 49f3d06de40de20ff0d8c7d855bd461d5061e96d | docs | 103 | # Misc. Utilities
```@docs
layer
```
```@docs
KernelSizeAligned
```
```@docs
NeuronUtilityEvery
```
| NaiveNASflux | https://github.com/DrChainsaw/NaiveNASflux.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 749 | module NonconvexUtils
export forwarddiffy,
abstractdiffy,
AD,
TraceFunction,
CustomGradFunction,
LazyJacobian,
CustomHessianFunction,
ImplicitFunction,
sparsify,
symbolify
using ChainRulesCore, AbstractDifferentiation, ForwardDiff, LinearAlgebra
using Zygote, LinearMaps, IterativeSolvers, NonconvexCore, SparseArrays
using NonconvexCore: flatten, tovecfunc, _sparsevec, _sparse_reshape
using MacroTools
using Symbolics: Symbolics
using SparseDiffTools: SparseDiffTools
include("forwarddiff_frule.jl")
include("abstractdiff.jl")
include("lazy.jl")
include("trace.jl")
include("custom.jl")
include("implicit.jl")
include("symbolic.jl")
include("sparse_forwarddiff.jl")
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 2766 | struct AbstractDiffFunction{F, B} <: Function
f::F
backend::B
end
ForwardDiffFunction(f) = AbstractDiffFunction(f, AD.ForwardDiffBackend())
(f::AbstractDiffFunction)(x) = f.f(x)
function ChainRulesCore.rrule(
f::AbstractDiffFunction, x::AbstractVector,
)
v, (∇,) = AbstractDifferentiation.value_and_jacobian(f.backend, f.f, x)
return v, Δ -> (NoTangent(), ∇' * Δ)
end
function ChainRulesCore.frule(
(_, Δx), f::AbstractDiffFunction, x::AbstractVector,
)
v, (∇,) = AbstractDifferentiation.value_and_jacobian(f.backend, f.f, x)
return v, ∇ * Δx
end
@ForwardDiff_frule (f::AbstractDiffFunction)(x::AbstractVector{<:ForwardDiff.Dual})
# does not assume vector input and output
forwarddiffy(f_or_m, x...) = abstractdiffy(f_or_m, AD.ForwardDiffBackend(), x...)
function abstractdiffy(f, backend, x...)
flat_f, _, unflatteny = tovecfunc(f, x...)
ad_flat_f = AbstractDiffFunction(flat_f, backend)
return (x...,) -> unflatteny(ad_flat_f(flatten(x)[1]))
end
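# Example (illustrative sketch, kept as a comment): wrap a function so that its
# derivatives are computed with ForwardDiff (or any other AbstractDifferentiation
# backend) when the surrounding code is differentiated with Zygote. The sample
# inputs passed to `forwarddiffy` fix the input structure via `tovecfunc`.
#
#   g = (x, y) -> sum(x) + sum(abs2, y)
#   f = forwarddiffy(g, ones(2), ones(3))
#   Zygote.gradient((x, y) -> f(x, y), ones(2), ones(3))  # ([1.0, 1.0], [2.0, 2.0, 2.0])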
function abstractdiffy(model::NonconvexCore.AbstractModel, backend; objective = true, ineq_constraints = true, eq_constraints = true, sd_constraints = true)
x = getmin(model)
if objective
obj = NonconvexCore.Objective(abstractdiffy(model.objective, backend, x), flags = model.objective.flags)
else
obj = model.objective
end
if ineq_constraints
ineq = length(model.ineq_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.ineq_constraints.fs) do c
return NonconvexCore.IneqConstraint(abstractdiffy(c, backend, x), c.rhs, c.dim, c.flags)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.IneqConstraint[])
else
ineq = model.ineq_constraints
end
if eq_constraints
eq = length(model.eq_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.eq_constraints.fs) do c
return NonconvexCore.EqConstraint(abstractdiffy(c, backend, x), c.rhs, c.dim, c.flags)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.EqConstraint[])
else
eq = model.eq_constraints
end
if sd_constraints
sd = length(model.sd_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.sd_constraints.fs) do c
return NonconvexCore.SDConstraint(abstractdiffy(c, backend, x), c.dim)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.SDConstraint[])
else
sd = model.sd_constraints
end
if model isa NonconvexCore.Model
ModelT = NonconvexCore.Model
elseif model isa NonconvexCore.DictModel
ModelT = NonconvexCore.DictModel
else
throw("Unsupported model type.")
end
return ModelT(obj, eq, ineq, sd, model.box_min, model.box_max, model.init, model.integer)
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 2705 | struct CustomGradFunction{F, G} <: Function
f::F
g::G
end
(f::CustomGradFunction)(x::AbstractVector) = f.f(x)
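# Example (illustrative sketch, kept as a comment): supply a hand-written gradient so
# AD uses it instead of differentiating `f` itself; passing `nothing` as the gradient
# makes the function behave as if its gradient were identically zero.
#
#   f = CustomGradFunction(x -> sum(abs2, x), x -> 2 .* x)
#   Zygote.gradient(f, [1.0, 2.0])  # ([2.0, 4.0],)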
function ChainRulesCore.rrule(f::CustomGradFunction, x::AbstractVector)
v = f.f(x)
return v, Δ -> begin
if f.g === nothing
if v isa Real
G = spzeros(eltype(v), length(x))
else
G = spzeros(eltype(v), length(v), length(x))
end
else
G = f.g(x)
end
if G isa AbstractVector
return (NoTangent(), G * Δ)
elseif G isa LazyJacobian
return (NoTangent(), G' * Δ)
else
spΔ = dropzeros!(sparse(copy(Δ)))
if length(spΔ.nzval) == 1
return (NoTangent(), G[spΔ.nzind[1], :] * spΔ.nzval[1])
else
return (NoTangent(), G' * Δ)
end
end
end
end
function ChainRulesCore.frule(
(_, Δx), f::CustomGradFunction, x::AbstractVector,
)
v = f.f(x)
if f.g === nothing
if v isa Real
∇ = spzeros(eltype(v), 1, length(x))
else
∇ = spzeros(eltype(v), length(v), length(x))
end
else
∇ = f.g(x)
end
project_to = ProjectTo(v)
if ∇ isa AbstractVector && Δx isa AbstractVector
if !(∇ isa LazyJacobian) && issparse(∇) && nnz(∇) == 0
return v, project_to(zero(eltype(Δx)))
else
return v, project_to(∇' * Δx)
end
else
if !(∇ isa LazyJacobian) && issparse(∇) && nnz(∇) == 0
return v, project_to(spzeros(eltype(Δx), size(∇, 1)))
else
return v, project_to(_sparse_reshape(∇ * Δx, size(v)...))
end
end
end
@ForwardDiff_frule (f::CustomGradFunction)(x::AbstractVector{<:ForwardDiff.Dual})
struct CustomHessianFunction{F, G, H} <: Function
f::F
g::G
h::H
function CustomHessianFunction(
f::F, g::G, h::H; hvp = false,
) where {F, G, H}
_h = hvp ? x -> LazyJacobian{true}(v -> h(x, v)) : h
return new{F, G, typeof(_h)}(f, g, _h)
end
end
(to::CustomHessianFunction)(x) = to.f(x)
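# Example (illustrative sketch, kept as a comment): supply the gradient together with
# either the full Hessian or, with `hvp = true`, a Hessian-vector product which gets
# wrapped in a lazy symmetric Jacobian so the Hessian is never materialized.
#
#   f = CustomHessianFunction(x -> sum(abs2, x), x -> 2 .* x, (x, v) -> 2 .* v; hvp = true)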
function ChainRulesCore.rrule(f::CustomHessianFunction, x)
g = CustomGradFunction(f.g, f.h)
G = g(x)
return f(x), Δ -> (NoTangent(), G * Δ)
end
function ChainRulesCore.frule(
(_, Δx), f::CustomHessianFunction, x::AbstractVector,
)
g = CustomGradFunction(f.g, f.h)
v, ∇ = f(x), g(x)
project_to = ProjectTo(v)
if ∇ isa AbstractVector && Δx isa AbstractVector
return v, project_to(∇' * Δx)
else
return v, project_to(∇ * Δx)
end
end
@ForwardDiff_frule (f::CustomHessianFunction)(x::AbstractVector{<:ForwardDiff.Dual})
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 1462 | macro ForwardDiff_frule(sig)
_fd_frule(sig)
end
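# Usage note: given a callable type with a ChainRulesCore.frule, the macro defines a
# method that makes ForwardDiff dispatch to that frule by flattening the Dual inputs,
# e.g. as used elsewhere in this package:
#
#   @ForwardDiff_frule (f::AbstractDiffFunction)(x::AbstractVector{<:ForwardDiff.Dual})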
function _fd_frule(sig)
MacroTools.@capture(sig, f_(x__))
return quote
function $(esc(f))($(esc.(x)...))
f = $(esc(f))
x = ($(esc.(x)...),)
flatx, unflattenx = NonconvexCore.flatten(x)
CS = length(ForwardDiff.partials(first(flatx)))
flat_xprimals = ForwardDiff.value.(flatx)
flat_xpartials = reduce(vcat, transpose.(ForwardDiff.partials.(flatx)))
xprimals = unflattenx(flat_xprimals)
xpartials1 = unflattenx(flat_xpartials[:,1])
yprimals, ypartials1 = ChainRulesCore.frule(
(NoTangent(), xpartials1...), f, xprimals...,
)
flat_yprimals, unflatteny = NonconvexCore.flatten(yprimals)
flat_ypartials1, _ = NonconvexCore.flatten(ypartials1)
flat_ypartials = hcat(reshape(flat_ypartials1, :, 1), ntuple(Val(CS - 1)) do i
xpartialsi = unflattenx(flat_xpartials[:, i+1])
_, ypartialsi = ChainRulesCore.frule((NoTangent(), xpartialsi...), f, xprimals...)
return NonconvexCore.flatten(ypartialsi)[1]
end...)
T = ForwardDiff.tagtype(eltype(flatx))
flaty = ForwardDiff.Dual{T}.(
flat_yprimals, ForwardDiff.Partials.(NTuple{CS}.(eachrow(flat_ypartials))),
)
return unflatteny(flaty)
end
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 5793 | # Parameters x, variables y, residuals f
struct ImplicitFunction{matrixfree, F, C, L, T} <: Function
# A function which takes x as input and returns a tuple (ystar, df/dy) such that f(x, ystar) = 0. df/dy is optional and can be replaced by nothing to compute it via automatic differentiation. Jacobian should only be returned if it's more cheaply available than using AD, e.g. when using BFGS approximation of the Hessian in IPOPT.
forward::F
# The conditions function f(x, y) which must be 0 at ystar. Note that variables which don't show up in x and are closed over instead will be assumed to have no effect on the optimal solution. So it's the user's responsibility to ensure x includes all the interesting variables to be differentiated with respect to.
conditions::C
# A linear system solver to solve df/dy' \ v
linear_solver::L
# The acceptable tolerance for f(x, ystar) to use the implicit function theorem at x
tol::T
# A boolean to decide whether or not to error if the tolerance is violated, i.e. norm(f(x, ystar)) > tol. If false, we return a gradient of NaNs.
error_on_tol_violation::Bool
end
function ImplicitFunction(
forward::F, conditions::C; tol::T = 1e-5, error_on_tol_violation = false, matrixfree = false, linear_solver::L = _default_solver(matrixfree),
) where {F, C, L, T}
return ImplicitFunction{matrixfree, F, C, L, T}(
forward, conditions, linear_solver, tol, error_on_tol_violation,
)
end
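# Example (illustrative sketch, kept as a comment): differentiate through the cube-root
# "solver" y(x) defined implicitly by y^3 - x = 0; returning `nothing` for df/dy makes
# it be computed with AD from `conditions`.
#
#   forward(x) = (cbrt.(x), nothing)
#   conditions(x, y) = y .^ 3 .- x
#   imf = ImplicitFunction(forward, conditions)
#   Zygote.gradient(x -> sum(imf(x)), [8.0])  # ≈ ([1/12],) by the implicit function theorem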
function _default_solver(matrixfree)
if matrixfree
return (A, b) -> begin
L = LinearMap(A, length(b))
return gmres(L, b)
end
else
return (A, b) -> A \ b
end
end
(f::ImplicitFunction)(x) = f.forward(x)[1]
(f::ImplicitFunction)() = f.forward()[1]
function ChainRulesCore.rrule(
rc::RuleConfig, f::ImplicitFunction{matrixfree}, x,
) where {matrixfree}
ystar, _dfdy = f.forward(x)
flat_ystar, unflatten_y = flatten(ystar)
forward_returns_jacobian = _dfdy !== nothing
if forward_returns_jacobian
dfdy = _dfdy
if matrixfree
# y assumed flat if dfdy is passed in
pby = v -> dfdy' * v
else
pby = nothing
end
else
_conditions_y = flat_y -> begin
return flatten(f.conditions(x, unflatten_y(flat_y)))[1]
end
if matrixfree
dfdy = nothing
_, _pby = rrule_via_ad(rc, _conditions_y, flat_ystar)
pby = v -> _pby(v)[2]
else
# Change this to AbstractDifferentiation
dfdy = Zygote.jacobian(_conditions_y, flat_ystar)[1]
pby = nothing
end
end
_conditions_x = (conditions, x) -> begin
return flatten(conditions(x, ystar))[1]
end
residual, pbx = rrule_via_ad(rc, _conditions_x, f.conditions, x)
return ystar, ∇ -> begin
if norm(residual) > f.tol && f.error_on_tol_violation
throw(ArgumentError("The acceptable tolerance for the implicit function theorem is not satisfied for the current problem. Please double check your function definition, increase the tolerance, or set `error_on_tol_violation` to false to ignore the violation and return `NaN`s for the gradient."))
end
if matrixfree
∇f, ∇x = Base.tail(pbx(f.linear_solver(pby, -flatten(∇)[1])))
else
∇f, ∇x = Base.tail(pbx(f.linear_solver(dfdy', -flatten(∇)[1])))
end
∇imf = Tangent{typeof(f)}(
conditions = Tangent{typeof(f.conditions)}(;
ChainRulesCore.backing(∇f)...,
),
)
if norm(residual) <= f.tol
return (∇imf, ∇x)
else
return (nanlike(∇imf), nanlike(∇x))
end
end
end
function ChainRulesCore.rrule(
rc::RuleConfig, f::ImplicitFunction{matrixfree},
) where {matrixfree}
ystar, _dfdy = f.forward()
flat_ystar, unflatten_y = flatten(ystar)
forward_returns_jacobian = _dfdy !== nothing
if forward_returns_jacobian
dfdy = _dfdy
if matrixfree
# y assumed flat if dfdy is passed in
pby = v -> dfdy' * v
else
pby = nothing
end
else
_conditions_y = flat_y -> begin
return flatten(f.conditions(unflatten_y(flat_y)))[1]
end
if matrixfree
dfdy = nothing
_, _pby = rrule_via_ad(rc, _conditions_y, flat_ystar)
pby = v -> _pby(v)[2]
else
# Change this to AbstractDifferentiation
dfdy = Zygote.jacobian(_conditions_y, flat_ystar)[1]
pby = nothing
end
end
_conditions = (conditions) -> begin
return flatten(conditions(ystar))[1]
end
residual, pbf = rrule_via_ad(rc, _conditions, f.conditions)
return ystar, ∇ -> begin
if norm(residual) > f.tol && f.error_on_tol_violation
throw(ArgumentError("The acceptable tolerance for the implicit function theorem is not satisfied for the current problem. Please double check your function definition, increase the tolerance, or set `error_on_tol_violation` to false to ignore the violation and return `NaN`s for the gradient."))
end
if matrixfree
∇f = pbf(f.linear_solver(pby, -flatten(∇)[1]))[2]
else
∇f = pbf(f.linear_solver(dfdy', -flatten(∇)[1]))[2]
end
∇imf = Tangent{typeof(f)}(
conditions = Tangent{typeof(f.conditions)}(;
ChainRulesCore.backing(∇f)...,
),
)
if norm(residual) <= f.tol
return (∇imf,)
else
return (nanlike(∇imf),)
end
end
end
function nanlike(x)
flat, un = flatten(x)
return un(similar(flat) .= NaN)
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 1364 | struct LazyJacobian{symmetric, J1, J2}
jvp::J1
jtvp::J2
end
function LazyJacobian(; jvp=nothing, jtvp=nothing, symmetric=false)
return LazyJacobian{symmetric}(jvp, jtvp)
end
function LazyJacobian{symmetric}(jvp = nothing, jtvp = nothing) where {symmetric}
if jvp === jtvp === nothing
throw(ArgumentError("Both the jvp and jtvp operators cannot be nothing."))
end
if symmetric
if jvp !== nothing
_jtvp = _jvp = jvp
else
_jvp = _jtvp = jtvp
end
else
_jvp = jvp
_jtvp = jtvp
end
return LazyJacobian{symmetric, typeof(_jvp), typeof(_jtvp)}(_jvp, _jtvp)
end
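# Example (illustrative sketch, kept as a comment): a lazy Jacobian built from
# matrix-free operators only; `A` is just a stand-in for any linear operator.
#
#   A = rand(3, 3)
#   J = LazyJacobian(; jvp = v -> A * v, jtvp = v -> A' * v)
#   J * ones(3)   # same as A * ones(3), without ever forming J explicitly
#   J' * ones(3)  # same as A' * ones(3)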
struct LazyJacobianTransposed{J}
j::J
end
LinearAlgebra.adjoint(j::LazyJacobian{false}) = LazyJacobianTransposed(j)
LinearAlgebra.transpose(j::LazyJacobian{false}) = LazyJacobianTransposed(j)
LinearAlgebra.adjoint(j::LazyJacobian{true}) = j
LinearAlgebra.transpose(j::LazyJacobian{true}) = j
LinearAlgebra.adjoint(j::LazyJacobianTransposed) = j.j
LinearAlgebra.transpose(j::LazyJacobianTransposed) = j.j
LinearAlgebra.:*(j::LazyJacobian, v::AbstractVecOrMat) = j.jvp(v)
LinearAlgebra.:*(v::AbstractVecOrMat, j::LazyJacobian) = j.jtvp(v')'
LinearAlgebra.:*(j::LazyJacobianTransposed, v::AbstractVecOrMat) = (v' * j')'
LinearAlgebra.:*(v::AbstractVecOrMat, j::LazyJacobianTransposed) = (j' * v')'
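# Hedged usage sketch (illustrative only; the dense matrix below is an assumption, in
# practice the jvp/jtvp closures would wrap an AD backend or a matrix-free operator):
function _lazyjacobian_example()
    Jmat = [1.0 2.0; 3.0 4.0]
    jac = LazyJacobian(; jvp = v -> Jmat * v, jtvp = v -> Jmat' * v)
    y1 = jac * ones(2)   # dispatches to the jvp operator
    y2 = ones(2)' * jac  # dispatches to the jtvp operator via the rules above
    return y1, y2
end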
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 7416 | struct SparseForwardDiffFunction{F, F!, Y, J, JP, JC, JJ, JJ!, G, HH1, HP, HC, HH2} <: Function
f::F
f!::F!
y::Y
jac::J
jac_pattern::JP
jac_colors::JC
J::JJ
vecJ!::JJ!
vecJ::G
hess::HH1
hess_pattern::HP
hess_colors::HC
H::HH2
end
function SparseForwardDiffFunction(f, x::AbstractVector; hessian = false, jac_pattern = nothing, hess_pattern = nothing)
val = f(x)
_f = x -> _sparsevec(f(x))
f! = (y, x) -> begin
v = f(x)
y .= v
return y
end
y = _sparsevec(val)
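# Detect the Jacobian sparsity pattern with Symbolics (unless one was supplied) and color it,
# so that SparseDiffTools can evaluate the Jacobian from a small number of ForwardDiff seeds.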
jac_pattern = jac_pattern === nothing ? Symbolics.jacobian_sparsity(f!, y, x) : jac_pattern
if nnz(jac_pattern) > 0
jac = float.(jac_pattern)
jac_colors = SparseDiffTools.matrix_colors(jac)
else
T = eltype(y)
jac = sparse(Int[], Int[], T[], length(y), length(x))
jac_colors = Int[]
end
vecJ! = (G, x) -> begin
_jac = SparseDiffTools.forwarddiff_color_jacobian(_f, x, colorvec = jac_colors)
G .= _sparsevec(_jac)
return G
end
G = vec(Array(jac))
J = x -> begin
xT = eltype(x)
if length(jac.nzval) > 0
_jac = SparseDiffTools.forwarddiff_color_jacobian(_f, x, colorvec = jac_colors, sparsity = jac, jac_prototype = xT.(jac))
project_to = ChainRulesCore.ProjectTo(jac)
return project_to(copy(_jac))
else
return sparse(Int[], Int[], xT[], size(jac)...)
end
end
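# When requested, the Hessian is built the same way, as the Jacobian of the vectorized
# Jacobian, with its own sparsity pattern and coloring.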
if hessian
hess_pattern = hess_pattern === nothing ? Symbolics.jacobian_sparsity(vecJ!, G, x) : hess_pattern
if nnz(hess_pattern) > 0
hess = float.(hess_pattern)
hess_colors = SparseDiffTools.matrix_colors(hess)
_J = x -> _sparsevec(J(x))
H = x -> begin
_hess = SparseDiffTools.forwarddiff_color_jacobian(_J, x, colorvec = hess_colors, sparsity = hess_pattern, jac_prototype = hess)
project_to = ChainRulesCore.ProjectTo(hess)
return project_to(copy(_hess))
end
else
T = eltype(G)
hess = sparse(Int[], Int[], T[], length(x), length(x))
hess_colors = Int[]
H = x -> hess
end
else
hess = nothing
hess_colors = nothing
H = nothing
end
return SparseForwardDiffFunction(f, f!, y, jac, jac_pattern, jac_colors, J, vecJ!, G, hess, hess_pattern, hess_colors, H)
end
(f::SparseForwardDiffFunction)(x) = f.f(x)
function ChainRulesCore.rrule(f::SparseForwardDiffFunction, x::AbstractVector)
if f.H === nothing
J = f.J
else
J = SparseForwardDiffFunction(f.J, f.vecJ!, f.vecJ, f.hess, f.hess_pattern, f.hess_colors, f.H, nothing, nothing, nothing, nothing, nothing, nothing)
end
val = f(x)
jac = J(x)
if eltype(f.jac) === eltype(jac)
nograd_cache!(f.jac, jac)
end
return val, Δ -> begin
if val isa Real
(NoTangent(), jac' * Δ)
else
spΔ = dropzeros!(sparse(_sparsevec(copy(Δ))))
if length(spΔ.nzval) == 1
(NoTangent(), jac[spΔ.nzind[1], :] * spΔ.nzval[1])
else
(NoTangent(), jac' * spΔ)
end
end
end
end
function ChainRulesCore.frule((_, Δx), f::SparseForwardDiffFunction, x::AbstractVector)
if f.H === nothing
J = f.J
else
J = SparseForwardDiffFunction(f.J, f.vecJ!, f.vecJ, f.hess, f.hess_pattern, f.hess_colors, f.H, nothing, nothing, nothing, nothing, nothing, nothing)
end
val = f(x)
jac = J(x)
if eltype(f.jac) === eltype(jac)
nograd_cache!(f.jac, jac)
end
if val isa Real
Δy = only(jac * Δx)
elseif val isa AbstractVector
spΔx = dropzeros!(sparse(_sparsevec(copy(Δx))))
if length(spΔx.nzval) == 1
Δy = jac[:, spΔx.nzind[1]] * spΔx.nzval[1]
else
Δy = jac * spΔx
end
else
spΔx = dropzeros!(sparse(_sparsevec(copy(Δx))))
Δy = _sparse_reshape(jac * spΔx, size(val)...)
end
project_to = ChainRulesCore.ProjectTo(val)
return val, project_to(Δy)
end
@ForwardDiff_frule (f::SparseForwardDiffFunction)(x::AbstractVector{<:ForwardDiff.Dual})
function nograd_cache!(A, B)
A .= B
return A
end
function ChainRulesCore.frule(_, ::typeof(nograd_cache!), A, B)
nograd_cache!(A, B), NoTangent()
end
function ChainRulesCore.rrule(::typeof(nograd_cache!), A, B)
nograd_cache!(A, B), _ -> (NoTangent(), NoTangent(), NoTangent())
end
struct UnflattennedFunction{F1, F2, V, U} <: Function
f::F1
flat_f::F2
v::V
unflatten::U
flatteny::Bool
end
(f::UnflattennedFunction)(x...) = f.f(x...)
function NonconvexCore.tovecfunc(f::UnflattennedFunction, x...; flatteny = true)
@assert flatteny == f.flatteny
return f.flat_f, f.v, f.unflatten
end
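# `sparsify` wraps a function (or, in the method further below, a whole Nonconvex model) so that
# the Jacobians and Hessians used by its AD rules are computed sparsely via SparseForwardDiffFunction;
# `flatteny` controls whether the output is flattened to a vector as well.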
function sparsify(f, x...; flatteny = true, kwargs...)
flat_f, vx, unflatteny = tovecfunc(f, x...; flatteny)
if length(x) == 1 && x[1] isa AbstractVector
flat_f = f
sp_flat_f = SparseForwardDiffFunction(flat_f, vx; kwargs...)
return UnflattennedFunction(
x -> unflatteny(sp_flat_f(x)),
sp_flat_f,
vx,
unflatteny,
flatteny,
)
else
sp_flat_f = SparseForwardDiffFunction(flat_f, vx; kwargs...)
return UnflattennedFunction(
x -> unflatteny(sp_flat_f(flatten(x)[1])),
sp_flat_f,
vx,
unflatteny,
flatteny,
)
end
end
function sparsify(model::NonconvexCore.AbstractModel; objective = true, ineq_constraints = true, eq_constraints = true, sd_constraints = true, kwargs...)
x = getmin(model)
if objective
obj = NonconvexCore.Objective(sparsify(model.objective.f, x; kwargs...), model.objective.multiple, model.objective.flags)
else
obj = model.objective
end
if ineq_constraints
ineq = length(model.ineq_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.ineq_constraints.fs) do c
return NonconvexCore.IneqConstraint(sparsify(c.f, x; kwargs...), c.rhs, c.dim, c.flags)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.IneqConstraint[])
else
ineq = model.ineq_constraints
end
if eq_constraints
eq = length(model.eq_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.eq_constraints.fs) do c
return NonconvexCore.EqConstraint(sparsify(c.f, x; kwargs...), c.rhs, c.dim, c.flags)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.EqConstraint[])
else
eq = model.eq_constraints
end
if sd_constraints
sd = length(model.sd_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.sd_constraints.fs) do c
return NonconvexCore.SDConstraint(sparsify(c.f, x; flatteny = false, kwargs...), c.dim)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.SDConstraint[])
else
sd = model.sd_constraints
end
if model isa NonconvexCore.Model
ModelT = NonconvexCore.Model
elseif model isa NonconvexCore.DictModel
ModelT = NonconvexCore.DictModel
else
error("Unsupported model type.")
end
return ModelT(obj, eq, ineq, sd, model.box_min, model.box_max, model.init, model.integer)
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 5772 | struct SymbolicFunction{F, G, H, X} <: Function
f::F
g::G
h::H
x::X
end
function SymbolicFunction(f, _x::AbstractVector; hessian = false, sparse = false, simplify = false)
N = length(_x)
val = f(_x)
_T = eltype(val)
T = x -> begin
if (eltype(x) <: Symbolics.Num) || eltype(x) === _T
return x
elseif x isa SparseMatrixCSC && !(x isa Real) && nnz(x) == 0
return SparseMatrixCSC(x.m, x.n, x.colptr, x.rowval, _T[])
else
return _T.(x)
end
end
Symbolics.@variables tmpx[1:N]
x = [tmpx[i] for i in 1:N]
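# Trace `f` once through these symbolic variables; Symbolics then compiles the resulting
# expressions into fast (optionally sparse) gradient/Jacobian and Hessian evaluators below.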
if val isa Real
if sparse
sgrad = Symbolics.sparsejacobian([f(x)], x; simplify)
_g, _ = Symbolics.build_function(sgrad, x; expression = Val{false})
g = x -> _sparsevec(T(_g(x)))
else
sgrad = Symbolics.jacobian([f(x)], x; simplify)
_g, _ = Symbolics.build_function(sgrad, x; expression = Val{false})
g = x -> vec(T(_g(x)))
end
if hessian
if sparse
shess = Symbolics.sparsejacobian(Vector(g(x)), x; simplify)
else
shess = Symbolics.jacobian(g(x), x; simplify)
end
_h, _ = Symbolics.build_function(shess, x; expression = Val{false})
h = x -> T(_h(x))
else
h = nothing
end
else
if sparse
sjac = Symbolics.sparsejacobian(f(x), x; simplify)
_g, _ = Symbolics.build_function(sjac, x; expression = Val{false})
g = x -> T(_g(x))
else
sjac = Symbolics.jacobian(f(x), x; simplify)
_g, _ = Symbolics.build_function(sjac, x; expression = Val{false})
g = x -> T(_g(x))
end
if hessian
if sparse
shess = Symbolics.sparsejacobian(vec(Matrix(g(x))), x; simplify)
else
shess = Symbolics.jacobian(vec(g(x)), x; simplify)
end
_h, _ = Symbolics.build_function(shess, x; expression = Val{false})
h = x -> T(_h(x))
else
h = nothing
end
end
return SymbolicFunction(f, g, h, _x)
end
(f::SymbolicFunction)(x) = f.f(x)
function ChainRulesCore.rrule(f::SymbolicFunction, x)
val = f.f(x)
g = SymbolicFunction(f.g, f.h, nothing, f.x)
if val isa Real
G = g(x)
return val, Δ -> (NoTangent(), G * Δ)
else
G = g(x)
return val, Δ -> begin
spΔ = dropzeros!(sparse(copy(Δ)))
if length(spΔ.nzval) == 1
return (NoTangent(), G[spΔ.nzind[1], :] * spΔ.nzval[1])
else
return (NoTangent(), G' * Δ)
end
end
end
end
function ChainRulesCore.frule(
(_, Δx), f::SymbolicFunction, x::AbstractVector,
)
val = f.f(x)
g = SymbolicFunction(f.g, f.h, nothing, f.x)
∇ = g(x)
if ∇ isa AbstractVector && Δx isa AbstractVector
Δy = ∇' * _sparsevec(Δx)
else
Δy = reshape(∇ * _sparsevec(Δx), size(val)...)
end
project_to = ChainRulesCore.ProjectTo(val)
return val, project_to(Δy)
end
@ForwardDiff_frule (f::SymbolicFunction)(x::AbstractVector{<:ForwardDiff.Dual})
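# `symbolify` is the Symbolics-based analogue of `sparsify`: the wrapped function is traced
# symbolically once and exact (optionally sparse) derivative code is compiled for it.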
function symbolify(f, x...; flatteny = true, kwargs...)
flat_f, vx, unflatteny = tovecfunc(f, x...; flatteny)
if length(x) == 1 && x[1] isa AbstractVector
flat_f = f
sym_flat_f = SymbolicFunction(flat_f, vx; kwargs...)
return UnflattennedFunction(
x -> unflatteny(sym_flat_f(x)),
sym_flat_f,
vx,
unflatteny,
flatteny,
)
else
sym_flat_f = SymbolicFunction(flat_f, vx; kwargs...)
return UnflattennedFunction(
x -> unflatteny(sym_flat_f(flatten(x)[1])),
sym_flat_f,
vx,
unflatteny,
flatteny,
)
end
end
function symbolify(model::NonconvexCore.AbstractModel; objective = true, ineq_constraints = true, eq_constraints = true, sd_constraints = true, kwargs...)
x = getmin(model)
if objective
obj = NonconvexCore.Objective(symbolify(model.objective.f, x; kwargs...), model.objective.multiple, model.objective.flags)
else
obj = model.objective
end
if ineq_constraints
ineq = length(model.ineq_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.ineq_constraints.fs) do c
return NonconvexCore.IneqConstraint(symbolify(c.f, x; kwargs...), c.rhs, c.dim, c.flags)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.IneqConstraint[])
else
ineq = model.ineq_constraints
end
if eq_constraints
eq = length(model.eq_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.eq_constraints.fs) do c
return NonconvexCore.EqConstraint(symbolify(c.f, x; kwargs...), c.rhs, c.dim, c.flags)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.EqConstraint[])
else
eq = model.eq_constraints
end
if sd_constraints
sd = length(model.sd_constraints.fs) != 0 ? NonconvexCore.VectorOfFunctions(map(model.sd_constraints.fs) do c
return NonconvexCore.SDConstraint(symbolify(c.f, x; flatteny = false, kwargs...), c.dim)
end) : NonconvexCore.VectorOfFunctions(NonconvexCore.SDConstraint[])
else
sd = model.sd_constraints
end
if model isa NonconvexCore.Model
ModelT = NonconvexCore.Model
elseif model isa NonconvexCore.DictModel
ModelT = NonconvexCore.DictModel
else
error("Unsupported model type.")
end
return ModelT(obj, eq, ineq, sd, model.box_min, model.box_max, model.init, model.integer)
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 2044 | struct TraceFunction{F, V} <: Function
f::F
trace::V
on_call::Bool
on_grad::Bool
end
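# Keyword constructor: with no flags given, record both calls and gradient calls;
# if only one flag is given, the other defaults to its complement.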
function TraceFunction(f; on_call::Union{Bool, Nothing} = nothing, on_grad::Union{Bool, Nothing} = nothing)
if on_call === on_grad === nothing
_on_call = true
_on_grad = true
elseif on_call === nothing
_on_call = !on_grad
_on_grad = on_grad
elseif on_grad === nothing
_on_call = on_call
_on_grad = !on_call
else
_on_call = on_call
_on_grad = on_grad
end
return TraceFunction(f, Any[], _on_call, _on_grad)
end
function (tf::TraceFunction)(x)
v = tf.f(x)
if tf.on_call
push!(tf.trace, (input = copy(x), output = copy(v)))
end
return v
end
function ChainRulesCore.rrule(rc::RuleConfig, tf::TraceFunction, x)
v, pb = ChainRulesCore.rrule_via_ad(rc, tf.f, x)
return v, Δ -> begin
Δin = pb(Δ)
g = Δin[2] isa Array ? Δin[2] : Δin[2].val.f()
if tf.on_grad
push!(tf.trace, (input = copy(x), output = copy(v), grad = copy(g)))
end
return (Δin[1], g)
end
end
function ChainRulesCore.frule(
rc::RuleConfig, (_, Δx), tf::TraceFunction, x,
)
v, g = ChainRulesCore.frule(rc, (NoTangent(), Δx), tf.f, x)
if tf.on_grad
if !isempty(tf.trace) && x == tf.trace[end].input
push!(tf.trace[end].grad, g)
else
push!(tf.trace, (input = copy(x), output = copy(v), grad = [copy(g)]))
end
end
return v, g
end
function ChainRulesCore.frule(
(_, Δx), tf::TraceFunction, x,
)
v, g = ChainRulesCore.frule((NoTangent(), Δx), tf.f, x)
if tf.on_grad
if !isempty(tf.trace) && x == tf.trace[end].input
push!(tf.trace[end].grad, g)
else
push!(tf.trace, (input = copy(x), output = copy(v), grad = [copy(g)]))
end
end
return v, g
end
@ForwardDiff_frule (f::TraceFunction)(x::AbstractVector{<:ForwardDiff.Dual})
@ForwardDiff_frule (f::TraceFunction)(x::ForwardDiff.Dual)
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 3833 | @testset "abstractdiffy and forwarddiffy" begin
@testset "Scalar-valued reverse-mode" begin
global T = Nothing
f = function (x)
global T = eltype(x)
return sum(x)
end
x = [1.0, 1.0]
_f = forwarddiffy(f, x)
Zygote.gradient(_f, x)
@test T <: ForwardDiff.Dual
_f = abstractdiffy(f, AD.ReverseDiffBackend(), x)
Zygote.gradient(_f, x)
@test T <: ReverseDiff.TrackedReal
_f = abstractdiffy(f, AD.TrackerBackend(), x)
Zygote.gradient(_f, x)
@test T <: Tracker.TrackedReal
end
@testset "Scalar-valued forward-mode" begin
global T = Nothing
f = function (x)
global T = eltype(x)
return sum(x)
end
x = [1.0, 1.0]
_f = forwarddiffy(f, x)
ForwardDiff.gradient(_f, x)
@test T <: ForwardDiff.Dual
_f = abstractdiffy(f, AD.ReverseDiffBackend(), x)
ForwardDiff.gradient(_f, x)
@test T <: ReverseDiff.TrackedReal
_f = abstractdiffy(f, AD.TrackerBackend(), x)
ForwardDiff.gradient(_f, x)
@test T <: Tracker.TrackedReal
end
@testset "Vector-valued reverse-mode" begin
global T = Nothing
f = function (x)
global T = eltype(x)
return 2x
end
x = [1.0, 1.0]
_f = forwarddiffy(f, x)
Zygote.jacobian(_f, x)
@test T <: ForwardDiff.Dual
_f = abstractdiffy(f, AD.ReverseDiffBackend(), x)
Zygote.jacobian(_f, x)
@test T <: ReverseDiff.TrackedReal
_f = abstractdiffy(f, AD.TrackerBackend(), x)
Zygote.jacobian(_f, x)
@test T <: Tracker.TrackedReal
end
@testset "Vector-valued forward-mode" begin
global T = Nothing
f = function (x)
global T = eltype(x)
return 2x
end
x = [1.0, 1.0]
_f = forwarddiffy(f, x)
ForwardDiff.jacobian(_f, x)
@test T <: ForwardDiff.Dual
_f = abstractdiffy(f, AD.ReverseDiffBackend(), x)
ForwardDiff.jacobian(_f, x)
@test T <: ReverseDiff.TrackedReal
_f = abstractdiffy(f, AD.TrackerBackend(), x)
ForwardDiff.jacobian(_f, x)
@test T <: Tracker.TrackedReal
end
@testset "Multiple inputs, multiple outputs" begin
global T = Nothing
__f = function (x::AbstractVector, y::Tuple)
global T = eltype(x)
return 2x[1] + x[2], y[1] * y[2]
end
x = ([1.0, 1.0], (2.0, 3.0))
_f = forwarddiffy(__f, x...)
f = x -> [_f(x[1:2], (x[3], x[4]))...]
flatx = [1.0, 1.0, 2.0, 3.0]
ForwardDiff.jacobian(f, flatx)
@test T <: ForwardDiff.Dual
_f = abstractdiffy(__f, AD.ReverseDiffBackend(), x...)
f = x -> [_f(x[1:2], (x[3], x[4]))...]
ForwardDiff.jacobian(f, flatx)
@test T <: ReverseDiff.TrackedReal
_f = abstractdiffy(__f, AD.TrackerBackend(), x...)
f = x -> [_f(x[1:2], (x[3], x[4]))...]
ForwardDiff.jacobian(f, flatx)
@test T <: Tracker.TrackedReal
end
@testset "Model - first order = $first_order" for first_order in (true, false)
f = (x::AbstractVector) -> sqrt(x[2])
g = (x::AbstractVector, a, b) -> (a*x[1] + b)^3 - x[2]
options = IpoptOptions(first_order = first_order)
m = Model(f)
addvar!(m, [0.0, 0.0], [10.0, 10.0])
add_ineq_constraint!(m, x -> g(x, 2, 0))
add_ineq_constraint!(m, x -> g(x, -1, 1))
alg = IpoptAlg()
sp_model = forwarddiffy(m)
r = NonconvexIpopt.optimize(sp_model, alg, [1.234, 2.345], options = options)
@test abs(r.minimum - sqrt(8/27)) < 1e-6
@test norm(r.minimizer - [1/3, 8/27]) < 1e-6
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 1316 | @testset "CustomGradFunction" begin
fakeg = [2.0]
f = CustomGradFunction(sum, x -> fakeg)
@test Zygote.gradient(f, [1.0]) == (fakeg,)
@test ForwardDiff.gradient(f, [1.0]) == fakeg
fakeJ = [1.0 2.0; 0.0 -1.0]
f = CustomGradFunction(identity, x -> fakeJ)
@test Zygote.jacobian(f, [1.0, 1.0]) == (fakeJ,)
@test ForwardDiff.jacobian(f, [1.0, 1.0]) == fakeJ
end
@testset "CustomHessianFunction" begin
fakeH = [3.0 -1.0; -1.0 2.0]
f = CustomHessianFunction(sum, x -> fakeg, x -> fakeH)
fakeg = [2.0, 2.0]
@test Zygote.gradient(f, [1.0, 1.0]) == (fakeg,)
@test Zygote.jacobian(x -> Zygote.gradient(f, x)[1], [1.0, 1.0]) == (fakeH,)
@test ForwardDiff.gradient(f, [1.0, 1.0]) == fakeg
@test ForwardDiff.jacobian(x -> Zygote.gradient(f, x)[1], [1.0, 1.0]) == fakeH
@test ForwardDiff.jacobian(x -> ForwardDiff.gradient(f, x), [1.0, 1.0]) == fakeH
hvp = (x, v) -> fakeH * v
f = CustomHessianFunction(sum, x -> fakeg, hvp; hvp = true)
H = Zygote.jacobian(x -> Zygote.gradient(f, x)[1], [1.0, 1.0])[1]
@test norm(H - fakeH) < 1e-6
H = ForwardDiff.jacobian(x -> Zygote.gradient(f, x)[1], [1.0, 1.0])
@test norm(H - fakeH) < 1e-6
H = ForwardDiff.jacobian(x -> ForwardDiff.gradient(f, x), [1.0, 1.0])
@test norm(H - fakeH) < 1e-6
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 5636 | # TODO tests:
## Multiple outputs
## Struct output
## Functor f - fix first
@testset "ForwardDiff frule" begin
@eval begin
f1(x, y) = (x + 2y).^2
global frule_count = 0
function ChainRulesCore.frule((_, Δx1, Δx2), ::typeof(f1), x1, x2)
global frule_count += 1
println("frule was called")
return f1(x1, x2), Δx1 + Δx2
end
NonconvexUtils.@ForwardDiff_frule f1(x1::ForwardDiff.Dual, x2::ForwardDiff.Dual)
NonconvexUtils.@ForwardDiff_frule f1(x1::AbstractVector{<:ForwardDiff.Dual}, x2::AbstractVector{<:ForwardDiff.Dual})
NonconvexUtils.@ForwardDiff_frule f1(x1::AbstractMatrix{<:ForwardDiff.Dual}, x2::AbstractMatrix{<:ForwardDiff.Dual})
f2(x::NamedTuple, y::NamedTuple) = (a = x.a + y.a, b = x.b + y.b)
f2(x::AbstractVector, y::AbstractVector) = f2.(x, y)
function ChainRulesCore.frule((_, Δx1, Δx2), ::typeof(f2), x1::NamedTuple, x2::NamedTuple)
global frule_count += 1
println("frule was called")
return f2(x1, x2), (a = Δx1.a + Δx2.a, b = Δx1.b + Δx2.b)
end
NonconvexUtils.@ForwardDiff_frule f2(x1::NamedTuple{<:Any, <:Tuple{Vararg{<:ForwardDiff.Dual}}}, x2::NamedTuple{<:Any, <:Tuple{Vararg{<:ForwardDiff.Dual}}})
struct MyStruct{T, T1, T2}
a::T1
b::T2
end
MyStruct(a, b) = MyStruct{typeof(a), typeof(a), typeof(b)}(a, b)
# The @constructor macro takes the type (first) and constructor function (second)
# The constructor function takes input the fields generated from ntfromstruct (as multiple positional arguments)
# The ntfromstruct function can be overloaded for your type
DifferentiableFlatten.@constructor MyStruct MyStruct
f2(x::MyStruct, y::MyStruct) = MyStruct(x.a + y.a, x.b + y.b)
function ChainRulesCore.frule((_, Δx1, Δx2), ::typeof(f2), x1::MyStruct, x2::MyStruct)
global frule_count += 1
println("frule was called")
return f2(x1, x2), MyStruct(Δx1.a + Δx2.a, Δx1.b + Δx2.b)
end
NonconvexUtils.@ForwardDiff_frule f2(x1::MyStruct{<:ForwardDiff.Dual}, x2::MyStruct{<:ForwardDiff.Dual})
Base.sum(s::MyStruct) = s.a + s.b
# I recommend creating your own type to avoid piracy
_eigvals!(x) = eigvals!(x)
function ChainRulesCore.frule((_, Δx), ::typeof(_eigvals!), x::Symmetric{<:Real})
global frule_count += 1
println("frule was called")
return frule((NoTangent(), Δx), eigvals!, x)
end
# I recommend creating your own type to avoid piracy
DifferentiableFlatten.@constructor Symmetric Symmetric
import NamedTupleTools: ntfromstruct, structfromnt
ntfromstruct(a::Symmetric) = (data = a.data,)
structfromnt(::Type{Symmetric}, x::NamedTuple) = Symmetric(x.data, :U)
NonconvexUtils.@ForwardDiff_frule _eigvals!(A::Symmetric{<:ForwardDiff.Dual})
end
global frule_count = 0
@testset "2 real inputs - 1 real output" begin
_f = x -> f1(x[1], x[2])
_f(rand(2))
g1 = ForwardDiff.gradient(_f, rand(2))
@test frule_count == 2
cfg = ForwardDiff.GradientConfig(_f, rand(2), ForwardDiff.Chunk{2}())
g2 = ForwardDiff.gradient(_f, rand(2), cfg)
@test frule_count == 4
@test g1 == g2
end
frule_count = 0
@testset "2 vector inputs - 1 real output" begin
_f = x -> sum(f1(x[1:2], x[3:4]))
g1 = ForwardDiff.gradient(_f, rand(4))
@test frule_count == 4
cfg = ForwardDiff.GradientConfig(_f, rand(4), ForwardDiff.Chunk{2}())
g2 = ForwardDiff.gradient(_f, rand(4), cfg)
@test frule_count == 8
@test g1 == g2
end
frule_count = 0
@testset "2 vector inputs - 1 vector output" begin
_f = x -> f1(x[1:2], x[3:4])
j1 = ForwardDiff.jacobian(_f, rand(4))
@test frule_count == 4
cfg = ForwardDiff.JacobianConfig(_f, rand(4), ForwardDiff.Chunk{2}())
j2 = ForwardDiff.jacobian(_f, rand(4), cfg)
@test frule_count == 8
@test j1 == j2
end
frule_count = 0
@testset "2 matrix inputs - 1 real output" begin
_f = x -> sum(f1(x[1:2,1:2], x[3:4,3:4]))
g1 = ForwardDiff.gradient(_f, rand(4, 4))
@test frule_count == 16
cfg = ForwardDiff.GradientConfig(_f, rand(4, 4), ForwardDiff.Chunk{2}())
g2 = ForwardDiff.gradient(_f, rand(4, 4), cfg)
@test frule_count == 32
@test g1 == g2
end
frule_count = 0
@testset "2 NamedTuple inputs - 1 real output" begin
_f = x -> sum(f2((a = x[1], b = x[2]), (a = x[3], b = x[4])))
g1 = ForwardDiff.gradient(_f, rand(4))
@test frule_count == 4
cfg = ForwardDiff.GradientConfig(_f, rand(4), ForwardDiff.Chunk{2}())
g2 = ForwardDiff.gradient(_f, rand(4))
@test frule_count == 8
@test g1 == g2
end
frule_count = 0
@testset "2 struct inputs - 1 real output" begin
_f = x -> sum(f2(MyStruct(x[1], x[2]), MyStruct(x[3], x[4])))
g1 = ForwardDiff.gradient(_f, rand(4))
@test frule_count == 4
cfg = ForwardDiff.GradientConfig(_f, rand(4), ForwardDiff.Chunk{2}())
g2 = ForwardDiff.gradient(_f, rand(4))
@test frule_count == 8
@test g1 == g2
end
frule_count = 0
@testset "eigvals" begin
# Gradient of trace
g = ForwardDiff.gradient(x -> sum(_eigvals!(Symmetric(x))), rand(4, 4))
@test frule_count == 16
@test norm(g - I) < 1e-6
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 7997 | @testset "Implicit functions" begin
@testset "Non-closure conditions" begin
@testset "Vector input and output - Jacobian $jac - matrixfree $matrixfree" for jac in (false, true), matrixfree in (false, true)
# Adapted from https://github.com/JuliaNLSolvers/NLsolve.jl/issues/205
rng = StableRNG(123)
nonlin = 0.1
get_info_vec = N -> begin
A = spdiagm(0 => fill(10.0, N), 1 => fill(-1.0, N-1), -1 => fill(-1.0, N-1))
p0 = randn(rng, N)
f = (p, x) -> A*x + nonlin*x.^2 - p
solve_x = (p) -> begin
xstar = nlsolve(x -> f(p, x), zeros(N), method=:anderson, m=10).zero
return xstar, jac ? Zygote.jacobian(x -> f(p, x), xstar)[1] : nothing
end
g_analytic = gmres((A + Diagonal(2*nonlin*solve_x(p0)[1]))', ones(N))
return solve_x, f, p0, g_analytic
end
solve_x, f, p0, g_analytic = get_info_vec(10)
imf = ImplicitFunction(solve_x, f; matrixfree)
obj = p -> sum(imf(p))
g_auto = Zygote.gradient(obj, p0)[1]
@test norm(g_analytic - g_auto) < 1e-6
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0)
obj = p -> sum(imf(p))
g_auto = Zygote.gradient(obj, p0)[1]
@test all(isnan.(g_auto))
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0, error_on_tol_violation = true)
obj = p -> sum(imf(p))
@test_throws ArgumentError Zygote.gradient(obj, p0)[1]
end
@testset "Non-vector input and output" begin
rng = StableRNG(123)
nonlin = 0.1
get_info_nonvec = N -> begin
A = spdiagm(0 => fill(10.0, N), 1 => fill(-1.0, N-1), -1 => fill(-1.0, N-1))
p0 = (a = randn(rng, N),)
f = (p, x) -> A*x.a + nonlin*x.a.^2 - p.a
solve_x = (p) -> begin
return (a = nlsolve(x -> f(p, (a = x,)), zeros(N), method=:anderson, m=10).zero,), nothing
end
g_analytic = (a = gmres((A + Diagonal(2*nonlin*solve_x(p0)[1].a))', ones(N)),)
return solve_x, f, p0, g_analytic
end
for matrixfree in (false, true)
solve_x, f, p0, g_analytic = get_info_nonvec(10)
imf = ImplicitFunction(solve_x, f; matrixfree)
obj = p -> sum(imf(p).a)
g_auto = Zygote.gradient(obj, p0)[1]
@test norm(g_analytic.a - g_auto.a) < 1e-6
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0)
obj = p -> sum(imf(p).a)
g_auto = Zygote.gradient(obj, p0)[1]
@test all(isnan.(g_auto.a))
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0, error_on_tol_violation = true)
obj = p -> sum(imf(p).a)
@test_throws ArgumentError Zygote.gradient(obj, p0)[1]
end
end
end
@testset "Closure conditions" begin
@testset "Vector input and output - Jacobian $jac - matrixfree $matrixfree" for jac in (false, true), matrixfree in (false, true)
rng = StableRNG(123)
nonlin = 0.1
get_info_closure_vec = N -> begin
A = spdiagm(0 => fill(10.0, N), 1 => fill(-1.0, N-1), -1 => fill(-1.0, N-1))
p0 = randn(rng, N)
f = (x) -> A*x + nonlin*x.^2 - p0
solve_x = () -> begin
xstar = nlsolve(f, zeros(N), method=:anderson, m=10).zero
xstar, jac ? Zygote.jacobian(f, xstar)[1] : nothing
end
g_analytic = gmres((A + Diagonal(2*nonlin*solve_x()[1]))', ones(N))
return p0, A, g_analytic
end
p0, A, g_analytic = get_info_closure_vec(10)
obj = p -> begin
N = 10
# f closes over p
f = (x) -> A*x + nonlin*x.^2 - p
solve_x = () -> begin
return nlsolve(f, zeros(N), method=:anderson, m=10).zero, nothing
end
imf = ImplicitFunction(solve_x, f; matrixfree)
return sum(imf())
end
g_auto = Zygote.gradient(obj, p0)[1]
@test norm(g_analytic - g_auto) < 1e-6
obj = p -> begin
N = 10
# f closes over p
f = (x) -> A*x + nonlin*x.^2 - p
solve_x = () -> begin
return nlsolve(f, zeros(N), method=:anderson, m=10).zero, nothing
end
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0)
return sum(imf())
end
g_auto = Zygote.gradient(obj, p0)[1]
@test all(isnan.(g_auto))
obj = p -> begin
N = 10
# f closes over p
f = (x) -> A*x + nonlin*x.^2 - p
solve_x = () -> begin
return nlsolve(f, zeros(N), method=:anderson, m=10).zero, nothing
end
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0, error_on_tol_violation = true)
return sum(imf())
end
@test_throws ArgumentError Zygote.gradient(obj, p0)[1]
end
@testset "Non-vector input and output" begin
rng = StableRNG(123)
nonlin = 0.1
get_info_closure_nonvec = N -> begin
N = 10
A = spdiagm(0 => fill(10.0, N), 1 => fill(-1.0, N-1), -1 => fill(-1.0, N-1))
p0 = (a = randn(rng, N),)
_f = (x) -> A*x.a + nonlin*x.a.^2 - p0.a
_solve_x = () -> begin
return (a = nlsolve(x -> _f((a = x,)), zeros(N), method=:anderson, m=10).zero,), nothing
end
return p0, A, (a = gmres((A + Diagonal(2*nonlin*_solve_x()[1].a))', ones(N)),)
end
for matrixfree in (false, true)
p0, A, g_analytic = get_info_closure_nonvec(10)
obj = p -> begin
N = 10
# f closes over p
f = (x) -> A*x.a + nonlin*x.a.^2 - p.a
solve_x = () -> begin
return (a = nlsolve(x -> f((a = x,)), zeros(N), method=:anderson, m=10).zero,), nothing
end
imf = ImplicitFunction(solve_x, f; matrixfree)
return sum(imf().a)
end
g_auto = Zygote.gradient(obj, p0)[1]
@test norm(g_analytic.a - g_auto.a) < 1e-6
obj = p -> begin
N = 10
# f closes over p
f = (x) -> A*x.a + nonlin*x.a.^2 - p.a
solve_x = () -> begin
return (a = nlsolve(x -> f((a = x,)), zeros(N), method=:anderson, m=10).zero,), nothing
end
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0)
return sum(imf().a)
end
g_auto = Zygote.gradient(obj, p0)[1]
@test all(isnan.(g_auto.a))
obj = p -> begin
N = 10
# f closes over p
f = (x) -> A*x.a + nonlin*x.a.^2 - p.a
solve_x = () -> begin
return (a = nlsolve(x -> f((a = x,)), zeros(N), method=:anderson, m=10).zero,), nothing
end
imf = ImplicitFunction(solve_x, f; matrixfree, tol = 0.0, error_on_tol_violation = true)
return sum(imf().a)
end
@test_throws ArgumentError Zygote.gradient(obj, p0)[1]
end
end
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 403 | using NonconvexUtils, ForwardDiff, ReverseDiff, Tracker, Zygote
using Test, LinearAlgebra, SparseArrays, NLsolve, IterativeSolvers
using StableRNGs, ChainRulesCore, NonconvexCore, NonconvexIpopt
using DifferentiableFlatten
include("forwarddiff_frule.jl")
include("abstractdiff.jl")
include("trace.jl")
include("custom.jl")
include("implicit.jl")
include("symbolic.jl")
include("sparse_forwarddiff.jl")
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 2809 | @testset "sparsify" begin
@testset "Functions" begin
f = sparsify(sum, rand(3); hessian = false).flat_f
x = rand(3)
@test Zygote.gradient(f, x)[1] ≈ ForwardDiff.gradient(f, rand(3))
f = sparsify(x -> 2(x.^2) + x[1] * ones(3), rand(3); hessian = false).flat_f
x = rand(3)
@test Zygote.jacobian(f, x)[1] ≈ ForwardDiff.jacobian(f, x)
@test NonconvexCore.sparse_jacobian(f, x) ≈ Zygote.jacobian(f, x)[1]
@test NonconvexCore.sparse_jacobian(f, x) isa SparseMatrixCSC
f = sparsify(sum, rand(3); hessian = true)
x = rand(3)
@test Zygote.gradient(f, x)[1] ≈ ForwardDiff.gradient(f, rand(3))
@test Zygote.hessian(f, x) ≈ ForwardDiff.hessian(f, rand(3))
@test NonconvexCore.sparse_hessian(f, x) ≈ Zygote.hessian(f, x)
@test NonconvexCore.sparse_hessian(f, x) isa SparseMatrixCSC
f = sparsify(x -> sum(x)^2 + x[1], rand(3); hessian = true)
x = rand(3)
@test Zygote.hessian(f, x) ≈ ForwardDiff.hessian(f, x)
@test NonconvexCore.sparse_hessian(f, x) ≈ Zygote.hessian(f, x)
@test NonconvexCore.sparse_hessian(f, x) isa SparseMatrixCSC
f = sparsify(x -> sum(x)^2 + x[1], rand(3); hessian = true)
x = rand(3)
@test Zygote.jacobian(x -> Zygote.gradient(f, x)[1], x)[1] ≈ ForwardDiff.hessian(f, x)
f = sparsify(x -> [sum(x)^2, x[1]], rand(3); hessian = true)
x = rand(3)
g = x -> sum(f(x))
@test Zygote.gradient(g, x)[1] ≈ ForwardDiff.gradient(g, x)
@test Zygote.hessian(g, x) ≈ ForwardDiff.hessian(g, x)
f = sparsify(x -> [0.0, 0.0], rand(3); hessian = true)
x = rand(3)
g = x -> sum(f(x))
@test Zygote.gradient(g, x)[1] ≈ ForwardDiff.gradient(g, x)
@test Zygote.hessian(g, x) ≈ ForwardDiff.hessian(g, x)
f = sparsify(x -> 0.0, rand(3); hessian = true)
x = rand(3)
@test Zygote.gradient(f, x)[1] ≈ ForwardDiff.gradient(f, x)
@test Zygote.hessian(f, x) ≈ ForwardDiff.hessian(f, x)
end
@testset "Model - first order = $first_order, sparse = $sparse" for first_order in (true, false), sparse in (true, false)
f = (x::AbstractVector) -> sqrt(x[2])
g = (x::AbstractVector, a, b) -> (a*x[1] + b)^3 - x[2]
options = IpoptOptions(; first_order, sparse)
m = Model(f)
addvar!(m, [0.0, 0.0], [10.0, 10.0])
add_ineq_constraint!(m, x -> g(x, 2, 0))
add_ineq_constraint!(m, x -> g(x, -1, 1))
alg = IpoptAlg()
sp_model = sparsify(m; hessian = !first_order)
r = NonconvexIpopt.optimize(sp_model, alg, [1.234, 2.345], options = options)
@test abs(r.minimum - sqrt(8/27)) < 1e-6
@test norm(r.minimizer - [1/3, 8/27]) < 1e-6
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 4835 | function _test_function_scoping()
model = Model()
addvar!(model, [0.0], [1.0])
set_objective!(model, x -> x[1])
add_eq_constraint!(model, x -> x[1])
return NonconvexIpopt.optimize(
symbolify(model),
IpoptAlg(),
[0.0];
options = IpoptOptions(),
)
end
@testset "symbolify" begin
@testset "Functions - simplify = $simplify, sparse = $sparse" for simplify in (false, true), sparse in (false, true)
f = symbolify(sum, rand(3); hessian = false, simplify, sparse)
x = rand(3)
@test Zygote.gradient(f, x)[1] ≈ ForwardDiff.gradient(f, rand(3))
f = symbolify(x -> 2(x.^2) + x[1] * ones(3), rand(3); hessian = false, simplify, sparse).flat_f
x = rand(3)
@test Zygote.jacobian(f, x)[1] ≈ ForwardDiff.jacobian(f, x)
if sparse
@test NonconvexCore.sparse_jacobian(f, x) ≈ Zygote.jacobian(f, x)[1]
@test NonconvexCore.sparse_jacobian(f, x) isa SparseMatrixCSC
end
f = symbolify(sum, rand(3); hessian = true, simplify, sparse)
x = rand(3)
@test Zygote.gradient(f, x)[1] ≈ ForwardDiff.gradient(f, rand(3))
@test Zygote.hessian(f, x) ≈ ForwardDiff.hessian(f, rand(3))
if sparse
@test NonconvexCore.sparse_hessian(f, x) ≈ Zygote.hessian(f, x)
@test NonconvexCore.sparse_hessian(f, x) isa SparseMatrixCSC
end
f = symbolify(x -> norm(x) + x[1], rand(3); hessian = true, simplify, sparse)
x = rand(3)
@test Zygote.hessian(f, x) ≈ ForwardDiff.hessian(f, x)
f = symbolify(x -> [norm(x), x[1]], rand(3); hessian = true, simplify, sparse)
x = rand(3)
g = x -> sum(f(x))
@test Zygote.gradient(g, x)[1] ≈ ForwardDiff.gradient(g, x)
@test Zygote.hessian(g, x) ≈ ForwardDiff.hessian(g, x)
end
@testset "Model - first order = $first_order - sparse = $sparse" for first_order in (true, false), sparse in (true, false)
f = (x::AbstractVector) -> sqrt(x[2])
g = (x::AbstractVector, a, b) -> (a*x[1] + b)^3 - x[2]
options = IpoptOptions(; first_order, sparse)
m = Model(f)
addvar!(m, [0.0, 0.0], [10.0, 10.0])
add_ineq_constraint!(m, x -> g(x, 2, 0))
add_ineq_constraint!(m, x -> g(x, -1, 1))
alg = IpoptAlg()
sym_model = symbolify(m, hessian = !first_order, sparse = true)
r = NonconvexIpopt.optimize(sym_model, alg, [1.234, 2.345], options = options)
if sparse
vsym_model, xv, _ = NonconvexCore.tovecmodel(sym_model)
@test issparse(NonconvexCore.sparse_gradient(vsym_model.objective, xv))
@test issparse(NonconvexCore.sparse_jacobian(vsym_model.ineq_constraints, xv))
end
@test abs(r.minimum - sqrt(8/27)) < 1e-6
@test norm(r.minimizer - [1/3, 8/27]) < 1e-6
end
@testset "function-scope" begin
r = _test_function_scoping()
@test abs(r.minimum) < 1e-6
end
# https://github.com/JuliaNonconvex/Nonconvex.jl/issues/139
@testset "Nonconvex issue 139" begin
model = Model()
addvar!(model, fill(1.0, 4), fill(5.0, 4))
add_ineq_constraint!(model, x -> 25.0 - x[1] * x[2] * x[3] * x[4])
add_eq_constraint!(model, x -> 40.0 - x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2)
sym_model = symbolify(
model;
hessian = true,
sparse = true,
simplify = true,
)
vsym_model, xv, _ = NonconvexCore.tovecmodel(sym_model)
@test issparse(NonconvexCore.sparse_gradient(vsym_model.objective, xv))
@test issparse(NonconvexCore.sparse_jacobian(vsym_model.ineq_constraints, xv))
@test issparse(NonconvexCore.sparse_jacobian(vsym_model.eq_constraints, xv))
end
# https://github.com/JuliaNonconvex/Nonconvex.jl/issues/140
@testset "Nonconvex issue 140" begin
model = Model(x -> x[1] * x[4] * (x[1] + x[2] + x[3]) + x[3])
addvar!(model, fill(1.0, 4), fill(5.0, 4))
add_ineq_constraint!(model, x -> 25.0 - x[1] * x[2] * x[3] * x[4])
add_eq_constraint!(model, x -> 40.0 - x[1]^2 + x[2]^2 + x[3]^2 + x[4]^2)
sym_model = symbolify(
model;
hessian = true,
sparse = true,
simplify = true,
)
vsym_model, xv, _ = NonconvexCore.tovecmodel(sym_model)
@test issparse(NonconvexCore.sparse_gradient(vsym_model.objective, xv))
@test issparse(NonconvexCore.sparse_jacobian(vsym_model.ineq_constraints, xv))
@test issparse(NonconvexCore.sparse_jacobian(vsym_model.eq_constraints, xv))
result = optimize(
sym_model,
IpoptAlg(),
[1.0, 5.0, 5.0, 1.0];
options = IpoptOptions(; first_order = false, sparse = true),
)
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | code | 2714 | @testset "TraceFunction" begin
@testset "Reverse-mode" begin
f = TraceFunction(sum, on_call = true)
@test f.on_call == true
@test f.on_grad == false
f([2.0])
@test f.trace == [(input = [2.0], output = 2.0)]
Zygote.gradient(f, [3.0])
@test f.trace == [(input = [2.0], output = 2.0)]
f = TraceFunction(sum, on_grad = true)
@test f.on_call == false
@test f.on_grad == true
f([2.0])
@test f.trace == []
Zygote.gradient(f, [3.0])
@test f.trace == [(input = [3.0], output = 3.0, grad = [1.0])]
f = TraceFunction(sum)
@test f.on_call == true
@test f.on_grad == true
f = TraceFunction(sum, on_call = true, on_grad = true)
@test f.on_call == true
@test f.on_grad == true
f([2.0])
@test f.trace == [(input = [2.0], output = 2.0)]
Zygote.gradient(f, [3.0])
@test f.trace == [
(input = [2.0], output = 2.0),
(input = [3.0], output = 3.0, grad = [1.0]),
]
f = TraceFunction(sum, on_call = false, on_grad = false)
@test f.on_call == false
@test f.on_grad == false
f([2.0])
@test f.trace == []
Zygote.gradient(f, [3.0])
@test f.trace == []
end
@testset "Forward-mode" begin
f = TraceFunction(sum, on_call = true)
@test f.on_call == true
@test f.on_grad == false
f([2.0, 2.0])
@test f.trace == [(input = [2.0, 2.0], output = 4.0)]
ForwardDiff.gradient(f, [3.0, 3.0])
@test f.trace == [(input = [2.0, 2.0], output = 4.0)]
f = TraceFunction(sum, on_grad = true)
@test f.on_call == false
@test f.on_grad == true
f([2.0, 2.0])
@test f.trace == []
ForwardDiff.gradient(f, [3.0, 3.0])
@test f.trace == [(input = [3.0, 3.0], output = 6.0, grad = [1.0, 1.0])]
f = TraceFunction(sum)
@test f.on_call == true
@test f.on_grad == true
f = TraceFunction(sum, on_call = true, on_grad = true)
@test f.on_call == true
@test f.on_grad == true
f([2.0, 2.0])
@test f.trace == [(input = [2.0, 2.0], output = 4.0)]
ForwardDiff.gradient(f, [3.0, 3.0])
@test f.trace == [
(input = [2.0, 2.0], output = 4.0),
(input = [3.0, 3.0], output = 6.0, grad = [1.0, 1.0]),
]
f = TraceFunction(sum, on_call = false, on_grad = false)
@test f.on_call == false
@test f.on_grad == false
f([2.0, 2.0])
@test f.trace == []
ForwardDiff.gradient(f, [3.0, 3.0])
@test f.trace == []
end
end
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.4.3 | 3a74f57be05c51ef621620e9b05bd6db94d0c434 | docs | 608 | # NonconvexUtils
[](https://github.com/JuliaNonconvex/NonconvexUtils.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/JuliaNonconvex/NonconvexUtils.jl)
This is a package full of useful hacks for use in [Nonconvex.jl](https://github.com/JuliaNonconvex/Nonconvex.jl). The documentation can be found in the [Nonconvex.jl documentation](https://julianonconvex.github.io/Nonconvex.jl/stable/gradients/gradients/).
| NonconvexUtils | https://github.com/JuliaNonconvex/NonconvexUtils.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | code | 2301 | using SDPSymmetryReduction
using Documenter
using Literate
using Test
ENV["PLOTS_TEST"] = "true"
ENV["GKSwstype"] = "100"
DocMeta.setdocmeta!(SDPSymmetryReduction, :DocTestSetup, :(using SDPSymmetryReduction); recursive=true)
## Use Literate.jl to generate examples (functions modified from https://github.com/jump-dev/JuMP.jl/blob/master/docs/make.jl)
function _file_list(full_dir, relative_dir, extension)
return map(
file -> joinpath(relative_dir, file),
filter(file -> endswith(file, extension), sort(readdir(full_dir))),
)
end
"""
_include_sandbox(filename)
Include the `filename` in a temporary module that acts as a sandbox. (Ensuring
no constants or functions leak into other files.)
"""
function _include_sandbox(filename)
mod = @eval module $(gensym()) end
return Base.include(mod, filename)
end
function _literate_directory(dir)
rm.(_file_list(dir, dir, ".md"))
for filename in _file_list(dir, dir, ".jl")
# `include` the file to test it before `#src` lines are removed. It is
# in a testset to isolate local variables between files.
Test.@testset "$(filename)" begin
_include_sandbox(filename)
end
Literate.markdown(
filename,
dir;
documenter = true,
credit = true,
)
end
return nothing
end
_literate_directory.(joinpath(@__DIR__, "src", "examples"))
## Generate docs
makedocs(;
modules=[SDPSymmetryReduction],
authors="Daniel Brosch <[email protected]> and contributors",
repo="https://github.com/DanielBrosch/SDPSymmetryReduction.jl/blob/{commit}{path}#{line}",
sitename="SDPSymmetryReduction.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://DanielBrosch.com/SDPSymmetryReduction.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Examples" => [
"examples/ErdosRenyiThetaFunction.md",
"examples/QuadraticAssignmentProblems.md",
"examples/ReduceAndSolveJuMP.md"
]
],
)
##
deploydocs(;
repo="github.com/DanielBrosch/SDPSymmetryReduction.jl",
devbranch="main",
)
| SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | code | 3040 | # # The Theta'-function of Erdos-Renyi graphs
# Let $q$ be an odd prime, and let $V = \mathrm{GF}(q)^3$ be a three dimensional
# vector space over the finite field of order $q$. The set of one dimensional
# subspaces, i.e. the projective plane, of $V$ is denoted by $\mathrm{PG}(2,q)$.
# There are $q^2+q+1$ such subspaces, which are the vertices of the Erdos-Renyi
# graph $\mathrm{ER}(q)$. Two vertices are adjacent if they are distinct and
# orthogonal, i.e. for two representing vectors $x$ and $y$ we have $x^Ty=0$. We
# are interested in the size of a maximum stable set of these graphs,
# specifically upper bounds for this value. Note that these deterministic graphs are not
# the random graphs of the same name.
using LinearAlgebra #hide
q = 7
PG2q = vcat([[0, 0, 1]],
[[0, 1, b] for b = 0:q-1],
[[1, a, b] for a = 0:q-1 for b = 0:q-1])
Adj = [x' * y % q == 0 && x != y for x in PG2q, y in PG2q]
size(Adj)
#
using Plots #hide
spy(Adj)
# ## The Theta'-function
# The Theta'-function $\vartheta(G)$ of a graph $G=(V,E)$ is such an upper bound, based on
# semidefinite programming:
#
# ```math
# \vartheta(G)\coloneqq \sup\{\langle X,J\rangle : \langle X,A\rangle = 0, X\succcurlyeq 0, X\geq 0\}.
# ```
#
# In vectorized standard form this is simply
N = length(PG2q)
C = ones(N^2)
A = vcat(vec(Adj)', vec(Matrix{Float64}(I, N, N))')
b = [0.0, 1.0];
# ## Determining the symmetry reduction
# We can now apply the Jordan reduction method to the problem.
# First, we need to determine an (optimal) admissible subspace.
using SDPSymmetryReduction
P = admPartSubspace(C, A, b, true)
P.n
using Test #src
@test P.n == 18 #src
# Running `admPartSubspace` returns a `Partition` object. `P.n` is the number of orbits (and thus
# variables), and `P.P` is a matrix with integer values from `1` through `P.n`. Here, `P.P` looks like this
# (different color shades = different orbits):
heatmap(reverse(P.P, dims=1)) #hide
# Now we can block-diagonalize the algebra (numerically)
blkD = blockDiagonalize(P, true);
@test sort(blkD.blkSizes) == [2,2,2,2,3] #src
# ## Building the reduced SDP
# Since `blkD.blks[i]` is the block-diagonalized image of `P.P .== i`,
# we obtain the new, symmetry reduced SDP by
using SparseArrays
PMat = hcat([sparse(vec(P.P .== i)) for i = 1:P.n]...)
newA = A * PMat
newB = b
newC = C' * PMat;
# ## Solving the SDP with JuMP and CSDP
using JuMP, CSDP
m = Model(CSDP.Optimizer)
## Initialize variables corresponding parts of the partition P
## >= 0 because the original SDP-matrices are entry-wise nonnegative
x = @variable(m, x[1:P.n] >= 0)
@constraint(m, newA * x .== newB)
@objective(m, Max, newC * x)
psdBlocks = sum(blkD.blks[i] .* x[i] for i = 1:P.n)
for blk in psdBlocks
if size(blk, 1) > 1
@constraint(m, blk in PSDCone())
else
@constraint(m, blk .>= 0)
end
end
optimize!(m)
#
termination_status(m)
#
objective_value(m)
@test objective_value(m) ≈ 15.743402681126568 atol = 5 #src
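# The resulting upper bound is roughly 15.74, so a maximum stable set in $\mathrm{ER}(7)$ has
# at most 15 vertices.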
| SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | code | 4065 | # # Symmetry reducing a strong relaxation of the quadratic assignment problem
# Here, we are going to show how to load a QAP from QAPLIB, formulate
# a strong semidefinite relaxation of it, symmetry reduce it, and finally solve it.
# ## Quadratic assignment problems
# QAPs are given by two square matrices $A$ and $B$. The objective is to permute
# the rows and columns of $B$ such that the inner product of the two matrices is
# minimized.
#
# ``\mathrm{QAP}(A,B) = \min_{\phi\in S_n} \sum_{i,j=1}^n a_{ij}b_{\phi(i)\phi(j)}``
#
# QAPs are notoriously hard to solve exactly, but there exist strong polynomial time
# relaxations, such as the following semidefinite programming relaxation:
#
# ```math
# \begin{aligned}
# \min\enspace & \langle B\otimes A ,Y\rangle\\
# \mathrm{s.t.}\enspace & \langle I_n\otimes E_{jj},Y\rangle=1 \text{ for }j\in [n],\\
# & \langle E_{jj}\otimes I_n,Y\rangle=1 \text{ for }j\in [n],\\
# & \langle I_n\otimes (J_n-I_n)+(J_n-I_n)\otimes I_n,Y\rangle =0, \\
# & \langle J_{n^2},Y\rangle = n^2,\\
# & Y\in D^{n^2},
# \end{aligned}
# ```
#
# But in practice this relaxation is often too big to be solved directly.
# ## Loading the data of a QAP
using SparseArrays, LinearAlgebra
using Test #src
file = joinpath(@__DIR__, "esc16j.dat")
data = open(file) do file
read(file, String)
end
data = split(data, [' ', '\n', '\r'], keepempty = false)
n = parse(Int64, data[1])
A = zeros(Int64, n, n)
B = zeros(Int64, n, n)
pos = 2
for x = 1:n
for y = 1:n
A[x, y] = parse(Int64, data[pos])
global pos += 1
end
end
for x = 1:n
for y = 1:n
B[x, y] = parse(Int64, data[pos])
global pos += 1
end
end
# ## Building the SDP (in vectorized standard form)
n = size(A, 1)
## Objective
CPrg = sparse(kron(B, A))
In = sparse(I, n, n)
Jn = ones(n, n)
## Vectorised constraint matrices as rows of large matrix APrg
APrg = spzeros(2n + 1, n^4)
bPrg = zeros(2n + 1)
currentRow = 1
for j = 1:n
Ejj = spzeros(n, n)
Ejj[j, j] = 1.0
APrg[currentRow, :] = vec(kron(In, Ejj))
bPrg[currentRow] = 1
global currentRow += 1
## Last constraint is linearly dependent on others
if (j < n)
APrg[currentRow, :] = vec(kron(Ejj, In))
bPrg[currentRow] = 1
global currentRow += 1
end
end
APrg[currentRow, :] = vec(kron(In, Jn - In) + kron(Jn - In, In))
bPrg[currentRow] = 0
currentRow += 1
APrg[currentRow, :] = vec(ones(n^2, n^2))
bPrg[currentRow] = n^2
CPrg = sparse(vec(0.5 * (CPrg + CPrg')));
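# The unreduced relaxation has an $n^2\times n^2$ matrix variable and $2n+1$ affine constraints,
# which quickly becomes too large to solve directly; this motivates the symmetry reduction below.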
# ## Symmetry reducing the SDP
# We first determine an optimal admissible partition subspace
using SDPSymmetryReduction
P = admPartSubspace(CPrg, APrg, bPrg, true)
P.n
@test P.n == 150 #src
# And then we block-diagonalize it
blkD = blockDiagonalize(P, true);
@test sort(blkD.blkSizes) == sort([7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) #src
# ## Determining the coefficients of the reduced SDP
PMat = spzeros(Bool, n^4, P.n)
for c in eachindex(P.P)
PMat[c, P.P[c]] = 1
end
newA = APrg * PMat
newB = bPrg
newC = CPrg' * PMat;
## Removing linearly dependent constraints #src
# using RowEchelon #src
# T = rref!(Matrix(hcat(newA,newB))) #src
# r = rank(T) #src
# newA = T[1:r,1:end-1] #src
# newB = T[1:r,end] #src
# length(newB) #src
# ## Solving the reduced SDP with JuMP and CSDP
using JuMP, CSDP, MutableArithmetics
m = Model(CSDP.Optimizer)
## Initialize variables corresponding parts of the partition P
## >= 0 because the original SDP-matrices are entry-wise nonnegative
x = @variable(m, x[1:P.n] >= 0)
@constraint(m, newA * x .== newB)
@objective(m, Min, newC * x)
psdBlocks = @rewrite(sum(x[i] * blkD.blks[i] for i = 1:P.n));
for blk in psdBlocks
if size(blk, 1) > 1
@constraint(m, blk in PSDCone())
else
@constraint(m, blk .>= 0)
end
end
optimize!(m)
#
termination_status(m)
#
objective_value(m)
@test objective_value(m) ≈ 7.7942 atol = 5#src | SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | code | 3320 | # # General example
# This function takes an SDP in standard form, symmetry reduces it, formulates the reduced problem as a (possibly Hermitian) SDP, and solves it with JuMP
using LinearAlgebra
using SDPSymmetryReduction
using JuMP
using CSDP
using SparseArrays
function reduceAndSolve(C, A, b;
objSense = MathOptInterface.MAX_SENSE,
verbose = false,
complex = false,
limitSize = 3000)
tmd = @timed admPartSubspace(C, A, b, verbose)
P = tmd.value
jordanTime = tmd.time
if P.n <= limitSize
tmd = @timed blockDiagonalize(P, verbose; complex = complex)
blkD = tmd.value
blkDTime = tmd.time
if blkD === nothing
## Either random/rounding error, or complex numbers needed
return nothing
end
## solve with solver of choice
## Silence the solver output unless verbose was requested
m = Model(CSDP.Optimizer)
if !verbose
set_silent(m)
end
## >= 0 because the SDP-matrices should be entry-wise nonnegative
x = @variable(m, x[1:P.n] >= 0)
PMat = hcat([sparse(vec(P.P .== i)) for i = 1:P.n]...)
## Reduce the number of constraints
newConstraints = Float64.(hcat(A * PMat, b))
newConstraints = sparse(svd(Matrix(newConstraints)').U[:, 1:rank(newConstraints)]')
droptol!(newConstraints, 1e-8)
newA = newConstraints[:, 1:end-1]
newB = newConstraints[:, end]
newC = C' * PMat
@constraint(m, newA * x .== newB)
@objective(m, objSense, newC * x)
for i = 1:length(blkD[1])
blkExpr =
x[1] .* (
complex ?
[
real(blkD[2][1][i]) -imag(blkD[2][1][i])
imag(blkD[2][1][i]) real(blkD[2][1][i])
] :
blkD[2][1][i]
)
for j = 2:P.n
add_to_expression!.(
blkExpr,
x[j] .* (
complex ?
[
real(blkD[2][j][i]) -imag(blkD[2][j][i])
imag(blkD[2][j][i]) real(blkD[2][j][i])
] :
blkD[2][j][i]
),
)
end
if size(blkExpr, 1) > 1
@constraint(m, blkExpr in PSDCone())
else
@constraint(m, blkExpr .>= 0)
end
end
tmd = @timed optimize!(m)
optTime = tmd.time
if Int64(termination_status(m)) != 1
@show termination_status(m)
@error("Solve error.")
end
return (
jTime = jordanTime,
blkTime = blkDTime,
solveTime = optTime,
optVal = newC * value.(x),
blkSize = blkD[1],
originalSize = size(P.P, 1),
newSize = P.n
)
end
return (
jTime = jordanTime,
blkTime = 0,
solveTime = 0,
optVal = 0,
blkSize = 0,
originalSize = size(P.P, 1),
newSize = P.n
)
end
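# Hedged usage sketch (illustrative): with `C`, `A`, `b` the vectorized SDP data built as in
# the other examples (e.g. the theta'-function SDP), one would call
#   res = reduceAndSolve(C, A, b; objSense = MathOptInterface.MAX_SENSE, verbose = true)
# and inspect `res.optVal`, `res.blkSize` and `res.newSize`.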
| SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | code | 10251 | module SDPSymmetryReduction
using SparseArrays
using LinearAlgebra
export Partition, admPartSubspace, blockDiagonalize
# Stores a partition of [m]×[m] in a single matrix
# Entries of P should always be 1,…,n
"""
Partition
A partition subspace. `P.n` is the number of parts, and `P.P` an integer matrix defining the basis elements.
"""
struct Partition
n::Int64 # Number of parts
P::Matrix{Int64} # Matrix with entries 1,...,n
end
"""
roundToZero(f::Number)
Round numbers near zero to zero.
"""
function roundToZero(f::Number)
if abs(f) < 1e-8
return 0.0
end
return f
end
"""
part(M::Matrix{T}) where T
Create a partition from the unique entries of `M`.
"""
function part(M::Matrix{T}) where T
u = unique(M)
filter!(e -> e ≠ 0, u)
d = Dict([(u[i], i) for i in eachindex(u)])
d[0] = 0
return Partition(size(u, 1), [d[i] for i in M])
end
"""
coarsestPart(P1::Partition, P2::Partition)
Find the coarsest partition refining `P1` and `P2`.
"""
function coarsestPart(P1::Partition, P2::Partition)
return part(P1.P * (P2.n + 1) + P2.P)
end
"""
rndPart(P::Partition)
Returns a random linear combination in the partition space `P`.
"""
function rndPart(P::Partition)
r = [rand() for i = 1:P.n+1]
r[1] = 0
return [r[i+1] for i in P.P]
end
"""
roundMat(M)
Rounds the matrix to 5 significant digits and sets entries near zero to exactly zero.
"""
function roundMat(M)
tmp = round.(M, sigdigits = 5)
tmp = roundToZero.(tmp)
return tmp
end
"""
orthProject(A::AbstractMatrix{T}, v::AbstractVector{T}) where T
Orthogonally project the vector `v` onto the column space of `A`.
"""
function orthProject(A::AbstractMatrix{T}, v::AbstractVector{T}) where T
return A * ((A' * A) \ Vector(A' * v))
end
"""
projectAndRound(M::AbstractMatrix{T}, A::AbstractMatrix{T}; round = true) where T
Projects `vec(M)` onto the orthogonal complement of the column space of `A`, rounds the result, and reshapes it back into a matrix.
"""
function projectAndRound(M::AbstractMatrix{T}, A::AbstractMatrix{T}; round = true) where T
n = Int64(sqrt(length(M)))
tmp = vec(Matrix(M))
tmp = tmp - orthProject(A, tmp)
if round
tmp = roundMat(tmp)
end
tmp = reshape(tmp, n, n)
return Float64.(tmp)
end
"""
admPartSubspace(C::Vector{T}, A::Matrix{T}, b::Vector{T}, verbose::Bool = false)
Returns the optimal admissible partition subspace for the SDP
``\\inf\\{C^Tx, Ax = b, \\mathrm{Mat}(x) \\succcurlyeq 0\\}.``
This is done using a randomized Jordan-reduction algorithm, and it returns a Jordan algebra (closed under linear combinations and squaring). SDPs can be restricted to such a subspace without changing their optimal value.
## Output
A `Partition` subspace `P`.
"""
function admPartSubspace(C::AbstractVector{T}, A::AbstractMatrix{T}, b::AbstractVector{T}, verbose::Bool = false) where T<:AbstractFloat
n = Int(sqrt(Float64(length(C))))
verbose && print("\nStarting the reduction. Original dimension: $(Int64((n^2+n)/2))\n",)
C = roundMat(C)
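# Split off the part of the objective that matters on the affine feasible set:
# project C onto the orthogonal complement of the row space of A and symmetrize it.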
CL = C - orthProject(A', C)
CL = reshape(roundMat(CL), n, n)
CL = Matrix(Symmetric(0.5 * (CL + CL')))
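# A particular solution of A x = b: symmetrize it and project it onto the row space of A.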
X0 = sparse(qr(sparse(A)) \ b)
X0Sym = vec(0.5 * (reshape(X0, n, n) + reshape(X0, n, n)'))
X0Lp = orthProject(A', X0Sym)
X0Lp = roundMat(X0Lp)
X0Lp = Matrix(Symmetric(reshape(X0Lp, n, n)))
# Initialize P as the coarsest partition refining the partitions of two matrices
P = coarsestPart(part(X0Lp), part(CL))
dim = P.n
it = 0
# Iterate until converged
while true
it += 1
verbose && print("Iteration $it, Current dimension: $dim\n",)
# Add a random projection to L
randomizedP = rndPart(P)
BL = projectAndRound(randomizedP, A')
P = coarsestPart(P, part(BL))
# If the last projection didn't change the partition, reuse the random linear combination
if (dim != P.n)
randomizedP = rndPart(P)
end
# Add random square
P2 = randomizedP^2
P2 = roundMat(P2)
P = coarsestPart(P, part(P2))
# Check if converged (not changed or reached full partition)
if (dim == P.n) | (P.n == Int64((n^2 + n) / 2))
dim = P.n
break
end
dim = P.n
end
verbose &&
print("$it Total iterations, Final dimension: $dim, Old dimension: $(Int64((n^2+n)/2))\n")
return P
end
"""
unSymmetrize(P::Partition)
Weisfeiler-Leman-style algorithm to "un-symmetrize" the Jordan algebra corresponding to `P`, i.e. to close the partition subspace under ordinary (non-symmetric) matrix products.
"""
function unSymmetrize(P::Partition)
P = deepcopy(P)
dim = P.n
it = 0
# Iterate until converged
while true
it += 1
randomizedP1 = rndPart(P)
randomizedP2 = rndPart(P)
P2 = randomizedP1 * randomizedP2
P2 = roundMat(P2)
P = coarsestPart(P, part(P2))
# Check if converged
if dim == P.n
break
end
dim = P.n
end
return P
end
"""
blockDiagonalize(P::Partition, verbose = true; epsilon = 1e-8, complex = false)
Determines a block-diagonalization of a (Jordan)-algebra given by a partition `P` using a randomized algorithm. `blockDiagonalize(P)` returns a real block-diagonalization `blkd`, if it exists, otherwise `nothing`.
`blockDiagonalize(P; complex = true)` returns the same, but with complex valued matrices, and should be used if no real block-diagonalization was found. To use the complex matrices practically, remember that a Hermitian matrix `A` is positive semidefinite iff `[real(A) -imag(A); imag(A) real(A)]` is positive semidefinite.
## Output
* `blkd.blkSizes` is an integer array of the sizes of the blocks.
* `blkd.blks` is an array of length `P.n` containing arrays of (real/complex) matrices of sizes `blkd.blkSizes`. I.e. `blkd.blks[i]` is the image of the basis element `P.P .== i`.
"""
function blockDiagonalize(P::Partition, verbose = true; epsilon = 1e-8, complex = false)
P2 = P
if complex
P2 = unSymmetrize(P)
end
function getRandomMatrix()
if !complex
return rndPart(P)
else
tmp = rndPart(P2) + im * rndPart(P2)
return tmp + tmp'
end
end
verbose && println("Determining block sizes...")
A = getRandomMatrix()
F = eigen(A)
Q = F.vectors
# split by eigenvalues
roundedEV = round.(F.values, digits = 10)
uniqueEV = unique(roundedEV)
countEV = [count(roundedEV .== u) for u in uniqueEV]
QSplit = [Q[:, [i for i = 1:length(roundedEV) if roundedEV[i] == u]] for u in uniqueEV]
PSplit = [P.P .== i for i = 1:P.n]
K = collect(1:length(uniqueEV))
tmp = getRandomMatrix()
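# Merge eigenspaces of the generic element that belong to the same simple block:
# two eigenspaces of equal dimension lie in the same block iff a second generic
# algebra element has a nonzero coupling between them.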
for i = 1:length(uniqueEV)
for j = i:length(uniqueEV)
if K[i] != K[j] && countEV[i] == countEV[j]
if any(x -> x >= epsilon, abs.(QSplit[i]' * tmp * QSplit[j]))
K[K.==K[j]] .= K[i]
end
end
end
end
verbose && println("Block sizes are $(sort!([count(K.==Ki) for Ki in unique(K)]))")
if !complex &&
sum([count(K .== Ki) * (count(K .== Ki) + 1) / 2 for Ki in unique(K)]) != P.n
if verbose
@error("Dimensions do not match up. Rounding error (try different epsilons and/or try again) or not block-diagonalizable over the reals (try parameter complex = true).")
@show sum([count(K .== Ki) * (count(K .== Ki) + 1) / 2 for Ki in unique(K)])
@show P.n
end
return nothing
elseif complex && sum([count(K .== Ki)^2 for Ki in unique(K)]) != P2.n
if verbose
@error("Dimensions do not match up. Probably a rounding error (try different epsilons and/or try again).")
@show sum([count(K .== Ki)^2 for Ki in unique(K)])
@show P2.n
end
return nothing
end
verbose && println("Determining the algebra-isomorphism...")
uniqueKs = unique(K)
reducedQis = []
for Ki in uniqueKs
countKi = count(K .== Ki)
QKi = hcat([QSplit[i] for i = 1:length(uniqueEV) if K[i] == Ki]...)
B1 = Symmetric(QKi' * getRandomMatrix() * QKi)
QKi3 = zeros(complex ? Complex{Float64} : Float64, size(B1))
mult = countEV[Ki]
for j = 1:countKi
if j == 1
QKi3[
(mult*(j-1)+1):(mult*(j-1)+mult),
(mult*(j-1)+1):(mult*(j-1)+mult),
] = Matrix(I, mult, mult)
else
QKi3[
(mult*(j-1)+1):(mult*(j-1)+mult),
(mult*(j-1)+1):(mult*(j-1)+mult),
] =
B1[
1:mult,
(mult*(j-1)+1):(mult*(j-1)+mult),
]^(-1)
QKi3[
(mult*(j-1)+1):(mult*(j-1)+mult),
(mult*(j-1)+1):(mult*(j-1)+mult),
] ./= norm(QKi3[
(mult*(j-1)+1),
(mult*(j-1)+1):(mult*(j-1)+mult),
])
end
end
Per = zeros(size(B1))
for i = 1:countKi
for j = 1:mult
Per[i+countKi*(j-1), j+mult*(i-1)] = 1
end
end
reducedQi = (QKi*QKi3*Per')[:, 1:countKi]
push!(reducedQis, reducedQi)
end
verbose && println("Calculating image of the basis of the algebra...")
blockDiagonalization = [
[
(
complex ?
real(B) .* (abs.(real(B)) .>= epsilon) +
im * imag(B) .* (abs.(imag(B)) .>= epsilon) :
B .* (abs.(B) .>= epsilon)
) for B in [Qi' * P * Qi for Qi in reducedQis]
] for P in PSplit
]
blockSizes = [size(b)[1] for b in blockDiagonalization[1]]
return (blkSizes = blockSizes, blks = blockDiagonalization)
end
end
| SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | code | 1884 | using SDPSymmetryReduction
using LinearAlgebra
using Test
@testset "SDPSymmetryReduction.jl" begin
@test iszero(SDPSymmetryReduction.roundToZero(1e-10))
M = rand(1:10,10,10)
@test SDPSymmetryReduction.part(M).n == length(unique(M))
P1 = Partition(3, [1 2 2; 2 3 3; 2 3 3])
P2 = Partition(3, [1 1 2; 1 1 2; 1 1 3])
P3 = Partition(6, [1 2 4; 2 3 5; 2 3 6])
@test SDPSymmetryReduction.coarsestPart(P1,P2).P == P3.P
@test SDPSymmetryReduction.part(SDPSymmetryReduction.rndPart(P1)).P == P1.P
M = rand(10,10)
@test SDPSymmetryReduction.roundMat(M) ≈ M atol = 1e-4
A = rand(9,3)
M = rand(3,3)
@test maximum(abs.(A \ vec(SDPSymmetryReduction.projectAndRound(M, A; round = false)))) ≈ 0.0 atol=1e-10
T = M - SDPSymmetryReduction.projectAndRound(M, A; round = false)
@test all(isapprox.(A*(A\vec(T)) - vec(T), 0, atol=1e-8))
q = 7
PG2q = vcat([[0, 0, 1]],
[[0, 1, b] for b = 0:q-1],
[[1, a, b] for a = 0:q-1 for b = 0:q-1])
Adj = [x' * y % q == 0 && x != y for x in PG2q, y in PG2q]
N = length(PG2q)
C = ones(N^2)
A = vcat(vec(Adj)', vec(Matrix{Float64}(I, N, N))')
b = [0.0, 1.0]
@test admPartSubspace(C, A, b, true).n == 18
@test sort(blockDiagonalize(admPartSubspace(C, A, b, true), true).blkSizes) == [2,2,2,2,3]
@test SDPSymmetryReduction.unSymmetrize(P1).P == Partition(4, [1 3 3; 2 4 4; 2 4 4]).P
# complex block diagonalization tests
P = Partition(4,[1 2 3 2; 2 1 2 3; 3 2 1 2; 2 3 2 1])
@test blockDiagonalize(P; complex = true).blkSizes == [1,1,1]
function failsBlockDiagonalize()
try
blockDiagonalize(P; complex = false).blkSizes == [1,1,1]
catch
return true
end
return false
end
@test failsBlockDiagonalize()
end | SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | docs | 3424 | # SDPSymmetryReduction
[](https://DanielBrosch.github.io/SDPSymmetryReduction.jl/stable)
[](https://DanielBrosch.github.io/SDPSymmetryReduction.jl/dev)
[](https://github.com/DanielBrosch/SDPSymmetryReduction.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/DanielBrosch/SDPSymmetryReduction.jl)
# SDPSymmetryReduction
Numerically reduces semidefinite programming problems by exploiting their symmetry. Input is in vectorized standard form
```
sup/inf dot(C,x)
subject to Ax = b,
Mat(x) is positive semidefinite/doubly nonnegative,
```
where `C` and `b` are vectors and `A` is a matrix.
## Installation
Simply run
```julia
pkg> add SDPSymmetryReduction # Press ']' to enter the Pkg REPL mode.
```
## Main use
The function `admPartSubspace` determines an optimal admissible partition subspace for the problem. This is done using a randomized Jordan-reduction algorithm, and it returns a Jordan algebra (closed under linear combinations and squaring). SDPs can be restricted to such a subspace without changing their optimal value.
The function `blockDiagonalize` determines a block-diagonalization of a (Jordan)-algebra given by a partition `P` using a randomized algorithm.
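In the simplest case the two functions are applied in sequence (here `C`, `A` and `b` are assumed to already be given in the vectorized standard form above):
```julia
P = admPartSubspace(C, A, b, true)  # optimal admissible partition subspace (verbose output)
blkD = blockDiagonalize(P, true)    # block-diagonalization, or `nothing` if none exists over the reals
```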
For more details, see the [documentation](https://DanielBrosch.github.io/SDPSymmetryReduction.jl/stable).
## Example: Theta'-function
Let `Adj` be an adjacency matrix of an (undirected) graph `G`. Then the Theta'-function of the graph is given by
```
sup dot(J,X)
subject to dot(Adj,X) = 0,
dot(I,X) = 1,
X is positive semidefinite,
X is entry-wise nonnegative,
```
where `J` is the all-ones matrix, and `I` the identity. Then we can exploit the symmetry of the graph and calculate this function by
```julia
using SDPSymmetryReduction
using LinearAlgebra, SparseArrays
using JuMP, MosekTools
# Theta' SDP
N = size(Adj,1)
C = ones(N^2)
A = hcat(vec(Adj), vec(Matrix(I, N, N)))'
b = [0, 1]
# Find the optimal admissible subspace (= Jordan algebra)
P = admPartSubspace(C, A, b, true)
# Block-diagonalize the algebra
blkD = blockDiagonalize(P, true)
# Calculate the coefficients of the new SDP
PMat = hcat([sparse(vec(P.P .== i)) for i = 1:P.n]...)
newA = A * PMat
newB = b
newC = C' * PMat
# Solve with optimizer of choice
m = Model(Mosek.Optimizer)
# Initialize variables corresponding to the parts of the partition P
# >= 0 because the original SDP-matrices are entry-wise nonnegative
x = @variable(m, x[1:P.n] >= 0)
@constraint(m, newA * x .== newB)
@objective(m, Max, newC * x)
psdBlocks = sum(blkD.blks[i] .* x[i] for i = 1:P.n)
for blk in psdBlocks
if size(blk, 1) > 1
@constraint(m, blk in PSDCone())
else
@constraint(m, blk .>= 0)
end
end
optimize!(m)
@show termination_status(m)
@show value(newC * x)
```
There are more examples in the [documentation](https://DanielBrosch.github.io/SDPSymmetryReduction.jl/stable).
## Citing
See [`CITATION.bib`](CITATION.bib) for the relevant reference(s).
| SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.3 | 85b0b3e5c09eed5e05447305b18ab179af88e5b2 | docs | 1310 | ```@meta
CurrentModule = SDPSymmetryReduction
```
# SDPSymmetryReduction.jl
*A symmetry reduction package for Julia.*
Documentation for [SDPSymmetryReduction](https://github.com/DanielBrosch/SDPSymmetryReduction.jl).
This package provides functions to determine a symmetry reduction of an [SDP](https://en.wikipedia.org/wiki/Semidefinite_programming) numerically using the Jordan Reduction method.
It assumes that the problem is given in vectorized standard form
```
sup/inf dot(C,x)
subject to Ax = b,
Mat(x) is positive semidefinite/doubly nonnegative,
```
where `C` and `b` are vectors and `A` is a matrix.
The function [`admPartSubspace`](@ref) finds an optimal admissible partition subspace for a given SDP. An SDP can be restricted to such a subspace without changing its optimum. The returned [`Partition`](@ref)-subspace can then be block-diagonalized using [`blockDiagonalize`](@ref).
For details on the theory and the implemented algorithms, check out the reference linked in the repository.
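A minimal sketch of this workflow (assuming `C`, `A` and `b` are already given in the vectorized standard form above):
```julia
using SDPSymmetryReduction
P = admPartSubspace(C, A, b, true)  # find an optimal admissible partition subspace
blkd = blockDiagonalize(P, true)    # block-diagonalize it; returns `nothing` if impossible over the reals
```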
## Examples
```@contents
Pages = ["examples/ErdosRenyiThetaFunction.md", "examples/QuadraticAssignmentProblems.md", "examples/ReduceAndSolveJuMP.md"]
Depth = 1
```
## Documentation
```@autodocs
Modules = [SDPSymmetryReduction]
Private = false
```
| SDPSymmetryReduction | https://github.com/DanielBrosch/SDPSymmetryReduction.jl.git |
|
[
"MIT"
] | 0.1.1 | 833c9a53837d3c80589503bb00765b92256dd083 | code | 4436 | module SortTileRecursiveTree
using Extents
import GeoInterface as GI
"""
STRtree(geoms; nodecapacity=10)
Construct an STRtree from a collection of geometries with the given node capacity.
"""
struct STRtree{T}
rootnode::T
function STRtree(geoms; nodecapacity=10)
rootnode = build_root_node(geoms, nodecapacity=nodecapacity)
return new{typeof(rootnode)}(rootnode)
end
end
struct STRNode{E,T}
extent::E
children::T
end
struct STRLeafNode{E}
extents::E
indices::Vector{Int}
end
GI.extent(n::STRNode) = n.extent
GI.extent(n::STRLeafNode) = foldl(Extents.union, n.extents)
function Base.show(io::IO, tree::SortTileRecursiveTree.STRtree)
println(io, "STRtree")
if tree.rootnode isa STRNode
display(tree.rootnode.extent)
elseif tree.rootnode isa STRLeafNode
display(foldl(Extents.union, tree.rootnode.extents))
end
end
function leafnodes(geoms; nodecapacity=10)
extents_indices = [(GI.extent(geoms[i]), i) for i in eachindex(geoms)]
perm = sortperm(extents_indices; by=(v -> ((v[1][1][1] + v[1][1][2]) / 2))) # [extent/index][dim][min/max] sort by x
sorted_extents = extents_indices[perm]
r = length(sorted_extents)
P = ceil(Int, r / nodecapacity)
S = ceil(Int, sqrt(P))
x_splits = Iterators.partition(sorted_extents, S * nodecapacity)
nodes = STRLeafNode{Vector{typeof(extents_indices[1][1])}}[]
for x_split in x_splits
perm = sortperm(x_split; by=(v -> ((v[1][2][1] + v[1][2][2]) / 2))) # [extent/index][dim][min/max] sort by y
sorted_split = x_split[perm]
y_splits = Iterators.partition(sorted_split, nodecapacity)
for y_split in y_splits
push!(nodes, STRLeafNode(getindex.(y_split,1), getindex.(y_split,2)))
end
end
return nodes
end
# a bit of duplication...
function parentnodes(nodes; nodecapacity=10)
extents_indices = [(GI.extent(node), node) for node in nodes]
perm = sortperm(extents_indices; by=(v -> ((v[1][1][1] + v[1][1][2]) / 2))) # [extent/node][dim][min/max] sort by x
sorted_extents = extents_indices[perm]
r = length(sorted_extents)
P = ceil(Int, r / nodecapacity)
S = ceil(Int, sqrt(P))
x_splits = Iterators.partition(sorted_extents, S * nodecapacity)
T = typeof(extents_indices[1][1])
N = Vector{typeof(extents_indices[1][2])}
nodes = STRNode{T, N}[]
for x_split in x_splits
perm = sortperm(x_split; by=(v -> ((v[1][2][1] + v[1][2][2]) / 2))) # [extent/index][dim][min/max] sort by y
sorted_split = x_split[perm]
y_splits = Iterators.partition(sorted_split, nodecapacity)
for y_split in y_splits
push!(nodes, STRNode(foldl(Extents.union, getindex.(y_split,1)), getindex.(y_split,2)))
end
end
return nodes
end
"""recursively build root node from geometries and node capacity"""
function build_root_node(geoms; nodecapacity=10)
nodes = leafnodes(geoms, nodecapacity=nodecapacity)
while length(nodes) > 1
nodes = parentnodes(nodes, nodecapacity=nodecapacity)
end
return nodes[1]
end
"""
query(tree::STRtree, extent::Extent)
query(tree::STRtree, geom)
Query the tree for geometries whose extent intersects with the given extent or the extent of the given geometry.
Returns a vector of indices of the geometries that can be used to index into the original collection of geometries under the assumption that the collection has not been modified since the tree was built.
"""
function query end
function query(tree::STRtree, extent::Extent)
query_result = Int[]
query!(query_result, tree.rootnode, extent)
return unique(sort!(query_result))
end
query(tree::STRtree, geom) = query(tree, GI.extent(geom))
"""recursively query the nodes until a leaf node is reached"""
function query!(query_result::Vector{Int}, node::STRNode, extent::Extent)
if Extents.intersects(node.extent, extent)
for child in node.children
query!(query_result, child, extent)
end
end
return query_result
end
"""when leaf node is reached, push indices of geometries to query result"""
function query!(query_result::Vector{Int}, node::STRLeafNode, extent::Extent)
for i in eachindex(node.extents)
if Extents.intersects(node.extents[i], extent)
push!(query_result, node.indices[i])
end
end
end
export STRtree, query
end
| SortTileRecursiveTree | https://github.com/maxfreu/SortTileRecursiveTree.jl.git |
|
[
"MIT"
] | 0.1.1 | 833c9a53837d3c80589503bb00765b92256dd083 | code | 1099 | using SortTileRecursiveTree
using Test
using Extents
import ArchGDAL as AG
import GeoInterface as GI
@testset "SortTileRecursiveTree.jl" begin
@testset "Single point" begin
point = AG.createpoint(1, 1)
tree = STRtree([point])
# test that showing the thing works
display(tree)
@test query(tree, Extent(X=(0, 1.5), Y=(0, 1.5))) == [1]
@test query(tree, Extent(X=(0, 0.5), Y=(0, 0.5))) == []
end
@testset "Many points" begin
x = 1:100
y = 1:100
points = AG.createpoint.(x, y')
# polygons = AG.buffer.(points, 0.1)
tree = STRtree(points)
display(tree)
@test tree.rootnode isa SortTileRecursiveTree.STRNode
@test tree.rootnode.children[1] isa SortTileRecursiveTree.STRNode
query_result = query(tree, Extent(X=(0, 100.5), Y=(0, 1.5)))
@test query_result isa Vector{Int}
@test length(query_result) == 100
@test points[query_result] == points[:,1]
@test query(tree, Extent(X=(0, 0.5), Y=(0, 0.5))) == []
end
end
| SortTileRecursiveTree | https://github.com/maxfreu/SortTileRecursiveTree.jl.git |
|
[
"MIT"
] | 0.1.1 | 833c9a53837d3c80589503bb00765b92256dd083 | docs | 728 | # SortTileRecursiveTree
[](https://github.com/maxfreu/SortTileRecursiveTree.jl/actions/workflows/CI.yml?query=branch%3Amain)
An STR tree implementation for GeoInterface compatible geometries.
Usage:
```julia
using SortTileRecursiveTree
using Extents
tree = STRtree(geometries)
query_result = query(tree, Extent(X=(0, 100.5), Y=(0, 1.5)))
# or
query_result = query(tree, query_geometry)
```
The query result is a `Vector{Int}` that you can use to index into the collection of geometries from which the tree was created, under the assumption that it has not changed since then.
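For example, the candidate geometries themselves can be recovered by indexing (continuing the snippet above):
```julia
candidates = geometries[query_result]  # geometries whose extents intersect the query extent
```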
Contributions are welcome! :) | SortTileRecursiveTree | https://github.com/maxfreu/SortTileRecursiveTree.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 713 | using Documenter, AllocCheck
makedocs(
sitename = "AllocCheck Documentation",
doctest = false,
modules = [AllocCheck],
warnonly = [:missing_docs],
pages = [
"Home" => "index.md",
"Tutorials" => [
"Optional debugging and logging" => "tutorials/optional_debugging_and_logging.md",
"Hot loops" => "tutorials/hot_loop.md",
"Minimum latency error recovery" => "tutorials/error_recovery.md",
],
"API" => "api.md",
],
format = Documenter.HTML(prettyurls = haskey(ENV, "CI")),
)
deploydocs(
repo = "github.com/JuliaLang/AllocCheck.jl.git",
push_preview = true,
)
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 8146 | module AllocCheck
import LLVM, GPUCompiler
using GPUCompiler: JuliaContext, safe_name
using LLVM: BasicBlock, ConstantExpr, ConstantInt, InlineAsm, IRBuilder, UndefValue,
blocks, br!, called_operand, dispose, dominates, instructions, metadata,
name, opcode, operands, position!, ret!, successors, switch!, uses, user
include("static_backtrace.jl")
include("abi_call.jl")
include("classify.jl")
include("compiler.jl")
include("macro.jl")
include("types.jl")
include("utils.jl")
module Runtime end
function rename_calls_and_throws!(f::LLVM.Function, mod::LLVM.Module)
# In order to detect whether an instruction executes only when
# throwing an error, we re-write all throw/catches to pass through
# the same basic block and then we check whether the instruction
# is (post-)dominated by this "any_throw" / "any_catch" basic block.
#
# The goal is to check whether an inst always flows into some _some_
# throw (or from some catch), rather than looking for a specific
# throw/catch that the instruction always flows into.
any_throw = BasicBlock(f, "any_throw")
any_catch = BasicBlock(f, "any_catch")
builder = IRBuilder()
position!(builder, any_throw)
throw_ret = ret!(builder) # Dummy inst for post-dominance test
position!(builder, any_catch)
undef_i32 = UndefValue(LLVM.Int32Type())
catch_switch = switch!(builder, undef_i32, any_catch, 0) # Dummy inst for dominance test
for block in blocks(f)
for inst in instructions(block)
if isa(inst, LLVM.CallInst)
rename_call!(inst, mod)
decl = called_operand(inst)
# `throw`: Add pseudo-edge to any_throw
if name(decl) == "ijl_throw" || name(decl) == "llvm.trap"
position!(builder, block)
brinst = br!(builder, any_throw)
end
# `catch`: Add pseudo-edge from any_catch
if name(decl) == "__sigsetjmp" || name(decl) == "sigsetjmp"
icmp_ = user(only(uses(inst))) # Asserts one usage
@assert icmp_ isa LLVM.ICmpInst
@assert convert(Int, operands(icmp_)[2]) == 0
for br_ in uses(icmp_)
br_ = user(br_)
@assert br_ isa LLVM.BrInst
# Rewrite the jump to this `catch` block as an indirect jump
# from a common `any_catch` block
_, catch_target = successors(br_)
successors(br_)[2] = any_catch
branch_index = ConstantInt(length(successors(catch_switch)))
LLVM.API.LLVMAddCase(catch_switch, branch_index, catch_target)
end
end
elseif isa(inst, LLVM.UnreachableInst)
# By assuming forward-progress, we know that any code post-dominated
# by an `unreachable` must either be dead or contain a statically-known
# throw().
#
# This can be useful in, e.g., cases where Julia codegen knows that a
# dynamic dispatch is must-throw but the LLVM IR does not otherwise
# reflect this information.
position!(builder, block)
brinst = br!(builder, any_throw)
end
end
end
dispose(builder)
# Return the "any_throw" and "any_catch" instructions so that they
# can be used for (post-)dominance tests.
return throw_ret, catch_switch
end
"""
Find all static allocation sites in the provided LLVM IR.
This function modifies the LLVM module in-place, effectively trashing it.
"""
function find_allocs!(mod::LLVM.Module, meta; ignore_throw=true)
(; entry, compiled) = meta
errors = []
worklist = LLVM.Function[ entry ]
seen = LLVM.Function[ entry ]
while !isempty(worklist)
f = pop!(worklist)
throw_, catch_ = rename_calls_and_throws!(f, mod)
domtree = LLVM.DomTree(f)
postdomtree = LLVM.PostDomTree(f)
for block in blocks(f)
for inst in instructions(block)
if isa(inst, LLVM.CallInst)
decl = called_operand(inst)
throw_only = dominates(postdomtree, throw_, inst)
ignore_throw && throw_only && continue
catch_only = dominates(domtree, catch_, inst)
ignore_throw && catch_only && continue
class, may_allocate = classify_runtime_fn(name(decl); ignore_throw)
if class === :alloc
allocs = resolve_allocations(inst)
if allocs === nothing # TODO: failed to resolve
bt = backtrace_(inst; compiled)
push!(errors, AllocationSite(Any, bt))
else
for (inst_, typ) in allocs
throw_only = dominates(postdomtree, throw_, inst_)
ignore_throw && throw_only && continue
catch_only = dominates(domtree, catch_, inst_)
ignore_throw && catch_only && continue
bt = backtrace_(inst_; compiled)
push!(errors, AllocationSite(typ, bt))
end
end
@assert may_allocate
elseif class === :dispatch
fname = resolve_dispatch_target(inst)
bt = backtrace_(inst; compiled)
push!(errors, DynamicDispatch(bt, fname))
@assert may_allocate
elseif class === :runtime && may_allocate
bt = backtrace_(inst; compiled)
fname = replace(name(decl), r"^ijl_"=>"jl_")
push!(errors, AllocatingRuntimeCall(fname, bt))
end
if decl isa LLVM.Function && length(blocks(decl)) > 0 && !in(decl, seen)
push!(worklist, decl)
push!(seen, decl)
end
end
end
end
dispose(postdomtree)
dispose(domtree)
end
# TODO: dispose(mod)
# dispose(mod)
unique!(errors)
return errors
end
"""
check_allocs(func, types; ignore_throw=true)
Compiles the given function and types to LLVM IR and checks for allocations.
Returns a vector of `AllocationSite`, `DynamicDispatch`, and `AllocatingRuntimeCall`
!!! warning
The Julia language/compiler does not guarantee that this result is stable across
Julia invocations.
If you rely on allocation-free code for safety/correctness, it is not sufficient
to verify `check_allocs` in test code and expect that the corresponding call in
production will not allocate at runtime.
For this case, you must use `@check_allocs` instead.
# Example
```jldoctest
julia> function foo(x::Int, y::Int)
z = x + y
return z
end
foo (generic function with 1 method)
julia> allocs = check_allocs(foo, (Int, Int))
AllocCheck.AllocationSite[]
```
"""
function check_allocs(@nospecialize(func), @nospecialize(types); ignore_throw=true)
if !hasmethod(func, types)
throw(MethodError(func, types))
end
source = GPUCompiler.methodinstance(Base._stable_typeof(func), Base.to_tuple_type(types))
target = DefaultCompilerTarget()
job = CompilerJob(source, config)
allocs = JuliaContext() do ctx
mod, meta = GPUCompiler.compile(:llvm, job, validate=false, optimize=false, cleanup=false)
optimize!(job, mod)
allocs = find_allocs!(mod, meta; ignore_throw)
# display(mod)
# dispose(mod)
allocs
end
return allocs
end
export check_allocs, alloc_type, @check_allocs, AllocCheckFailure
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 2915 | import GPUCompiler, LLVM
using LLVM: LLVMType
# https://github.com/JuliaGPU/GPUCompiler.jl/blob/21ca075c1e91fe0c15f1330ab487b4831013ec1f/examples/jit.jl#L145-L222
@generated function abi_call(f::Ptr{Cvoid}, rt::Type{RT}, tt::Type{T}, func::F, args::Vararg{Any, N}) where {T, RT, F, N}
argtt = tt.parameters[1]
rettype = rt.parameters[1]
argtypes = DataType[argtt.parameters...]
argexprs = Union{Expr, Symbol}[]
ccall_types = DataType[]
before = :()
after = :(ret)
# Note this follows: emit_call_specfun_other
JuliaContext() do ctx
if !GPUCompiler.isghosttype(F) && !Core.Compiler.isconstType(F)
isboxed = GPUCompiler.deserves_argbox(F)
argexpr = :(func)
if isboxed
push!(ccall_types, Any)
else
et = convert(LLVMType, func)
if isa(et, LLVM.SequentialType) # et->isAggregateType
push!(ccall_types, Ptr{F})
argexpr = Expr(:call, GlobalRef(Base, :Ref), argexpr)
else
push!(ccall_types, F)
end
end
push!(argexprs, argexpr)
end
T_jlvalue = LLVM.StructType(LLVMType[])
T_prjlvalue = LLVM.PointerType(T_jlvalue, #= AddressSpace::Tracked =# 10)
for (source_i, source_typ) in enumerate(argtypes)
if GPUCompiler.isghosttype(source_typ) || Core.Compiler.isconstType(source_typ)
continue
end
argexpr = :(args[$source_i])
isboxed = GPUCompiler.deserves_argbox(source_typ)
et = isboxed ? T_prjlvalue : convert(LLVMType, source_typ)
if isboxed
push!(ccall_types, Any)
elseif isa(et, LLVM.SequentialType) # et->isAggregateType
push!(ccall_types, Ptr{source_typ})
argexpr = Expr(:call, GlobalRef(Base, :Ref), argexpr)
else
push!(ccall_types, source_typ)
end
push!(argexprs, argexpr)
end
if GPUCompiler.isghosttype(rettype) || Core.Compiler.isconstType(rettype)
# Do nothing...
# In theory we could set `rettype` to `T_void`, but ccall will do that for us
# elseif jl_is_uniontype?
elseif !GPUCompiler.deserves_retbox(rettype)
rt = convert(LLVMType, rettype)
if !isa(rt, LLVM.VoidType) && GPUCompiler.deserves_sret(rettype, rt)
before = :(sret = Ref{$rettype}())
pushfirst!(argexprs, :(sret))
pushfirst!(ccall_types, Ptr{rettype})
rettype = Nothing
after = :(sret[])
end
else
# rt = T_prjlvalue
end
end
quote
$before
ret = ccall(f, $rettype, ($(ccall_types...),), $(argexprs...))
$after
end
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 10787 | """
classify_runtime_fn(name)
A :dispatch function is responsible for a "dynamic dispatch" to an unknown Julia
function.
An :alloc function is used by codegen to lower allocations for mutable structs,
arrays, and other Julia objects.
A :runtime function is any function used by the runtime which does not explicitly
perform allocation, but which might allocate to get its job done (e.g. jl_subtype).
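For example (illustrative values, following the classification in the method below):
```julia
classify_runtime_fn("ijl_alloc_string"; ignore_throw = true)   # == (:alloc, true)
classify_runtime_fn("ijl_apply_generic"; ignore_throw = true)  # == (:dispatch, true)
classify_runtime_fn("jl_subtype"; ignore_throw = true)         # == (:runtime, true)
```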
"""
function classify_runtime_fn(name::AbstractString; ignore_throw::Bool)
match_ = match(r"^(ijl_|jl_)(.*)$", name)
isnothing(match_) && return (:unknown, false)
name = match_[2]
may_alloc = fn_may_allocate(name; ignore_throw)
if name in ("alloc_genericmemory", "genericmemory_copy", "genericmemory_copy_slice",
"string_to_genericmemory", "ptr_to_genericmemory", "array_copy", "alloc_string",
"alloc_array_1d", "alloc_array_2d", "alloc_array_3d", "gc_alloc_typed",
"gc_pool_alloc", "gc_pool_alloc_instrumented", "gc_big_alloc_instrumented"
) || occursin(r"^box_.*", name)
return (:alloc, may_alloc)
elseif name in ("f__apply_latest", "f__apply_iterate", "f__apply_pure", "f__call_latest",
"f__call_in_world", "f__call_in_world_total", "f_intrinsic_call", "f_invoke",
"f_opaque_closure_call", "apply", "apply_generic", "gf_invoke",
"gf_invoke_by_method", "gf_invoke_lookup_worlds", "invoke", "invoke_api",
"call", "call0", "call1", "call2", "call3", "unknown_fptr")
return (:dispatch, may_alloc)
else
return (:runtime, may_alloc)
end
end
const generic_method_offsets = Dict{String,Int}(("jl_f__apply_latest" => 2, "ijl_f__apply_latest" => 2,
"jl_f__call_latest" => 2, "ijl_f__call_latest" => 2, "jl_f_invoke" => 2, "jl_invoke" => 1,
"jl_apply_generic" => 1, "ijl_f_invoke" => 2, "ijl_invoke" => 1, "ijl_apply_generic" => 1,
"jl_unknown_fptr" => 0, "ijl_unknown_fptr" => 0))
function resolve_dispatch_target(inst::LLVM.Instruction)
@assert isa(inst, LLVM.CallInst)
fun = LLVM.called_operand(inst)
if isa(fun, LLVM.Function) && in(LLVM.name(fun), keys(generic_method_offsets))
offset = generic_method_offsets[LLVM.name(fun)]
offset == 0 && return nothing
flib = operands(inst)[offset]
flib = unwrap_ptr_casts(flib)
flib = look_through_loads(flib)
if isa(flib, ConstantInt)
rep = reinterpret(Ptr{Cvoid}, convert(Csize_t, flib))
flib = Base.unsafe_pointer_to_objref(rep)
return flib
end
end
return nothing
end
function fn_may_allocate(name::AbstractString; ignore_throw::Bool)
if name in ("egal__unboxed", "lock_value", "unlock_value", "get_nth_field_noalloc",
"load_and_lookup", "lazy_load_and_lookup", "box_bool", "box_int8",
"box_uint8", "excstack_state", "restore_excstack", "enter_handler",
"pop_handler", "f_typeof", "clock_now", "throw", "gc_queue_root", "gc_enable",
"gc_disable_finalizers_internal", "gc_is_in_finalizer", "enable_gc_logging",
"gc_safepoint", "gc_collect", "genericmemory_owner", "get_pgcstack") || occursin(r"^unbox_.*", name)
return false # these functions never allocate
elseif name in ("f_ifelse", "f_typeassert", "f_is", "f_throw", "f__svec_ref",
"genericmemory_copyto")
return ignore_throw == false # these functions only allocate if they throw
else
return true
end
end
function unwrap_ptr_casts(val::LLVM.Value)
while true
is_simple_cast = false
is_simple_cast |= isa(val, LLVM.BitCastInst)
is_simple_cast |= isa(val, LLVM.AddrSpaceCastInst) || isa(val, LLVM.PtrToIntInst)
is_simple_cast |= isa(val, LLVM.ConstantExpr) && opcode(val) == LLVM.API.LLVMAddrSpaceCast
is_simple_cast |= isa(val, LLVM.ConstantExpr) && opcode(val) == LLVM.API.LLVMIntToPtr
is_simple_cast |= isa(val, LLVM.ConstantExpr) && opcode(val) == LLVM.API.LLVMBitCast
if !is_simple_cast
return val
else
val = operands(val)[1]
end
end
end
function look_through_loads(val::LLVM.Value)
if isa(val, LLVM.LoadInst)
val = operands(val)[1]
val = unwrap_ptr_casts(val)
if isa(val, LLVM.GlobalVariable)
val = LLVM.initializer(val)
val = unwrap_ptr_casts(val)
end
end
return val
end
"""
Returns `nothing` if the value could not be resolved statically.
"""
function resolve_static_jl_value_t(val::LLVM.Value)
val = unwrap_ptr_casts(val)
val = look_through_loads(val)
!isa(val, ConstantInt) && return nothing
ptr = reinterpret(Ptr{Cvoid}, convert(UInt, val))
return Base.unsafe_pointer_to_objref(ptr)
end
function transitive_uses(inst::LLVM.Instruction; unwrap = (use)->false)
uses_ = LLVM.Use[]
for use in uses(inst)
if unwrap(use)
append!(uses_, transitive_uses(user(use); unwrap))
else
push!(uses_, use)
end
end
return uses_
end
"""
Returns `nothing` if the type could not be resolved statically.
"""
function resolve_allocations(call::LLVM.Value)
@assert isa(call, LLVM.CallInst)
fn = LLVM.called_operand(call)
!isa(fn, LLVM.Function) && return nothing
name = LLVM.name(fn)
# Strip off the "jl_" or "ijl_" prefix
match_ = match(r"^(ijl_|jl_)(.*)$", name)
isnothing(match_) && return nothing
name = match_[2]
if name in ("gc_pool_alloc_instrumented", "gc_big_alloc_instrumented", "gc_alloc_typed")
type = resolve_static_jl_value_t(operands(call)[end-1])
return type !== nothing ? [(call, type)] : nothing
elseif name in ("alloc_array_1d", "alloc_array_2d", "alloc_array_3d")
type = resolve_static_jl_value_t(operands(call)[1])
return type !== nothing ? [(call, type)] : nothing
elseif name == "alloc_string"
return [(call, String)]
elseif name == "array_copy"
return [(call, Array)]
elseif name in ("genericmemory_copy", "genericmemory_copy_slice", "ptr_to_genericmemory")
@assert VERSION > v"1.11.0-DEV.753"
return [(call, Memory)]
elseif name == "string_to_genericmemory"
@assert VERSION > v"1.11.0-DEV.753"
return [(call, Memory{UInt8})]
elseif name == "alloc_genericmemory"
type = resolve_static_jl_value_t(operands(call)[1])
return [(call, type !== nothing ? type : Memory)]
elseif occursin(r"^box_(.*)", name)
typestr = match(r"^box_(.*)", name).captures[end]
typestr == "bool" && return [(call, Bool)]
typestr == "char" && return [(call, Char)]
typestr == "float32" && return [(call, Float32)]
typestr == "float64" && return [(call, Float64)]
typestr == "int16" && return [(call, Int16)]
typestr == "int32" && return [(call, Int32)]
typestr == "int64" && return [(call, Int64)]
typestr == "int8" && return [(call, Int8)]
typestr == "slotnumber" && return [(call, Core.SlotNumber)]
typestr == "ssavalue" && return [(call, Core.SSAValue)]
typestr == "uint16" && return [(call, UInt16)]
typestr == "uint32" && return [(call, UInt32)]
typestr == "uint64" && return [(call, UInt64)]
typestr == "uint8" && return [(call, UInt8)]
typestr == "uint8pointer" && return [(call, Ptr{UInt8})]
typestr == "voidpointer" && return [(call, Ptr{Cvoid})]
@assert false # above is exhaustive
elseif name == "gc_pool_alloc"
seen = Set()
allocs = Tuple{LLVM.Instruction, Any}[]
for calluse in transitive_uses(call; unwrap = (use)->user(use) isa LLVM.BitCastInst)
gep = user(calluse)
!isa(gep, LLVM.GetElementPtrInst) && continue
# Check that this points into the type tag (at a -1 offset)
offset = operands(gep)[2]
!isa(offset, LLVM.ConstantInt) && continue
(convert(Int, offset) != -1) && continue
# Now, look for the store into the type tag and count that as our allocation(s)
for gepuse in uses(gep)
store = user(gepuse)
!isa(store, LLVM.StoreInst) && continue
# It is possible for the optimizer to merge multiple distinct `gc_pool_alloc`
# allocations which actually have distinct types, so here we count each type
# tag store as a separate allocation.
type_tag = operands(store)[1]
type = resolve_static_jl_value_t(type_tag)
if type === nothing
type = Any
end
type in seen && continue
push!(seen, type)
push!(allocs, (store, type))
end
end
return allocs
end
return nothing
end
"""
Resolve the callee of a call embedded in Julia-constructed LLVM IR
and replace it with a new locally-declared function that has the
resolved name as its identifier.
"""
function rename_call!(call::LLVM.CallInst, mod::LLVM.Module)
callee = called_operand(call)
if isa(callee, LLVM.LoadInst)
fn_got = unwrap_ptr_casts(operands(callee)[1])
fname = name(fn_got)
match_ = match(r"^jlplt_(.*)_\d+_got$", fname)
match_ === nothing && return
fname = match_[1]
elseif isa(callee, ConstantExpr)
# extract the literal pointer
ptr_arg = unwrap_ptr_casts(callee)
@assert isa(ptr_arg, LLVM.ConstantInt)
ptr = Ptr{Cvoid}(convert(Int, ptr_arg))
# look it up in the Julia JIT cache
frames = ccall(:jl_lookup_code_address, Any, (Ptr{Cvoid}, Cint,), ptr, 0)
length(frames) == 0 && return
fn, file, line, linfo, fromC, inlined = last(frames)
fname = string(fn)
elseif isa(callee, InlineAsm)
return # these are emitted for GC operations
elseif isa(callee, LLVM.Function)
return # function is already compile-time-known and expanded
else
# Call to a runtime-determined function pointer, usually an OpaqueClosure
# or a ccall that we were not able to fully resolve.
#
# We label this as a DynamicDispatch to an unknown function target.
fname = "jl_unknown_fptr"
end
# Re-write function call to use a locally-created version with a nice name
lfn = LLVM.API.LLVMGetNamedFunction(mod, fname)
if lfn == C_NULL
lfn = LLVM.API.LLVMAddFunction(mod, Symbol(fname), LLVM.API.LLVMGetCalledFunctionType(call))
end
LLVM.API.LLVMSetOperand(call, LLVM.API.LLVMGetNumOperands(call) - 1, lfn)
return nothing
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 5903 | import LLVM, GPUCompiler
using LLVM: TargetMachine, @dispose
using GPUCompiler: CompilerConfig, CompilerJob, MemoryBuffer, NativeCompilerTarget, JuliaContext, ThreadSafeContext, run!
include("compiler_utils.jl")
function __init__()
opt_level = Base.JLOptions().opt_level
tm[] = LLVM.JITTargetMachine(LLVM.triple(), cpu_name(), cpu_features();
optlevel = llvm_codegen_level(opt_level))
LLVM.asm_verbosity!(tm[], true)
lljit = LLVM.has_julia_ojit() ? LLVM.JuliaOJIT() : LLVM.LLJIT(; tm=tm[])
jd_main = LLVM.JITDylib(lljit)
prefix = LLVM.get_prefix(lljit)
dg = LLVM.CreateDynamicLibrarySearchGeneratorForProcess(prefix)
LLVM.add!(jd_main, dg)
# TODO: Do we need this trick from Enzyme?
# if Sys.iswindows() && Int === Int64
# # TODO can we check isGNU?
# define_absolute_symbol(jd_main, mangle(lljit, "___chkstk_ms"))
# end
es = LLVM.ExecutionSession(lljit)
try
lctm = LLVM.LocalLazyCallThroughManager(GPUCompiler.triple(lljit), es)
ism = LLVM.LocalIndirectStubsManager(GPUCompiler.triple(lljit))
jit[] = CompilerInstance(lljit, lctm, ism)
catch err
@warn "OrcV2 initialization failed with" err
jit[] = CompilerInstance(lljit, nothing, nothing)
end
end
@static if LLVM.has_julia_ojit()
struct CompilerInstance
jit::LLVM.JuliaOJIT
lctm::Union{LLVM.LazyCallThroughManager, Nothing}
ism::Union{LLVM.IndirectStubsManager, Nothing}
end
else
struct CompilerInstance
jit::LLVM.LLJIT
lctm::Union{LLVM.LazyCallThroughManager, Nothing}
ism::Union{LLVM.IndirectStubsManager, Nothing}
end
end
struct CompileResult{Success, F, TT, RT}
f_ptr::Ptr{Cvoid}
arg_types::Type{TT}
return_type::Type{RT}
func::F
analysis # TODO: add type
end
# lock + JIT objects
const codegen_lock = ReentrantLock()
const jit = Ref{CompilerInstance}()
const tm = Ref{TargetMachine}() # for opt pipeline
# cache of kernel instances
const _kernel_instances = Dict{Any, Any}()
const compiler_cache = Dict{Any, CompileResult}()
const config = CompilerConfig(DefaultCompilerTarget(), NativeParams();
kernel=false, entry_abi = :specfunc, always_inline=false)
const NativeCompilerJob = CompilerJob{NativeCompilerTarget,NativeParams}
GPUCompiler.can_safepoint(@nospecialize(job::NativeCompilerJob)) = true
GPUCompiler.runtime_module(::NativeCompilerJob) = Runtime
function optimize!(@nospecialize(job::CompilerJob), mod::LLVM.Module)
triple = GPUCompiler.llvm_triple(job.config.target)
tm = GPUCompiler.llvm_machine(job.config.target)
if VERSION >= v"1.10-beta3"
@dispose pb = LLVM.PassBuilder(tm) begin
@dispose mpm = LLVM.NewPMModulePassManager(pb) begin
build_newpm_pipeline!(pb, mpm)
run!(mpm, mod, tm)
end
end
else
@dispose pm=LLVM.ModulePassManager() begin
build_oldpm_pipeline!(pm)
run!(pm, mod)
end
end
end
"""
compile_callable(f, tt=Tuple{}; kwargs...)
Low-level interface to compile a function invocation for the provided function and tuple of
argument types using the naive JuliaOJIT() pipeline.
The output of this function is automatically cached, so that new code will be generated
automatically and checked for allocations whenever the function changes or when different
types or keyword arguments are provided.
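For example (an illustrative sketch; `sin` compiles without allocations when throwing paths are ignored):
```julia
callable = compile_callable(sin, Tuple{Float64})  # compile & cache a specialization of `sin(::Float64)`
callable(1.0)                                     # invoke it; errors instead if allocations had been found
```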
"""
function compile_callable(f::F, tt::TT=Tuple{}; ignore_throw=true) where {F, TT}
# cuda = active_state()
Base.@lock codegen_lock begin
# compile the function
cache = compiler_cache
source = GPUCompiler.methodinstance(F, tt)
rt = Core.Compiler.return_type(f, tt)
function compile(@nospecialize(job::CompilerJob))
return JuliaContext() do ctx
mod, meta = GPUCompiler.compile(:llvm, job, validate=false)
optimize!(job, mod)
clone = copy(mod)
analysis = find_allocs!(mod, meta; ignore_throw)
# TODO: This is the wrong meta
return clone, meta, analysis
end
end
function link(@nospecialize(job::CompilerJob), (mod, meta, analysis))
return JuliaContext() do ctx
lljit = jit[].jit
jd = LLVM.JITDylib(lljit)
buf = convert(MemoryBuffer, mod)
tsm = ThreadSafeContext() do ctx
mod = parse(LLVM.Module, buf)
GPUCompiler.ThreadSafeModule(mod)
end
LLVM.add!(lljit, jd, tsm)
f_ptr = pointer(LLVM.lookup(lljit, LLVM.name(meta.entry)))
if f_ptr == C_NULL
throw(GPUCompiler.InternalCompilerError(job,
"Failed to compile @check_allocs function"))
end
if length(analysis) == 0
CompileResult{true, typeof(f), tt, rt}(f_ptr, tt, rt, f, analysis)
else
CompileResult{false, typeof(f), tt, rt}(f_ptr, tt, rt, f, analysis)
end
end
end
fun = GPUCompiler.cached_compilation(cache, source, config, compile, link)
# create a callable object that captures the function instance. we don't need to think
# about world age here, as GPUCompiler already does and will return a different object
key = (objectid(source), hash(fun), f)
return get(_kernel_instances, key, fun)::CompileResult
end
end
function (f::CompileResult{Success, F, TT, RT})(args...) where {Success, F, TT, RT}
if Success
return abi_call(f.f_ptr, RT, TT, f.func, args...)
else
error("@check_allocs function contains ", length(f.analysis), " allocations.")
end
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 1620 | import LLVM, GPUCompiler
using GPUCompiler: NativeCompilerTarget
struct NativeParams <: GPUCompiler.AbstractCompilerParams end
DefaultCompilerTarget(; kwargs...) = NativeCompilerTarget(; jlruntime=true, kwargs...)
function llvm_codegen_level(opt_level::Integer)
if opt_level < 2
optlevel = LLVM.API.LLVMCodeGenLevelNone
elseif opt_level == 2
optlevel = LLVM.API.LLVMCodeGenLevelDefault
else
optlevel = LLVM.API.LLVMCodeGenLevelAggressive
end
end
function cpu_name()
ccall(:jl_get_cpu_name, String, ())
end
function cpu_features()
if VERSION >= v"1.10.0-beta1"
return ccall(:jl_get_cpu_features, String, ())
end
@static if Sys.ARCH == :x86_64 ||
Sys.ARCH == :x86
return "+mmx,+sse,+sse2,+fxsr,+cx8" # mandated by Julia
else
return ""
end
end
if VERSION >= v"1.10-beta3"
function build_newpm_pipeline!(pb::LLVM.PassBuilder, mpm::LLVM.NewPMModulePassManager, speedup=2, size=0, lower_intrinsics=true,
dump_native=false, external_use=false, llvm_only=false,)
ccall(:jl_build_newpm_pipeline, Cvoid,
(LLVM.API.LLVMModulePassManagerRef, LLVM.API.LLVMPassBuilderRef, Cint, Cint, Cint, Cint, Cint, Cint),
mpm, pb, speedup, size, lower_intrinsics, dump_native, external_use, llvm_only)
end
else
function build_oldpm_pipeline!(pm::LLVM.ModulePassManager, opt_level=2, lower_intrinsics=true)
ccall(:jl_add_optimization_passes, Cvoid,
(LLVM.API.LLVMPassManagerRef, Cint, Cint),
pm, opt_level, lower_intrinsics)
end
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 5401 | using ExprTools: splitdef, combinedef
using MacroTools: splitarg, combinearg
function extract_keywords(ex0)
kws = Dict{Symbol, Any}()
arg = ex0[end]
for i in 1:length(ex0)-1
x = ex0[i]
if x isa Expr && x.head === :(=) # Keyword given of the form "foo=bar"
if length(x.args) != 2
error("Invalid keyword argument: $x")
end
if x.args[1] != :ignore_throw || !(x.args[2] isa Bool)
return error("@check_allocs received unexpected argument: $(x)")
end
kws[x.args[1]] = x.args[2]
else
return error("@check_allocs expects only one non-keyword argument")
end
end
return kws, arg
end
"""
@check_allocs ignore_throw=true (function def)
Wraps the provided function definition so that all calls to it will be automatically
checked for allocations.
If the check fails, an `AllocCheckFailure` exception is thrown containing the detailed
failures, including the backtrace for each defect.
Note: All calls to the wrapped function are effectively a dynamic dispatch, which
means they are type-unstable and may allocate memory at function _entry_. `@check_allocs`
only guarantees the absence of allocations after the function has started running.
# Example
```jldoctest
julia> @check_allocs multiply(x,y) = x*y
multiply (generic function with 1 method)
julia> multiply(1.5, 3.5) # no allocations for Float64
5.25
julia> multiply(rand(3,3), rand(3,3)) # matmul needs to allocate the result
ERROR: @check_allocs function contains 1 allocations.
Stacktrace:
[1] macro expansion
@ ~/repos/AllocCheck/src/macro.jl:134 [inlined]
[2] multiply(x::Matrix{Float64}, y::Matrix{Float64})
@ Main ./REPL[2]:133
[3] top-level scope
@ REPL[5]:1
```
"""
macro check_allocs(ex...)
kws, body = extract_keywords(ex)
if Base.is_function_def(body)
return _check_allocs_macro(body, __module__, __source__; kws...)
else
error("@check_allocs used on something other than a function definition")
end
end
function normalize_args!(func_def)
name = get(func_def, :name, :(__anon))
if haskey(func_def, :name)
# e.g. function (f::Foo)(a::Int, b::Int)
func_def[:name] isa Expr && (pushfirst!(func_def[:args], name);)
func_def[:name] = gensym(name isa Symbol ? name : gensym())
end
if haskey(func_def, :kwargs)
if !haskey(func_def, :args)
func_def[:args] = Any[]
end
for arg in func_def[:kwargs]
Meta.isexpr(arg, :kw) && (arg = arg.args[1];)
push!(func_def[:args], arg)
end
empty!(func_def[:kwargs])
end
end
"""
Takes a function definition and returns the expressions needed to forward the arguments to an inner function.
For example `function foo(a, ::Int, c...; x, y=1, z...)` will
1. modify the function to `gensym()` nameless arguments
2. return `(:a, gensym(), :(c...)), (:x, :y, :(z...)))`
"""
function forward_args!(func_def)
args = []
if haskey(func_def, :name) && func_def[:name] isa Expr
name, type, splat, default = splitarg(func_def[:name])
name = something(name, gensym())
push!(args, splat ? :($name...) : name)
func_def[:name] = combinearg(name, type, splat, default)
end
if haskey(func_def, :args)
func_def[:args] = map(func_def[:args]) do arg
name, type, splat, default = splitarg(arg)
name = something(name, gensym())
push!(args, splat ? :($name...) : name)
combinearg(name, type, splat, default)
end
end
kwargs = []
if haskey(func_def, :kwargs)
for arg in func_def[:kwargs]
name, type, splat, default = splitarg(arg)
push!(kwargs, splat ? :($name...) : name)
end
end
args, kwargs
end
function _check_allocs_macro(ex::Expr, mod::Module, source::LineNumberNode; ignore_throw=true)
# Transform original function to a renamed version with flattened args
def = splitdef(deepcopy(ex))
normalize_args!(def)
original_fn = combinedef(def)
f_sym = haskey(def, :name) ? gensym(def[:name]) : gensym()
# Next, create a wrapper function that will compile the original function on-the-fly.
def = splitdef(ex)
fwd_args, fwd_kwargs = forward_args!(def)
haskey(def, :name) && (def[:name] = esc(def[:name]);)
haskey(def, :args) && (def[:args] = esc.(def[:args]);)
haskey(def, :kwargs) && (def[:kwargs] = esc.(def[:kwargs]);)
haskey(def, :whereparams) && (def[:whereparams] = esc.(def[:whereparams]);)
# The way that `compile_callable` works is by doing a dynamic dispatch and
# on-the-fly compilation.
def[:body] = quote
callable_tt = Tuple{map(Core.Typeof, ($(esc.(fwd_args)...),$(esc.(fwd_kwargs)...)))...}
callable = $compile_callable($f_sym, callable_tt; ignore_throw=$ignore_throw)
if (length(callable.analysis) > 0)
throw(AllocCheckFailure(callable.analysis))
end
callable($(esc.(fwd_args)...), $(esc.(fwd_kwargs)...))
end
# Replace function definition line number node with that from source
@assert def[:body].args[1] isa LineNumberNode
def[:body].args[1] = source
wrapper_fn = combinedef(def)
return quote
local $f_sym = $(esc(original_fn))
Base.@__doc__ $(wrapper_fn)
end
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 2891 | import LLVM
# generate a pseudo-backtrace from LLVM IR instruction debug information
#
# this works by looking up the debug information of the instruction, and inspecting the call
# sites of the containing function. if there's only one, repeat the process from that call.
# finally, the debug information is converted to a Julia stack trace.
function backtrace_(inst::LLVM.Instruction, bt=StackTraces.StackFrame[]; compiled::Union{Nothing,Dict{Any,Any}}=nothing)
done = Set{LLVM.Instruction}()
while true
if in(inst, done)
break
end
push!(done, inst)
f = LLVM.parent(LLVM.parent(inst))
# look up the debug information from the current instruction
if haskey(metadata(inst), LLVM.MD_dbg)
loc = metadata(inst)[LLVM.MD_dbg]
while loc !== nothing
scope = LLVM.scope(loc)
if scope !== nothing
emitted_name = LLVM.name(f)
name = replace(LLVM.name(scope), r";$" => "")
file = LLVM.file(scope)
path = joinpath(LLVM.directory(file), LLVM.filename(file))
line = LLVM.line(loc)
linfo = nothing
from_c = false
inlined = LLVM.inlined_at(loc) !== nothing
!inlined && for (mi, (; ci, func, specfunc)) in compiled
if safe_name(func) == emitted_name || safe_name(specfunc) == emitted_name
linfo = mi
break
end
end
push!(bt, StackTraces.StackFrame(Symbol(name), Symbol(path), line,
linfo, from_c, inlined, 0))
end
loc = LLVM.inlined_at(loc)
end
end
# move up the call chain
## functions can be used as a *value* in eg. constant expressions, so filter those out
callers = filter(val -> isa(user(val), LLVM.CallInst), collect(uses(f)))
## get rid of calls without debug info
filter!(callers) do call
md = metadata(user(call))
haskey(md, LLVM.MD_dbg)
end
if !isempty(callers)
# figure out the call sites of this instruction
call_sites = unique(callers) do call
# there could be multiple calls, originating from the same source location
md = metadata(user(call))
md[LLVM.MD_dbg]
end
if length(call_sites) > 1
frame = StackTraces.StackFrame("multiple call sites", "unknown", 0)
push!(bt, frame)
elseif length(call_sites) == 1
inst = user(first(call_sites))
continue
end
end
break
end
return bt
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 4993 |
struct AllocatingRuntimeCall
name::String
backtrace::Vector{Base.StackTraces.StackFrame}
end
function Base.hash(self::AllocatingRuntimeCall, h::UInt)
return Base.hash(self.name, nice_hash(self.backtrace, h))
end
function Base.:(==)(self::AllocatingRuntimeCall, other::AllocatingRuntimeCall)
return (self.name == other.name) && (nice_isequal(self.backtrace,other.backtrace))
end
function Base.show(io::IO, call::AllocatingRuntimeCall)
if length(call.backtrace) == 0
Base.printstyled(io, "Allocating runtime call", color=:red, bold=true)
# TODO: Even when backtrace fails, we should report at least 1 stack frame
Base.println(io, " to \"", call.name, "\" in unknown location")
else
Base.printstyled(io, "Allocating runtime call", color=:red, bold=true)
Base.println(io, " to \"", call.name, "\" in ", call.backtrace[1].file, ":", call.backtrace[1].line)
show_backtrace_and_excerpt(io, call.backtrace)
end
end
struct DynamicDispatch
backtrace::Vector{Base.StackTraces.StackFrame}
fname::Any
end
function Base.hash(self::DynamicDispatch, h::UInt)
return Base.hash(self.fname, nice_hash(self.backtrace, h))
end
function Base.:(==)(self::DynamicDispatch, other::DynamicDispatch)
return (self.fname === other.fname) && (nice_isequal(self.backtrace,other.backtrace))
end
function Base.show(io::IO, dispatch::DynamicDispatch)
Base.printstyled(io, "Dynamic dispatch", color=:magenta, bold=true)
if dispatch.fname !== nothing
Base.print(io, " to function ")
Base.printstyled(io, dispatch.fname, bold=true)
end
if length(dispatch.backtrace) == 0
# TODO: Even when backtrace fails, we should report at least 1 stack frame
Base.println(io," in unknown location")
else
Base.println(io," in ", dispatch.backtrace[1].file, ":", dispatch.backtrace[1].line)
show_backtrace_and_excerpt(io, dispatch.backtrace)
end
end
struct AllocationSite
type::Any
backtrace::Vector{Base.StackTraces.StackFrame}
end
function nice_hash(backtrace::Vector{Base.StackTraces.StackFrame}, h::UInt)
# `func_id` - Uniquely identifies this function (a method instance in julia, and
# a function in C/C++).
# Note that this should be unique even for several different functions all
# inlined into the same frame.
for frame in backtrace
h = if frame.linfo !== nothing
hash(frame.linfo, h)
else
hash((frame.func, frame.file, frame.line, frame.inlined), h)
end
end
return h
end
function nice_isequal(self::Vector{Base.StackTraces.StackFrame}, other::Vector{Base.StackTraces.StackFrame})
if length(self) != length(other)
return false
end
for (a, b) in zip(self, other)
if a.linfo !== b.linfo
return false
end
if a.func !== b.func
return false
end
if a.file !== b.file
return false
end
if a.line !== b.line
return false
end
if a.inlined !== b.inlined
return false
end
end
return true
end
function Base.hash(alloc::AllocationSite, h::UInt)
return Base.hash(alloc.type, nice_hash(alloc.backtrace, h))
end
function Base.:(==)(self::AllocationSite, other::AllocationSite)
return (self.type == other.type) && (nice_isequal(self.backtrace,other.backtrace))
end
function Base.show(io::IO, alloc::AllocationSite)
if length(alloc.backtrace) == 0
Base.printstyled(io, "Allocation", color=:red, bold=true)
# TODO: Even when backtrace fails, we should report at least 1 stack frame
Base.println(io, " of ", alloc.type, " in unknown location")
else
Base.printstyled(io, "Allocation", color=:red, bold=true)
Base.println(io, " of ", alloc.type, " in ", alloc.backtrace[1].file, ":", alloc.backtrace[1].line)
show_backtrace_and_excerpt(io, alloc.backtrace)
end
end
struct AllocCheckFailure
errors::Vector
end
function Base.show(io::IO, failure::AllocCheckFailure)
allocs = count(err isa AllocationSite || err isa AllocatingRuntimeCall for err in failure.errors)
dispatches = count(err isa DynamicDispatch for err in failure.errors)
Base.print(io, "@check_allocs function encountered ")
Base.printstyled(io, length(failure.errors), color=:red, bold=true)
Base.print(io, " errors ($(allocs) allocations / $(dispatches) dynamic dispatches).")
end
function show_backtrace_and_excerpt(io::IO, backtrace::Vector{Base.StackTraces.StackFrame})
# Print code excerpt of the reported site
try
source = open(fixup_source_path(backtrace[1].file))
Base.print(io, " | ")
Base.println(io, strip(readlines(source)[backtrace[1].line]))
close(source)
catch
Base.print(io, " | (source not available)")
end
# Print backtrace
Base.show_backtrace(io, backtrace)
Base.println(io)
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 417 | """
path = fixup_source_path(path)
Return a normalized, absolute path for a source file `path`.
"""
function fixup_source_path(file)
file = string(file)
if !isabspath(file)
# This may be a Base or Core method
newfile = Base.find_source_file(file)
if isa(newfile, AbstractString)
file = normpath(newfile)
end
end
return Base.fixup_stdlib_path(file)
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | code | 12118 | using AllocCheck
using AllocCheck: AllocatingRuntimeCall, DynamicDispatch, AllocationSite
using Test
mutable struct Foo{T}
val::T
end
function alloc_in_catch()
try
if Base.inferencebarrier(false)
# Prevent catch from being elided
error()
end
catch
return Foo{Float64}(1.5) # in catch block: filtered by `ignore_throw=true`
end
return Foo{Int}(1)
end
function same_ccall()
a = Array{Int}(undef,5,5)
b = Array{Int}(undef,5,5)
a,b
end
function throw_eof()
throw(EOFError())
end
function toggle_gc()
GC.enable(false)
GC.enable(true)
end
function run_gc_explicitly()
GC.gc()
end
function call_jl_call(x::Int, y::Int)
return ccall(:jl_call2, Any, (#= Function =# Any, Any, Any), +, x, y)
end
const f = Base.Experimental.@opaque (x::Int, y::Int) -> x + y
function call_opaque_closure(x::Int, y::Int)
return f(x, y)
end
@testset "Number of Allocations" begin
@test length(check_allocs(mod, (Float64,Float64); ignore_throw=false)) == 0
@test length(check_allocs(sin, (Float64,); ignore_throw=false)) > 0
@test length(check_allocs(sin, (Float64,); ignore_throw=true)) == 0
@test length(check_allocs(*, (Matrix{Float64},Matrix{Float64}); ignore_throw=true)) != 0
@test length(check_allocs(alloc_in_catch, (); ignore_throw=false)) == 2
@test length(check_allocs(alloc_in_catch, (); ignore_throw=true)) == 1
@test length(check_allocs(same_ccall, (); ignore_throw=false)) > 0
@test length(check_allocs(same_ccall, (); ignore_throw=true)) > 0
@test length(check_allocs(first, (Core.SimpleVector,); ignore_throw = false)) > 0
@test length(check_allocs(first, (Core.SimpleVector,); ignore_throw = true)) == 0
@test length(check_allocs(time, (); ignore_throw = false)) == 0
@test length(check_allocs(throw_eof, (); ignore_throw = false)) == 0
@test length(check_allocs(toggle_gc, (); ignore_throw = false)) == 0
@test length(check_allocs(run_gc_explicitly, (); ignore_throw = false)) == 0
@test_throws MethodError check_allocs(sin, (String,); ignore_throw=false)
allocs = check_allocs(call_jl_call, (Int, Int); ignore_throw = false)
@test length(allocs) == 2
@test all((alloc isa AllocationSite && allocs[1].type === Int64) ||
(alloc isa DynamicDispatch && allocs[2].fname === nothing)
for alloc in allocs)
allocs = check_allocs(call_opaque_closure, (Int, Int); ignore_throw = false)
@test length(allocs) > 0 && any(alloc isa DynamicDispatch for alloc in allocs)
end
@testset "@check_allocs macro (syntax)" begin
struct Bar
val::Float64
end
# Standard function syntax
@check_allocs function my_mod0()
return 1.5
end
@check_allocs function my_mod1(x, y)
return mod(x, y)
end
@check_allocs function my_mod2(x::Float64, y)
return mod(x, y)
end
@check_allocs function my_mod3(::Float64, y)
return y * y
end
@check_allocs function my_mod4(::Float64, y::Float64)
return y * y
end
@check_allocs function (x::Bar)(y::Float64)
return mod(x.val, y)
end
x,y = Base.rand(2)
@test my_mod0() == 1.5
@test my_mod1(x, y) == mod(x, y)
@test my_mod2(x, y) == mod(x, y)
@test my_mod3(x, y) == y * y
@test my_mod4(x, y) == y * y
@test Bar(x)(y) == mod(x, y)
struct Baz
val::Float64
end
# Standard function syntax (w/ kwargs)
@check_allocs function my_mod5(; offset=1.0)
return 2 * offset
end
@check_allocs function my_mod6(x, y; offset=1.0)
return mod(x, y) + offset
end
@check_allocs function my_mod7(x::Float64, y; offset)
return mod(x, y) + offset
end
@check_allocs function (x::Baz)(y::Float64; a, b)
return mod(x.val, y) + a + b
end
@check_allocs function (x::Baz)(y::Float32; a, b=1.0)
return mod(x.val, y) + a + b
end
offset = Base.rand()
a,b = Base.rand(2)
@test my_mod5(; offset) == 2 * offset
@test my_mod5() == 2.0
@test my_mod6(x, y; offset) == mod(x, y) + offset
@test my_mod6(x, y) == mod(x, y) + 1.0
@test my_mod7(x, y; offset) == mod(x, y) + offset
@test Baz(x)(y; a, b) == mod(x, y) + a + b
@test Baz(x)(y; b, a) == mod(x, y) + a + b
@test Baz(x)(Float32(y); b, a) == mod(x, Float32(y)) + a + b
@test Baz(x)(Float32(y); a) == mod(x, Float32(y)) + a + 1.0
# (x,y) -> x*y
f0 = @check_allocs () -> 1.5
@test f0() == 1.5
f1 = @check_allocs (x,y) -> x * y
@test f1(x, y) == x * y
f2 = @check_allocs (x::Int,y::Float64) -> x * y
@test f2(3, y) == 3 * y
f3 = @check_allocs (::Int,y::Float64) -> y * y
@test f3(1, y) == y * y
# foo(x) = x^2
@check_allocs mysum0() = 1.5
@test mysum0() == 1.5
@check_allocs mysum1(x, y) = x + y
@test mysum1(x, y) == x + y
@check_allocs mysum2(x::Float64, y::Float64) = x + y
@test mysum2(x, y) == x + y
@check_allocs (x::Baz)(y::Baz) = x.val + y.val
@test Baz(x)(Baz(y)) == x + y
end
@testset "@check_allocs macro (behavior)" begin
# The check should raise errors only for problematic argument types
@check_allocs mymul(x,y) = x * y
@test mymul(1.5, 2.5) == 1.5 * 2.5
@test_throws AllocCheckFailure mymul(rand(10,10), rand(10,10))
# If provided, ignore_throw=false should include allocations that
# happen only on error paths
@check_allocs ignore_throw=false alloc_on_throw1(cond) =
(cond && (error("This exception allocates");); return 1.5)
@test_throws AllocCheckFailure alloc_on_throw1(false)
@check_allocs ignore_throw=true alloc_on_throw2(cond) =
(cond && (error("This exception allocates");); return 1.5)
@test alloc_on_throw2(false) === 1.5
end
@testset "Types of Allocations" begin
if VERSION > v"1.11.0-DEV.753"
@test any(x isa AllocatingRuntimeCall && x.name == "jl_genericmemory_copyto"
for x in check_allocs(copyto!, (Memory{Int}, Int, Memory{Int}, Int); ignore_throw = false))
@test !any(x isa AllocatingRuntimeCall && x.name == "jl_genericmemory_copyto"
for x in check_allocs(copyto!, (Memory{Int}, Int, Memory{Int}, Int); ignore_throw = true))
@test all(x isa AllocationSite && x.type == Memory{Int} # uses jl_alloc_genericmemory
for x in check_allocs(Memory{Int}, (typeof(undef), Int); ignore_throw = false))
@test any(x isa AllocationSite && x.type == Memory # uses jl_genericmemory_copy
for x in check_allocs(copy, (Memory{Int},)))
@test any(x isa AllocationSite && x.type == Memory # uses jl_genericmemory_copy_slice
for x in check_allocs(copy, (Vector{Int},)))
@test all(x isa DynamicDispatch || (x isa AllocationSite && x.type == Memory{UInt8}) # uses jl_string_to_genericmemory
for x in check_allocs(Base.array_new_memory, (Memory{UInt8}, Int)))
# Marked broken because the `Expr(:foreigncall, QuoteNode(:jl_alloc_string), ...)` should be resolved
    # by AllocCheck.jl, but is instead (conservatively) marked as a DynamicDispatch.
#
# We get thrown off by the `jl_load_and_lookup` machinery here.
@test_broken all(x isa AllocationSite && x.type == Memory{UInt8} # uses jl_string_to_genericmemory
for x in check_allocs(Base.array_new_memory, (Memory{UInt8}, Int)))
@test all(x isa AllocationSite && x.type == Memory{Int} # uses jl_alloc_genericmemory
for x in check_allocs(Base.array_new_memory, (Memory{Int}, Int)))
@test all(x isa AllocationSite && x.type == Memory # uses jl_ptr_to_genericmemory
for x in check_allocs(Base.unsafe_wrap, (Type{Memory{Int}}, Ptr{Int}, Int)))
@test length(check_allocs(Base.mightalias, (Memory{Int},Memory{Int}))) == 0 # uses jl_genericmemory_owner (intercepted)
end
@test any(alloc.type == Base.RefValue{Int} for alloc in check_allocs(()->Ref{Int}(), ()))
allocs1 = check_allocs(()->Ref{Vector{Int64}}(Int64[]), ())
@test any(alloc.type == Base.RefValue{Vector{Int64}} for alloc in allocs1)
@test any(alloc.type == Vector{Int64} for alloc in allocs1)
end
@testset "Error types" begin
# All error types should support Base.show()
iob = IOBuffer()
alloc_with_no_bt = AllocationSite(Float32, Base.StackTraces.StackFrame[])
show(iob, alloc_with_no_bt)
@test occursin("unknown location", String(take!(iob)))
alloc_with_bt = AllocationSite(Float32, Base.stacktrace())
show(iob, alloc_with_bt) === nothing
@test !occursin("unknown location", String(take!(iob)))
call_with_no_bt = AllocatingRuntimeCall("jl_subtype", Base.StackTraces.StackFrame[])
show(iob, call_with_no_bt)
@test occursin("unknown location", String(take!(iob)))
call_with_bt = AllocatingRuntimeCall("jl_subtype", Base.stacktrace())
show(iob, call_with_bt) === nothing
@test !occursin("unknown location", String(take!(iob)))
dispatch_with_no_bt_nothing = DynamicDispatch(Base.StackTraces.StackFrame[], nothing)
show(iob, dispatch_with_no_bt_nothing)
@test occursin("unknown location", String(take!(iob)))
dispatch_with_no_bt_foo = DynamicDispatch(Base.StackTraces.StackFrame[], :foo)
show(iob, dispatch_with_no_bt_foo)
@test occursin("to function foo", String(take!(iob)))
dispatch_with_bt_nothing = DynamicDispatch(Base.stacktrace(), nothing)
show(iob, dispatch_with_bt_nothing) === nothing
@test !occursin("unknown location", String(take!(iob)))
dispatch_with_bt_foo = DynamicDispatch(Base.stacktrace(), :foo)
show(iob, dispatch_with_bt_foo) === nothing
@test !occursin("unknown location", String(take!(iob)))
# All error types should implement the required Base.:(==) and Base.hash for uniquing
uniqued = unique([
alloc_with_no_bt, alloc_with_bt, call_with_no_bt, call_with_bt,
dispatch_with_no_bt_nothing, dispatch_with_bt_nothing,
dispatch_with_no_bt_foo, dispatch_with_bt_foo,
alloc_with_no_bt, alloc_with_bt, call_with_no_bt, call_with_bt,
dispatch_with_no_bt_nothing, dispatch_with_bt_nothing,
dispatch_with_no_bt_foo, dispatch_with_bt_foo,
])
@test uniqued == [
alloc_with_no_bt, alloc_with_bt, call_with_no_bt, call_with_bt,
dispatch_with_no_bt_nothing, dispatch_with_bt_nothing,
dispatch_with_no_bt_foo, dispatch_with_bt_foo,
]
end
@testset "repeated allocations" begin
send_control(u) = sum(abs2, u) # Dummy control function
calc_control() = 1.0
get_measurement() = [1.0]
function example_loop(data, t_start, y_start, y_old, ydf, Ts, N, r)
for i = 1:N
t = time() - t_start
y = get_measurement()[] - y_start # Subtract initial position for a smoother experience
yd = (y - y_old) / Ts
ydf = 0.9*ydf + 0.1*yd
# r = 45sin(2π*freq*t)
u = calc_control()
send_control([u])
log = [t, y, ydf, u, r(t)]
data[:, i] .= log
y_old = y
end
end
## Generate some example input data
r = t->(2 + 2floor(t/4)^2)
N = 10
data = Matrix{Float64}(undef, 5, N)
t_start = 1.0
y_start = 0.0
y_old = 0.0
ydf = 0.0
Ts = 1.0
typetuple = typeof.((data, t_start, y_start, y_old, ydf, Ts, N, r))
@test allunique(check_allocs(example_loop, typetuple, ignore_throw=true))
foobar() = stacktrace()
stack1 = foobar()
stack2 = stacktrace()
allocs = [AllocCheck.AllocationSite(Any,stack1), AllocCheck.AllocationSite(Any,stack2), AllocCheck.AllocationSite(Any,stack1)]
@test !allunique(allocs)
@test length(unique(allocs)) == 2
end
"""
Documentation for `issue64`.
"""
@check_allocs function issue64(v::AbstractVector, i, j)
v[i], v[j] = v[j], v[i]
v
end
let io = IOBuffer()
print(io, @doc issue64)
s = String(take!(io))
@test occursin("Documentation for `issue64`.", s)
end
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 3763 | # AllocCheck.jl
<!-- [](https://github.com/gbaraldi/AllocCheck.jl/actions/workflows/CI.yml?query=branch%3Amain) -->
[](https://julialang.github.io/AllocCheck.jl/dev/)
[AllocCheck.jl](https://github.com/JuliaLang/AllocCheck.jl) is a Julia package that statically checks if a function call may allocate by analyzing the generated LLVM IR of it and its callees using LLVM.jl and GPUCompiler.jl.
AllocCheck operates on _functions_, attempting to determine statically whether a function _may_ allocate memory, and if so, _where_ that allocation appears. This is different from measuring allocations using, e.g., `@time` or `@allocated`, which measures the allocations that actually happened during the execution of a function.
## Getting started
The primary entry point to check allocations is the macro [`@check_allocs`](@ref) which is used to annotate a function definition that you'd like to enforce allocation checks for:
```julia
julia> using AllocCheck
julia> @check_allocs multiply(x, y) = x * y
multiply (generic function with 1 method)
julia> multiply(1.5, 2.5) # call automatically checked for allocations
3.75
julia> multiply(rand(3, 3), rand(3, 3)) # result matrix requires an allocation
ERROR: @check_allocs function encountered 1 errors (1 allocations / 0 dynamic dispatches).
```
The `multiply(::Float64, ::Float64)` call happened without error, indicating that the function was proven not to allocate. On the other hand, the `multiply(::Matrix{Float64}, ::Matrix{Float64})` call raised an `AllocCheckFailure` due to one internal allocation.
The `errors` field can be used to inspect the individual errors:
```julia
julia> try
multiply(rand(3, 3), rand(3, 3))
catch err
err.errors[1]
end
Allocation of Matrix{Float64} in ./boot.jl:477
| Array{T,2}(::UndefInitializer, m::Int, n::Int) where {T} =
Stacktrace:
[1] Array
@ ./boot.jl:477 [inlined]
[2] Array
@ ./boot.jl:485 [inlined]
[3] similar
@ ./array.jl:418 [inlined]
[4] *(A::Matrix{Float64}, B::Matrix{Float64})
@ LinearAlgebra ~/.julia/juliaup/julia-1.10.0-rc1+0.x64.linux.gnu/share/julia/stdlib/v1.10/LinearAlgebra/src/matmul.jl:113
[5] var"##multiply#235"(x::Matrix{Float64}, y::Matrix{Float64})
@ Main ./REPL[13]:1
```
### Functions that throw exceptions
Some functions that we do not expect may allocate memory, like `sin`, actually may:
```julia
julia> @allocated try
sin(Inf)
catch
end
48
```
The reason for this is that `sin` needs to allocate if it **throws an error**.
By default, `@check_allocs` ignores all such allocations and assumes that no exceptions are thrown. If you care about detecting these allocations anyway, you can use `ignore_throw=false`:
```julia
julia> @check_allocs mysin1(x) = sin(x)
julia> @check_allocs ignore_throw = false mysin2(x) = sin(x)
julia> mysin1(1.5)
0.9974949866040544
julia> mysin2(1.5)
ERROR: @check_allocs function encountered 2 errors (1 allocations / 1 dynamic dispatches).
```
#### Limitations
Every call into a `@check_allocs` function behaves like a dynamic dispatch. This means that it can trigger compilation dynamically (involving lots of allocation), and even when the function has already been compiled, a small amount of allocation is still expected on the function entry.
For most applications, the solution is to use `@check_allocs` to wrap your top-level entry point or your main application loop, in which case those allocations are only incurred once. `@check_allocs` will guarantee that no dynamic compilation or allocation occurs once your function has started running.
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 2647 | This package includes code and examples derived from:
- [GPUCompiler.jl](https://github.com/JuliaGPU/GPUCompiler.jl)
- [Enzyme.jl](https://github.com/EnzymeAD/Enzyme.jl)
The GPUCompiler.jl package is licensed under the MIT "Expat" License:
> Copyright (c) 2019-present: Julia Computing and other contributors
>
> Copyright (c) 2014-2018: Tim Besard
>
> All Rights Reserved.
>
> Permission is hereby granted, free of charge, to any person obtaining a copy
> of this software and associated documentation files (the "Software"), to deal
> in the Software without restriction, including without limitation the rights
> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
> copies of the Software, and to permit persons to whom the Software is
> furnished to do so, subject to the following conditions:
>
> The above copyright notice and this permission notice shall be included in all
> copies or substantial portions of the Software.
>
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> SOFTWARE.
>
The Enzyme.jl package is licensed under the The MIT License (MIT):
> Copyright © 2020 William Moses, Valentin Churavy, and other contributors
>
> Permission is hereby granted, free of charge, to any person obtaining a copy
> of this software and associated documentation files (the "Software"), to deal
> in the Software without restriction, including without limitation the rights
> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
> copies of the Software, and to permit persons to whom the Software is
> furnished to do so, subject to the following conditions:
>
> The above copyright notice and this permission notice shall be included in
> all copies or substantial portions of the Software.
>
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
> THE SOFTWARE.
>
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 147 | # Exported functions and types
## Index
```@index
```
## Docstrings
```@docs
AllocCheck.check_allocs
```
```@docs
AllocCheck.@check_allocs
```
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 5426 | # AllocCheck
[AllocCheck.jl](https://github.com/JuliaLang/AllocCheck.jl) is a Julia package that statically checks if a function call may allocate, analyzing the generated LLVM IR of it and its callees using LLVM.jl and GPUCompiler.jl.
AllocCheck operates on _functions_, trying to statically determine whether or not a function _may_ allocate memory, and if so, _where_ that allocation appears. This is different from measuring allocations using, e.g., `@time` or `@allocated`, which measures the allocations that _did_ happen during the execution of a function.
## Getting started
AllocCheck has two primary entry points
- [`check_allocs`](@ref)
- [`@check_allocs`](@ref)
The difference between them is subtle, but important in situations where you want to absolutely guarantee that the result of the static analysis holds at runtime.
Starting with **the macro** [`@check_allocs`](@ref), this is used to annotate a function definition that you'd like to enforce allocation checks for:
```@repl README
using AllocCheck
using Test # hide
@check_allocs mymod(x) = mod(x, 2.5)
mymod(1.5) # call automatically checked for allocations
```
This call happened without error, indicating that the function was proven to not allocate any memory after it starts 🎉
When used on a function that may allocate memory
```@repl README
@check_allocs linsolve(a, b) = a \ b
linsolve(rand(10,10), rand(10))
```
the function call raises an `AllocCheckFailure`.
The `errors` field allows us to inspect the individual errors to get some useful information. For example:
```@example README
try
linsolve(rand(10,10), rand(10))
catch err
err.errors[1]
end
```
we see what type of object was allocated, and where in the code the allocation appeared.
The **function** [`check_allocs`](@ref) is similar to the macro, but instead of passing or throwing an error, it returns an array of informative objects indicating any presence of allocations, runtime dispatches, or allocating runtime calls:
```@example README
results = check_allocs(\, (Matrix{Float64}, Vector{Float64}))
length(results)
```
This call returned a long array of results, indicating that there are several potential allocations or runtime dispatches resulting from a function call with the specified signature. Let's have a look at how one of these elements looks:
```@example README
results[1]
```
## The difference between `check_allocs` and `@check_allocs`
The function [`check_allocs`](@ref) performs the analysis of a function call with the specified type signature in a very particular context: the state of the Julia session at the time of the call to `check_allocs`. Code loaded after this analysis may invalidate it, and any analysis performed in, for example, a test suite may be invalid at runtime. Less obvious problems may appear as a result of the type-inference stage in the Julia compiler sometimes being sensitive to the order of code loading, making it possible for the inference result to differ between two subtly different Julia sessions.
The macro [`@check_allocs`](@ref), on the other hand, performs the analysis _immediately prior_ to the execution of the analyzed function call, ensuring the validity of the analysis at the time of the call.
In safety-critical scenarios, this difference may be important, while in more casual scenarios, the difference may be safely ignored and whichever entry point is more convenient may be used.
### An example of invalidated analysis
In the example below we define a function and perform an analysis on it which indicates no issues. We then load additional code (which may be done by loading, e.g., a package) and perform the analysis again; this time the analysis indicates that issues have appeared.
```@example README
my_add(x, y) = x + y
check_allocs(my_add, (Int, Int))
```
As expected, no allocations are indicated. We now load additional code by defining a new method for this function
```@example README
my_add(x::Int, y) = BigInt(x) + y
length(check_allocs(my_add, (Int, Int)))
```
This time, several potential allocations are indicated. In this example, a method that was more specific for the analyzed signature was added, and this method may allocate memory.
### Functions that throw exceptions
Some functions that we do not expect may allocate memory, like `sin`, actually may:
```@example README
@allocated try sin(Inf) catch end
```
The reason for this is that `sin` needs to allocate if it **throws an error**.
By default, `@check_allocs` ignores all such allocations and assumes that no exceptions are thrown. If you care about detecting these allocations anyway, you can use `ignore_throw=false`:
```@example README
@check_allocs mysin1(x) = sin(x)
@check_allocs ignore_throw=false mysin2(x) = sin(x)
@test mysin1(1.5) == sin(1.5)
@test_throws AllocCheckFailure mysin2(1.5)
```
## Limitations
Every call into a `@check_allocs` function behaves like a dynamic dispatch. This means that it can trigger compilation dynamically (involving lots of allocation), and even when the function has already been compiled, a small amount of allocation is still expected on function entry.
For most applications, the solution is to use `@check_allocs` to wrap your top-level entry point or your main application loop, in which case those allocations are only incurred once. `@check_allocs` will guarantee that no dynamic compilation or allocation occurs once your function has started running.
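A minimal sketch of that pattern (the function names `main_loop` and `step!` are made up for illustration):
```julia
using AllocCheck

step!(state) = (state[1] += 1.0; nothing)   # hypothetical allocation-free work

@check_allocs function main_loop(state::Vector{Float64}, iterations::Int)
    for _ in 1:iterations
        step!(state)
    end
    return nothing
end

state = [0.0]
main_loop(state, 1_000)   # the entry-time cost is paid once here; the loop itself is proven allocation-free
```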
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 2928 | # Guaranteed Error Recovery
Safety-critical real-time systems are often required to have performance critical error-recovery logic. While errors are not supposed to occur, they sometimes do anyways 😦, and when they do, we may want to make sure that the recovery logic runs with minimum latency.
In the following example, we are executing a loop that may throw an error. By default [`check_allocs`](@ref) allows allocations on the error path, i.e., allocations that occur as a consequence of an exception being thrown. This can cause the garbage collector to be invoked by the allocation, and introduce an unbounded latency before we execute the error recovery logic.
To guard ourselves against this, we may follow these steps
1. Prove that the function does not allocate memory except for on exception paths.
2. Since we have proved that we are not allocating memory, we may disable the garbage collector. This prevents it from running before the error recovery logic.
3. To make sure that the garbage collector is re-enabled after an error has been recovered from, we re-enable it in a `finally` block.
```@example ERROR
function treading_lightly()
a = 0.0
GC.enable(false) # Turn off the GC before entering the loop
try
for i = 10:-1:-1
a += sqrt(i) # This throws an error for negative values of i
end
catch
exit_gracefully() # This function is supposed to run with minimum latency
finally
GC.enable(true) # Always turn the GC back on before exiting the function
end
a
end
exit_gracefully() = println("Calling mother")
using AllocCheck, Test
allocs = check_allocs(treading_lightly, ()) # Check that it's safe to proceed
```
```@example ERROR
@test isempty(allocs)
```
[`check_allocs`](@ref) returned zero allocations. If we invoke [`check_allocs`](@ref) with the flag `ignore_throw = false`, we will see that the function may allocate memory on the error path:
```@example ERROR
allocs = check_allocs(treading_lightly, (); ignore_throw = false)
length(allocs)
```
Finally, we test that the function is producing the expected result:
```@example ERROR
val = treading_lightly()
@test val ≈ 22.468278186204103 # hide
```
In this example, we accepted an allocation on the exception path with the motivation that it occurred once only, after which the program was terminated. Implicit in this approach is an assumption that the exception path does not allocate too much memory to execute the error recovery logic before the garbage collector is turned back on. We should thus convince ourselves that this assumption is valid, e.g., by means of testing:
```@example ERROR
treading_lightly() # Warm start
allocated_memory = @allocated treading_lightly() # A call that triggers the exception path
# @test allocated_memory < 1e4
```
The allocation sites reported with the flag `ignore_throw = false` may be used as a guide as to what to test.
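For example, a sketch of how those sites could be enumerated (reusing `treading_lightly` from above):
```julia
allocs = check_allocs(treading_lightly, (); ignore_throw = false)
for a in allocs
    println(typeof(a))   # e.g. AllocationSite or DynamicDispatch; `println(a)` also shows the source location
end
```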
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 3895 | # Allocations followed by a hot loop
A common pattern in high-performance Julia code, as well as in real-time systems, is to initially allocate some working memory, followed by the execution of a performance sensitive _hot loop_ that should perform no allocations. In the example below, we show a function `run_almost_forever` that resembles the implementation of a simple control system. The function starts by allocating a large `logvector` in which some measurement data is to be saved, followed by the execution of a loop which should run with as predictable timing as possible, i.e., we do not want to perform any allocations or invoke the garbage collector while executing the loop.
```@example HOT_LOOP
function run_almost_forever()
N = 100_000 # A large number
logvector = zeros(N) # Allocate a large vector for storing results
for i = 1:N # Run a hot loop that may not allocate
y = sample_measurement()
logvector[i] = y
u = controller(y)
apply_control(u)
Libc.systemsleep(0.01)
end
end
# Silly implementations of the functions used in the example
sample_measurement() = 2.0
controller(y) = -2y
apply_control(u) = nothing
nothing # hide
```
Here, the primary concern is the loop, while the preamble of the function should be allowed to allocate memory. The recommended strategy in this case is to refactor the function into a separate preamble and loop, like this
```@example HOT_LOOP
function run_almost_forever2() # The preamble that performs allocations
N = 100_000 # A large number
logvector = zeros(N) # Allocate a large vector for storing results
run_almost_forever!(logvector)
end
function run_almost_forever!(logvector) # The hot loop that is allocation free
for i = eachindex(logvector) # Run a hot loop that may not allocate
y = sample_measurement()
@inbounds logvector[i] = y
u = controller(y)
apply_control(u)
Libc.systemsleep(0.01)
end
end
nothing # hide
```
We may now analyze the loop function `run_almost_forever!` to verify that it does not allocate memory:
```@example HOT_LOOP
using AllocCheck, Test
allocs = check_allocs(run_almost_forever!, (Vector{Float64},));
@test isempty(allocs)
```
## More complicated initialization
In practice, a function may need to perform several distinct allocations upfront, including allocating objects of potentially complicated types, such as closures. In situations like this, the following pattern may be useful:
```julia
struct Workspace
# All you need to run the hot loop, for example:
cache1::Vector{Float64}
cache2::Matrix{Float64}
end
function setup(max_iterations::Int = 100_000)
# Allocate and initialize the workspace
cache1 = zeros(max_iterations)
cache2 = zeros(max_iterations, max_iterations)
return Workspace(cache1, cache2)
end
function run!(workspace::Workspace)
# The hot loop
for i = eachindex(workspace.cache1)
workspace.cache1[i] = my_important_calculation() # The allocated cache is modified in place
...
end
end
function run()
workspace = setup()
run!(workspace)
end
```
Here, `workspace` is a custom struct designed to serve as a workspace for the hot loop, but it could also be realized as a simple tuple of all the allocated objects required for the computations. Note, the struct `Workspace` in this example was not marked as mutable. However, its contents, the two cache arrays, are. This means that the `run!` function may modify the contents of the cache arrays.
The benefit of breaking the function up into two parts which are called from a third, is that we may now create the workspace object individually, and use it to compute the type of the arguments to the `run!` function that we are interested in analyzing:
```julia
workspace = setup()
allocs = check_allocs(run!, (typeof(workspace),))
``` | AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.3 | bf0828f97eb93f7a35c76db20f42582ded279cdb | docs | 2759 | # Optional debugging and logging
For debugging purposes, it may sometimes be beneficial to include logging statements in a function, for example
```@example DEBUGGING
using AllocCheck # hide
@check_allocs function myfun(verbose::Bool)
a = 0.0
for i = 1:3
a = a + i
verbose && @info "a = $a"
end
end
nothing # hide
```
Here, the printing of some relevant information is only performed if `verbose = true`. While the printing is optional, and not performed if `verbose = false`, [`check_allocs`](@ref) operates on _types rather than values_, i.e., `check_allocs` only knows that the argument is of type `Bool`, not that it may have the value `false`:
```@repl DEBUGGING
myfun(false)
```
Indeed, this function was determined to potentially allocate memory.
To allow such optional features while still being able to prove that a function does not allocate if the allocating features are turned off, we may [lift the _value_ of `verbose` into the _type domain_](https://docs.julialang.org/en/v1/manual/types/#%22Value-types%22). We do this by means of the `Val` type:
```@example DEBUGGING
function typed_myfun(::Val{verbose}) where verbose
a = 0.0
for i = 1:3
a = a + i
verbose && @info "a = $a"
end
end
length(check_allocs(typed_myfun, (Val{false},)))
```
The compiler, and thus also AllocCheck, now knows that the value of `verbose` is `false`, since this is encoded in the _type_ `Val{false}`. The compiler can use this knowledge to figure out that the `@info` statement won't be executed, and thus prove that the function will not allocate memory.
The user may still use this function with the debug print enabled by calling it like
```@example DEBUGGING
typed_myfun(Val{true}())
```
## Advanced: Constant propagation
Sometimes, code written without this trick will still work just fine with AllocCheck.
That's because in some limited scenarios, the compiler is able to use _constant propagation_ to determine what path through a program will be taken based on the _value of constants_.
We demonstrate this effect below, where the value `verbose = false` is hard-coded into the function:
```@example DEBUGGING
@check_allocs function constant_myfun()
verbose = false
a = 0.0
for i = 1:3
a = a + i
verbose && @info "a = $a"
end
return a
end
constant_myfun()
```
When looking at `constant_myfun`, the compiler knows that `verbose = false` since this constant is hard coded into the program. Sometimes, the compiler can even propagate constant values all the way into called functions.
This is useful, but it's not guaranteed to happen in general. The `Val{T}` trick described here ensures that the variable is propagated as a constant everywhere it is required.
| AllocCheck | https://github.com/JuliaLang/AllocCheck.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 169 | module Peccon
export fin_data,
calc_returns,
sim_mpt,
sharp_ratio
include("../src/extract.jl")
include("../src/general.jl")
include("../src/mpt.jl")
end
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 637 | """
fin_data(Tickers)
extracts the daily price info of multiple stocks and puts them in a vector of dataframes.
# Examples
```julia-repl
julia> fin_data(["ADAEUR", "SPY"])
```
"""
function fin_data(Tickers,days = 250, clientKey = "0VS2G38H6PKP03GX" )
#extract the data
client = AlphaVantage.GLOBAL[]
client.key = clientKey
AlphaVantage.GLOBAL[]
portfolio = []
days= days
for i in Tickers
asset = DataFrame(time_series_daily_adjusted( i, outputsize= "full"))
asset = asset[1:days,:]
asset[!,"ticker"] .= i
push!(portfolio, asset)
end
return portfolio
end | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 894 | """
calc_returns(portfolio, Tickers)
calculates the daily log returns of each stock in a portfolio based on the close price of the day.
# Examples
```julia-repl
julia> tickers = ["ADAEUR", "SPY"]
julia> data = fin_data(tickers)
julia> calc_returns(data, tickers)
```
"""
function calc_returns(portfolio, Tickers)
#calculate returns for each stock
for x in portfolio
price = x[!,"close"]
returns = zeros(0)
for i=2:length(price)
r = log(price[i]/price[i-1])
append!(returns, r)
end
        # prepend a zero for the first day, which has no previous price and therefore no return
prepend!(returns, 0)
x[!,"returns"] = returns # add to dataframe
end
#add all returns into one dataset
port_returns = DataFrame()
for (x,y) in zip(portfolio, Tickers)
port_returns[!,y] = x[!,"returns"]
end
return port_returns
end | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 2257 | using AlphaVantage,
DataFrames,
StatsPlots,
Dates,
Statistics,
Distributions,
IterTools,
Plots,
CSV
#=
make random allocation on weights to see the trade-off between risk and return
Next plot all possible allocation to see total impact
In the end find optimal risk return combination
=#
"""
    sim_mpt(port_returns, simulations = 5000)
simulates random portfolio combinations and calculates the expected return and standard deviation of the portfolio
# Examples
```julia-repl
julia> port_returns = calc_returns(data, tickers)
julia> sim_mpt(port_returns)
```
"""
function sim_mpt(port_returns, simulations= 5000 )
names_stock= names(port_returns)
port = DataFrame(exp_return = Float64[],
port_var = Float64[]
)
for i in names_stock
port[:,"weight_"*i]= Float64[]
end
i = 1;
while i <= simulations
#set weights
weights = rand(size(port_returns)[2])
total = sum(weights)
w = weights/total
Σ = cov(Matrix(port_returns))
#calculate returns of the portfolio
port_return = Matrix(port_returns)*w
expected_return = mean(port_return)*250
#calculate variance of the profolio
σ²= 0
for i in eachindex(w), j in eachindex(w)
x = w[i]*w[j]*Σ[i,j]
σ² +=x
end
port_var = (σ²*250)
list = [expected_return, port_var, w]
#decompose
results = collect(Iterators.flatten(list))
push!(port, results )
i += 1
end
port[:,:port_std] = .√port[:,:port_var]
return port
end
#calculate the sharp ratio
"""
sharp_ratio(port_sim)
calculates the sharp ratio of each simulated portfolio
# Examples
```julia-repl
julia> port_sim = sim_mpt(port_returns)
julia> sharp_ratio(port_sim)
```
"""
function sharp_ratio(port_sim, rf = 0.02)
port_sim[:, :sharp_ratio] = (port_sim[:,:exp_return] .- rf )./port_sim[: , :port_std]
return sort!(port_sim, :sharp_ratio)
end
#utility function σ² - qE(Rₚ)
function utility_mpt(port_sim, q = 0 )
port_sim[:,:utility] = abs.(port_sim[:,:port_var] - q*port_sim[:,:exp_return])
return sort!(port_sim,:utility)
end
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 625 | import Pkg; Pkg.add("Documenter")
using Documenter, Peccon
makedocs(modules = [Peccon],
doctest = true,
sitename="Peccon.jl",
format = Documenter.HTML(
edit_link = "master",
prettyurls = false)
,
pages = Any[
"setup" => "index.md",
"fundamental understanding" => "man/fundamental_understanding_intuition.md",
"glossary" => "man/glossary.md",
"theory" => "man/Theory.md",
"financial planning tools" => "man/financial_planning_tools.md",
"API" => "lib/API.md"
])
deploydocs(
repo = "github.com/korilium/Peccon.jl.git",
) | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 436 | using Peccon, StatsPlots
Tickers = ["IUSA.AS", "IBCI.AS", "IEMA.AS", "WTCH.AS", "VWRL.AS"]
data1 = data_alpha(Tickers, "0VS2G38H6PKP03GX", 248)
returns = daily_returns(data1, Tickers)
port_sim = sim_mpt(returns,5000)
@df port_sim scatter(:port_var, :exp_return)
# port_sharp = sharp_ratio(port_sim)
port_opt = opt_mpt(returns, 0.0:0.01:2.0, 0.00)
@df port_opt scatter!(:port_var, :exp_return)
sharp_ratio(port_opt, 0.02)
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 249 | module Peccon
#export the package functions to call them
export data_alpha,
daily_returns,
sim_mpt,
sharp_ratio,
opt_mpt,
per_return
include("../src/extract.jl")
include("../src/general.jl")
include("../src/mpt.jl")
end
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 695 | using MarketData
# function data_yahoo() to be developed
"""
data_alpha(Tickers)
extracts the daily price info of multiple stocks from alphavantage and puts them in a vector of dataframes.
# Examples
```julia-repl
julia> data_alpha(["ADAEUR", "SPY"])
```
"""
function data_alpha(Tickers,clientKey, days = 248 )
#extract the data
client = AlphaVantage.GLOBAL[]
client.key = clientKey
AlphaVantage.GLOBAL[]
portfolio = []
days= days
for i in Tickers
asset = DataFrame(time_series_daily( i, outputsize= "full"))
asset = asset[1:days,:]
asset[!,"ticker"] .= i
push!(portfolio, asset)
end
return portfolio
end | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 1363 | """
daily_returns(portfolio, Tickers)
calculates the daily log returns of each stock in a portfolio based on the close price of the day.
# Examples
```julia-repl
julia> tickers = ["ADAEUR", "SPY"]
julia> data = data_alpha(tickers, "your_api_key")
julia> daily_returns(data, tickers)
```
"""
function daily_returns(portfolio, Tickers)
#calculate returns for each stock
for x in portfolio
price = x[!,"close"]
returns = zeros(0)
for i=1:(length(price)-1)
r = log(price[i]/price[i+1])
append!(returns, r)
end
        # prepend a zero for the first day, which has no previous price and therefore no return
prepend!(returns, 0)
x[!,"returns"] = returns # add to dataframe
end
#add all returns into one dataset
port_returns = DataFrame()
for (x,y) in zip(portfolio, Tickers)
port_returns[!,y] = x[!,"returns"]
end
return port_returns
end
#per returns
"""
per_return(returns)
calculates the compounded return for a specific time-period from daily log returns
# Examples
```julia-repl
julia> tickers = ["ADAEUR", "SPY"]
julia> data = data_alpha(tickers, "your_api_key")
julia> returns = daily_returns(data, tickers)
julia> per_return(returns)
```
"""
function per_return(returns)
days = size(returns)[1]
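    # compound the daily log returns over the period: exp(sum of log returns) - 1 gives the simple return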
ann_returns = mapcols(col -> exp(sum(col))-1, returns)
return ann_returns
end | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 3930 | using AlphaVantage,
DataFrames,
StatsPlots,
Dates,
Statistics,
Distributions,
IterTools,
Plots,
CSV,
Optimization,
OptimizationOptimJL
#=
make random allocation on weights to see the trade-off between risk and return
Next plot all possible allocation to see total impact
In the end find optimal risk return combination
=#
"""
    sim_mpt(returns, simulations = 5000)
simulates random portfolio combinations and calculates the expected return and standard deviation of the portfolio
# Examples
```julia-repl
julia> returns = daily_returns(data, tickers)
julia> sim_mpt(returns)
```
"""
function sim_mpt(returns, simulations= 5000 )
days = size(returns)[1]
names_stock= names(returns)
port = DataFrame(exp_return = Float64[],
port_var = Float64[]
)
for i in names_stock
port[:,"weight_"*i]= Float64[]
end
i = 1;
while i <= simulations
#set weights
weights = rand(size(returns)[2])
total = sum(weights)
w = weights/total
Σ = cov(Matrix(returns))
#calculate returns of the portfolio
stock_return = Matrix(returns)*w
expected_return = mean(stock_return)*days
#calculate variance of the profolio
σ²= 0
for i in eachindex(w), j in eachindex(w)
x = w[i]*w[j]*Σ[i,j]
σ² +=x
end
port_var = (σ²*days)
list = [expected_return, port_var, w]
#decompose
results = collect(Iterators.flatten(list))
push!(port, results )
i += 1
end
port[:,:port_std] = .√port[:,:port_var]
return port
end
#calculate the sharp ratio
"""
sharp_ratio(port_sim)
calculates the sharp ratio of each simulated portfolio
# Examples
```julia-repl
julia> port_sim = sim_mpt(stock_returns)
julia> sharp_ratio(port_sim)
```
"""
function sharp_ratio(port, rf = 0.02)
port[:, :sharp_ratio] = (port[:,:exp_return] .- rf )./port[: , :port_std]
return sort!(port, :sharp_ratio)
end
"""
opt_mpt(returns, risk_av_step = 0.0:0.02:2.0, diversification_limit= 0.05)
returns the efficient frontier for a portfolio.
# Examples
```julia-repl
julia> port_opt = opt_mpt(returns)
```
"""
function opt_mpt(returns, risk_av_step = 0.0:0.02:2.0, diversification_limit= 0.0 )
# cost function
F(w,p) = w'*p[1]*w - p[3] * p[2]'*w
#constraints
cons(res, w, p) = (res .=[w; sum(w)])
#setting up parameters
#variance
Σ = cov(Matrix(returns))*1260
#stock returns
per_returns = collect(per_return(returns)[1,:])
#intial weights
w0_size = 1/size(returns)[2]
w0 = repeat([w0_size],size(returns)[2] )
#days
days= size(returns)[1]
#set bounds
nb_bounds = length(w0) +1
divers = fill(diversification_limit, nb_bounds-1)
lcons = append!(divers, 1.0)
ucons = fill(1.0, nb_bounds)
#create dataframe
names_stock= names(returns)
opt_port = DataFrame(exp_return = Float64[],
port_var = Float64[],
risk_aversion = Float64[],
)
for i in names_stock
opt_port[:,"weight_"*i]= Float64[]
end
for i in risk_av_step
_p = [Σ, per_returns, i]
optprob = OptimizationFunction(F, Optimization.AutoForwardDiff(), cons = cons)
prob = OptimizationProblem(optprob, w0, _p, lcons = lcons, ucons = ucons)
sol = solve(prob, IPNewton())
woptimal = sol.u
expected_return = mean(Matrix(returns)*woptimal)*days
Σ = cov(Matrix(returns))*days
var = woptimal'*Σ*woptimal
list = [expected_return, var, i, woptimal]
results = collect(Iterators.flatten(list))
push!(opt_port, results)
end
opt_port[:,:port_std] = .√opt_port[:,:port_var]
return opt_port
end
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | code | 2503 | using Peccon
using Test,
CSV,
DataFrames
# const dir = joinpath(dirname(pathof(Peccon)), "..", "test", "test_data")
### create some test to check if the functions work properly
# load in test data
Tickers = ["IUSA.AS", "IBCI.AS", "IEMA.AS", "WTCH.AS", "VWRL.AS"]
data1 = data_alpha(Tickers, "0VS2G38H6PKP03GX", 1260)
returns = daily_returns(data1, Tickers)
period_returns = per_return(returns)
@testset "general" begin
##### checks for daily_returns #####
# check for missing values
@test any(ismissing.(eachrow(returns))) == false
# check for unreasonable outliers
@test returns[partialsortperm(returns[:,"IUSA.AS"], 1:1, rev=true),"IUSA.AS"][1] < 0.5
@test returns[partialsortperm(returns[:,"IBCI.AS"], 1:1, rev=true),"IBCI.AS"][1] < 0.5
@test returns[partialsortperm(returns[:,"IEMA.AS"], 1:1, rev=true),"IEMA.AS"][1] < 0.5
@test returns[partialsortperm(returns[:,"WTCH.AS"], 1:1, rev=true),"WTCH.AS"][1] < 0.5
@test returns[partialsortperm(returns[:,"VWRL.AS"], 1:1, rev=true),"VWRL.AS"][1] < 0.5
# test whether the period return is greater than the daily return (this is the case when taken over a long period)
@test period_returns[:,"IUSA.AS"][1] > returns[1,"IUSA.AS"]
end
@testset "mpt" begin
sim_port = sim_mpt(returns)
##### check for sim_mpt #####
# check whether the weights sum to one
@test all(sum(eachcol(select(sim_port, r"weight"))) .≈ 1)
#check whether the standard deviation is not negative
@test all(sim_port.port_std .> 0)
# @df sim_port scatter(:port_std, :exp_return)
sharp = sharp_ratio(sim_port, 0.02)
    ##### sharp_ratio #####
# # check if it is the largest sharp ratio
@test all(sharp[end,:sharp_ratio] .≥ sharp[:,:sharp_ratio])
# # check if the best sharp ratio has indeed the highest return for the lowest variance
@test all(sharp[end,:exp_return] .> sharp[sharp[end,:port_std] .> sharp[:,:port_std], :exp_return])
opt_port = opt_mpt(returns, 0.0:0.02:2.0, 0.00)
##### opt_mpt #####
@testset "efficient frontier" for x in eachrow(opt_port)
        filter = x[:port_var].> sim_port[:,:port_var] # filter on variance: take no simulation with higher variance
sel_port = sim_port[filter,[:exp_return, :port_var]]
        # round to three digits after the comma
sel_port[:,:exp_return] = round.(sel_port[:, :exp_return], digits = 3 )
@test all(x[:exp_return] .> sel_port[:,:exp_return])
end
end
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 1155 | # Peccon
[](https://github.com/korilium/Peccon.jl/actions/workflows/CI.yml?query=branch%3Amaster) [](https://app.codecov.io/github/korilium/Peccon.jl)
This is the Julia package for the Peccon project. The Peccon project is built to help people make informed decisions about their financial well-being.
**Documentation**: [![][docs-latest-img]][docs-latest-url]
[docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg
[docs-latest-url]: https://korilium.github.io/Peccon.jl/index.html
If you want to collaborate on this please let me know: [](https://github.com/SciML/ColPrac)
Here are my socials :
linkedin: https://www.linkedin.com/in/ignace-decocq-4b703a135/
julia community: https://discourse.julialang.org/u/korilium/summary
E-mail: [email protected]
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 1862 | # Peccon.jl
## Introduction
In this Julia package you can find all the financial tools that have been developed to optimize investment, spending and income.
These tools enable you to better anticipate your future financial well-being.
The library is intended to give you an intuitive feel for, and a theoretical explanation of, these tools. The library is not intended to give any financial advice; rather, it gives you the necessary tools to make your own decisions.
The financial data is extracted by using AlphaVantage. More about this in the following subsection.
## AlphaVantage and access to market data
The Peccon package uses [AlphaVantage](https://www.alphavantage.co/#about) and [the AlphaVantage.jl package](https://github.com/ellisvalentiner/AlphaVantage.jl) to get stock market data. To get access to the data you need to get an API key. This is obtained by claiming your API key on [this website](https://www.alphavantage.co/support/#api-key). The only thing required is a legitimate email address.
!!! note
You can have 5 API requests per minute and 500 requests per day for free.
If you want to have more get their [premium membership](https://www.alphavantage.co/premium/)
Once you have your API key, it becomes possible to extract financial market data as follows:
```julia
julia> Tickers = ["ADAEUR", "SPY"]
julia> data_alpha(Tickers, 252, "your_API_key")
```
Before we delve into specific tools, the first part is dedicated to understanding fundamental concepts like interest rates, inflation and variance. These concepts are needed as they enable you to fully comprehend the basic implications of each tool. Then we delve into the theoretical aspects of each tool and subsequently see the implementation in Peccon.jl. Lastly, you can find an API reference explaining each and every function in Peccon.jl.
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 386 | # API
## loading in data
```@docs
data_alpha(Tickers, clientKey, days = 248)
```
## General
### Calculating returns
```@docs
daily_returns(portfolio, Tickers)
per_return(returns)
```
## Tools
### modern portfolio theory (mpt)
```@docs
sharp_ratio(port_sim, rf = 0.02)
sim_mpt(returns, simulations= 5000 )
opt_mpt(returns, risk_av_step = 0.0:0.02:2.0, diversification_limit= 0.0 )
``` | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 5224 | # Theory
In this subsection each financial planning tool is fully explained.
## Modern Portfolio Theory (MPT)
Modern Portfolio Theory is all about minimizing the variance of the portfolio while taking into account the return of the portfolio with some risk-preference. The first thing to do is to calculate the returns of the portfolio and its covariance matrix.
The expected return of the portfolio is calculated as follows:
```math
E(R_P) = \sum_iw_iE(R_i)
```
where $R_P$ is the return of the portfolio, $R_i$ is the return of asset $i$ and $w_i$ is the weight of asset $i$ in the portfolio. The weights should sum to one: $\sum_i w_i = 1$.
Next, we express the variance of the portfolio as follows:
```math
\sigma^2_P = \sum_i\sum_j w_iw_j\sigma_i\sigma_j\rho_{ij}
```
In matrix notation this becomes:
```math
\sigma^2_P = w'\Sigma w
```
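As a small numerical illustration of the two formulas above (made-up data, not part of the package API):
```julia
using Statistics, LinearAlgebra

returns = rand(250, 3) .* 0.02 .- 0.01   # made-up daily returns for 3 assets
w = [0.5, 0.3, 0.2]                      # portfolio weights, summing to 1

E_Rp = mean(returns * w)                 # expected portfolio return, Σᵢ wᵢ E(Rᵢ)
Σ    = cov(returns)                      # covariance matrix of the asset returns
σ²_p = w' * Σ * w                        # portfolio variance, w'Σw
```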
Remember, what we want to know is, for a given set of stocks, which combination gives us the lowest variance with the highest return. To achieve this we minimize the variance while taking into account our risk-preference $P$ with respect to returns, *based on some historic data*.
We therefore minimize the following cost function:
```math
Min(w'\Sigma w - P * E[R_p])
```
given the following constraints:
```math
w_1 + w_2 + ... + w_{n-1} + w_{n} =1
```
```math
0 \le w_i \le 1
```
We solve this optimal design problem using the [Interior point Newton algorithm](https://en.wikipedia.org/wiki/Interior-point_method) from the [Optim.jl package](https://julianlsolvers.github.io/Optim.jl/stable/#)
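A condensed sketch of how this constrained problem can be set up (the numbers are made up; this mirrors what the package's `opt_mpt` function does internally via Optimization.jl, OptimizationOptimJL.jl and the `IPNewton` solver):
```julia
using Optimization, OptimizationOptimJL

# cost function: portfolio variance minus P times the expected return
F(w, p) = w' * p.Σ * w - p.P * (p.μ' * w)
# constraint vector: each individual weight plus the sum of the weights
cons(res, w, p) = (res .= [w; sum(w)])

μ = [0.05, 0.07, 0.03]                                # assumed expected returns
Σ = [0.10 0.01 0.00; 0.01 0.20 0.00; 0.00 0.00 0.05]  # assumed covariance matrix
p = (Σ = Σ, μ = μ, P = 0.5)                           # risk-preference P = 0.5
w0 = fill(1/3, 3)                                     # initial guess: equal weights

optf = OptimizationFunction(F, Optimization.AutoForwardDiff(); cons = cons)
prob = OptimizationProblem(optf, w0, p; lcons = [zeros(3); 1.0], ucons = [ones(3); 1.0])
sol  = solve(prob, IPNewton())                        # sol.u holds the optimal weights
```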
The only parameter is our risk-preference $P$. For each risk-preference $P$ there is an optimal combination of stocks that minimizes our cost function. This creates the efficient frontier.
### The efficient frontier
The upward-sloped portion of the hyperbola is the efficient frontier. It reflects the best *expected* level of return for its level of risk as you will get the maximum amount of return with the least amount of variance for your portfolio.

### Sharp ratio
We can use the sharp ratio to see how well the return of the portfolio/asset compensates you for the risk that you take. The sharp ratio does, however, not take into account all risks involved and has the same limitations as the MPT (see the limitations subsection).
```math
S_P = \frac{E[R_P - R_{b}]}{\sigma_P}
```
where $R_b$ is the return of the baseline "risk-free" product.
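Continuing the sketch above (reusing `E_Rp` and `σ²_p`, with an assumed risk-free return `rf` over the same period):
```julia
rf  = 0.0                        # assumed risk-free return over the period
S_p = (E_Rp - rf) / sqrt(σ²_p)   # sharp ratio of the portfolio
```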
### Limitations
There are three main limitations to this tool. The first limitation is that the MPT is a historical measurement of portfolio performance. It does not say anything about the future performance of the portfolio. As a consequence, different macro-economic situations might lead to totally different end results for the MPT. The second issue is that the tool is based on the expected return and variance of the portfolio. This captures the risk-return relationship quite well, but it does not take into account [skewness](https://en.wikipedia.org/wiki/Skewness) and [tail risk](https://en.wikipedia.org/wiki/Tail_risk). It therefore gives rise to a reduced volatility and an inflated growth rate for a portfolio. Lastly, the risk measurement is probabilistic in nature. It does not reflect the structural roots of the risks taken. For example, the risks of a stock are of a totally different nature than those of a commodity, but MPT still accounts for them in the same way.
### Adaptations
The cost function can be altered if the function stays convex. We can therefore adapt our cost function to account for more precise measurements of risk. One of the most popular adaptations is the Post-Modern Portfolio Theory (PMPT).
The current tool only implements the MPT. Future work will make PMPT and other adaptations possible.
### Recommended usage
Never use this tool for individual stock picking, and never, ever, rely *only* on the MPT. Always do your due diligence before creating your portfolio, and again, this is in no way or form financial advice.
So why should you use this tool and for what purpose? It is highly recommended to use this tool with exchange traded funds (ETFs), as these products are already substantially diversified and issue two of the MPT is therefore greatly diminished. Also, the structural risk that certain ETFs are exposed to is difficult to estimate, and the MPT can help you gain insights into which ETFs have less or more risk compared to the returns they offer. Lastly, MPT also works better if you invest in all asset classes, as each class has risks of a different nature. MPT does not take that into account, so you have to do it yourself. You are then also less exposed to one particular kind of risk.
To know which portfolio weights you should apply, you have to understand your risk preference. If you do not want to take a lot of risk, it is beneficial to look at optimal portfolios with low values of $P$. The reverse is true for people who are risk-seeking.
## to be developed
#### Post-Modern Portfolio Theory
##### Sortino ratio
### Captial asset pricing model (CAPM)
### Optimal control of spending, saving and investment
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 4229 | # Planning tools
## Modern Portfolio Theory (MPT)
The modern portfolio theory (MPT) is one of the oldest applications in modern finance still used today. The technical background can be found in the theory subsection. We will now demonstrate how MPT is implemented in the Peccon package and lastly cover some important limitations and recommendations of the tool.
### Example
First extract the daily price data of all the assets you are considering in your portfolio.
```@example mpt
using Peccon
Tickers = ["IUSA.AS", "IBCI.AS", "IEMA.AS", "WTCH.AS", "VWRL.AS"];
# data = data_alpha(Tickers, "your_api_key", 252);
data= data_alpha(Tickers, "0VS2G38H6PKP03GX", 248); # hide
data[1][1:5,:]
```
Then calculate the daily log returns for each asset in the portfolio.
```@example mpt
Tickers = ["IUSA.AS", "IBCI.AS", "IEMA.AS", "WTCH.AS", "VWRL.AS"]; # hide
data= data_alpha(Tickers, "0VS2G38H6PKP03GX", 252); # hide
returns = daily_returns(data, Tickers);
returns[1:5,:]
```
Subsequently, simulate 5000 possible portfolio combinations with the assets in the portfolio.
```@example mpt
port_sim = sim_mpt(returns);
port_sim[1:5,:]
```
Plot the expected return and variance of each simulated portfolio to visualize the efficient frontier.
```@example mpt
using Peccon, Pkg; # hide
Pkg.add("StatsPlots") # hide
using StatsPlots
@df port_sim scatter(:port_var, :exp_return)
savefig("sim_fig.svg"); nothing # hide
```

Calculate the efficient frontier of the combinations of stocks.
```@example mpt
port_opt = opt_mpt(returns, 0.0:0.02:2.0, 0.00) ;
port_opt[1:5,:]
```
In the dataframe the optimal portfolios with their respective risk-aversions are shown.
Subsequently, add the efficient frontier to the simulated plot.
```@example mpt
@df port_opt scatter!(:port_var, :exp_return)
savefig("opt_fig.svg"); nothing # hide
```

Lastly, calculate the sharp ratio to find the portfolio with the optimal return-to-risk ratio.
```@example mpt
port_sim_sharp = sharp_ratio(port_sim) ;
@show port_sim_sharp[end,:]
port_opt_sharp = sharp_ratio(port_sim) ;
port_opt_sharp[end,:]
```
### Limitations
There are three main limitations to this tool. The first limitation is that the MPT is a historical measurement of portfolio performance. It does not say anything about the future performance of the portfolio. Different macro-economic situations might lead to totally different end results. The second issue is that the tool is based on the expected return and variance of the portfolio. This captures the risk-return relationship quite well, but it does not take into account [skewness](https://en.wikipedia.org/wiki/Skewness) and [tail risk](https://en.wikipedia.org/wiki/Tail_risk). It therefore gives rise to a reduced volatility and an inflated growth rate for a portfolio. Lastly, the risk measurement is probabilistic in nature. It does not reflect the structural roots of the risk. For example, the risks of a stock are of a totally different nature than those of a commodity, but the tool will still account for them in the same way.
### Recommended usage
Never use this tool for individual stock picking, and never, ever, rely *only* on the MPT. Always do your own due diligence before creating your portfolio, and again, this is in no way or form financial advice.
So why should you use this tool and for what purpose? It is highly recommended to use this tool with exchange traded funds (ETFs), as these products are already substantially diversified and issue two of the MPT is therefore greatly diminished. Also, the structural risk that certain ETFs are exposed to is difficult to estimate, and the MPT can help you gain insights into which ETFs have less or more risk compared to the returns they offer. Lastly, MPT also works better if you invest in all asset classes, as each class has risks of a different nature and you are then not fully exposed to one particular kind of risk.
To know which portfolio weights you should apply, you have to understand your risk preference. If you do not want to take a lot of risk, it is beneficial to look at optimal portfolios with low values of $P$. The reverse is true for people who are risk-seeking.
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 5631 | # Fundamental Understanding
In this subsection of the documentation we delve deeper into the main concepts behind financial planning. The general intuition will be given here, while the theoretical aspects of the tools will be discussed in next part. It is highly recommended to at least **understand** the intuitional part of the fundamentals as this will enable you to better account for the limitations and the context of each tool.
> Financial planning is like learning how to drive. Know the traffic rules and start in the parking lot.
## Intuition
Financial planning has three main area's of focus: income, spending and investment/savings. These three area's are linked with each other:

Income is the money that you earn from your wage. This income is then subdivided into your savings and expenditures depending on your spending behavior. Subsequently, a part of the savings is invested into different kinds of assets. Depending on the risk you take, these assets will return a certain profit or loss. In each subdomain we can optimize choices in a way that benefits your financial well-being. The fundamental context is discussed in the following sections.
### Investments
Investments are important. They are the cornerstone of planning your financial well-being. It is however not easy to decide how you should invest your money. A whole industry is built around that question and it becomes even harder when you take into account the ramifications of those decisions. It is therefore paramount to fully understand the options available and the possible caveats. The investment decision is the most difficult one you will make and most of the financial planning tools will therefore revolve around it.
In this subsection, however, a small introduction to the basic concepts in investment is given, starting with risk management and returns.
#### Risk Management and Returns
Risks are all about chances of future events, more specifically, chances of negative future events. In finance we always try to manage the negative impacts events might have on our investments. Take for example a coin flip. There are two possible outcomes: heads or tails. Each has a 50% chance of happening. In our example we may want to avoid flipping tails because we can lose 50 euros, while we win 50 euros if we get heads. We therefore adjust the coin so that the probability of flipping tails will decrease. Adjusting your exposure to risk is what we call risk management. **The main goal of risk management is that if the event occurs we can live with the loss.** The idea is therefore that you set your own risk appetite (what are you willing to lose?).
The general notion about risk with respect to age is that as we grow older we should take fewer risks, as the consequence of a bad event has a larger impact on our financial well-being.
Note that there are two dimensions when talking about risk (see graph):
* the severity of the loss if the event occurs
* the probability of the event

The most precarious risks for an investment are the ones with low probability and high consequence. The main reason for this is that the risks are not observed as much and thus estimating the probability of occurrence is rather difficult. Also, we humans tend to be irrational when dealing with risks. We generally over/underestimate them and are therefore overexposed to them or overprotected from them.
Returns are always expressed as a percentage of the initial investment and always refer to a certain time period.
Returns are inherently linked with risks: the higher the risk, the higher the return. You can imagine it as in the following graph.

As the risk increases so does the variability of the returns. This is called volatility and it is one of the main measurements of risk. In the stock market we can measure the risk of a stock by analyzing the fluctuations of the daily returns. This risk measurement is however not perfect and a lot of different measurements have been invented to capture the total risk a certain asset has.
In the next subsection we will talk about interest rates, which are a kind of return generally associated with bonds/obligations.
#### Interest Rate
Interest rates are a percentage of an initial capital and are calculated over a time horizon. For example,
a yearly interest rate of two percent on an initial capital of 1 000 euro will amount to 20 euro after one year, while a monthly interest rate of four percent on the same capital will return 40 euro per month. It is therefore important to remember two questions when faced with interest rates (a small numeric sketch follows these questions):
* On what time horizon is the interest rate calculated?
* To which initial capital does the interest rate refer?
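As a minimal numeric sketch of the example above (plain Julia, not a function of any package):
```julia
capital = 1_000.0        # initial capital in euro
yearly_rate = 0.02       # two percent per year
monthly_rate = 0.04      # four percent per month

interest_per_year = capital * yearly_rate     # 20.0 euro after one year
interest_per_month = capital * monthly_rate   # 40.0 euro per month
```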
#### Inflation
Inflation is the devaluation of currency. If today your 1€ can buy you x amount of goods and tomorrow your 1€ can buy x-1 goods, then we speak of inflation. Inflation is good for debt and bad for **capital**. Let's assume you have 1000 euro in debt, 1000 euro in a deposit and you earn 100 euro. Now assume that you have 5% inflation and that your wage also increases by 5%. Then your assets and liabilities will change over time as depicted in the graph.

As you can see, the real value of the initial capital and debt is almost wiped out by the 5% inflation, while your wage generates the bulk of your capital.
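A minimal sketch of the same idea (plain Julia, assuming a constant 5% inflation rate): the real value of a fixed nominal amount, expressed in today's euros, shrinks over time, which hurts a deposit but helps a debt.
```julia
inflation = 0.05

# Value of a fixed nominal amount after `years` years, expressed in today's euros
real_value(nominal, years) = nominal / (1 + inflation)^years

real_value(1000.0, 10)    # ≈ 613.9 euro: the deposit loses purchasing power
real_value(-1000.0, 10)   # ≈ -613.9 euro: the debt becomes easier to repay
```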
### Income
To be made
### Spending
To be made
### Bibliography
```{bibliography}
``` | Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.1 | 33ac6ab881f950cf1b467a5603558a04b2f7d7cf | docs | 197 | # Glossary
The glossary is intended to give you an overview of all the financial jargon needed to understand the financial planning tools.
## Exchange traded fund
## stock
## asset class
| Peccon | https://github.com/korilium/Peccon.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 344 | module AlgebraicNumbers
using Nemo
import PolynomialRoots
export AlgebraicNumber
export *,+,-,/,^,root,==,inv
export sqrt,cbrt
export exp_alg,cos_alg,sin_alg
export log_alg,acos_alg,asin_alg
export pow2
include("algebraic.jl")
include("trig.jl")
include("promote.jl")
include("newton.jl")
include("inv_totient.jl")
end
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 7534 | # Exact representation of algebraic numbers
# (Numbers that are roots of polynomials with integer coefficients)
# And also arithmetic on algebraic numbers,
# including +, -, *, /, and radicals.
import PolynomialRoots
import Base.zero,Base.one
import Base.+,Base.-,Base.*,Base./,Base.inv
import Base.abs,Base.conj
import Base.real,Base.imag
import Base.==
# see: http://nemocas.org/nemo-0.4.pdf
# An algebraic number,
# consisting of the minimal polynomial of the number,
# an arbitrary-precision approximation of the number,
# and prec which specifies the minimal distance between roots of p
# TODO: apprx has to be complex.
struct AlgebraicNumber{T<:Integer,F<:AbstractFloat} <: Number
coeff::Vector{T}
apprx::Complex{F}
prec::F
end
# algebraic number from just poly and approximation.
# computes precision and simplifies as well.
function AlgebraicNumber(coeff::Vector{T}, apprx::Complex{F}) where {T<:Integer,F<:AbstractFloat}
an = AlgebraicNumber{T,F}(coeff, apprx, calc_precision(coeff, apprx))
return simplify(an)
end
# AlgebraicNumber from any integer type
AlgebraicNumber(x::T) where {T<:Integer} =
AlgebraicNumber(BigInt[-x,one(T)], Complex{BigFloat}(x))
# AlgebraicNumber from rationals
AlgebraicNumber(x::Rational) =
AlgebraicNumber(BigInt[-numerator(x), denominator(x)], Complex{BigFloat}(x))
AlgebraicNumber(x::Complex) =
AlgebraicNumber(real(x)) + AlgebraicNumber(imag(x))*root(AlgebraicNumber(-1),2)
function poly_from_coeff(a)
R,x=polynomial_ring(Nemo.FlintZZ,"x")
sum([a[i]*x^(i-1) for i=1:length(a)])
end
function is_displayed_exactly(an)
io = IOBuffer()
show(io,convert(Complex{Float64},an.apprx))
displ = String(take!(io))
from_displ = AlgebraicNumber(Complex{Rational{BigInt}}(parse(Complex{BigFloat}, displ)))
from_displ==an, displ
end
import Base.show
# TODO: only show up to precision
function show(io::IO, an::AlgebraicNumber)
display_exact, displ = is_displayed_exactly(an)
display_exact || print(io, "≈")
print(io, displ)
end
#get_coeffs(p::Nemo.ZZPolyRingElem) = pointer_to_array(convert(Ptr{Int64}, p.coeffs), (p.length,))
get_coeffs(p::Nemo.ZZPolyRingElem) = [BigInt(Nemo.coeff(p,i)) for i=0:Nemo.degree(p)]
prec_roots(a::Vector{T}) where {T<:Integer} = PolynomialRoots.roots(convert(Array{BigFloat},a))
# TODO: make sure roots returns distinct roots
# Given an algebraic number, find minimum precision required
# to specify it among roots of an.p
# TODO: handle case of repeated roots precisely
function calc_precision(coeff::Vector{T}, apprx::Complex{F}) where {T<:Integer,F<:AbstractFloat}
# compute smallest distance between all pairs of elements in x
function min_pairwise_dist(x)
biginf = convert(F,Inf)
n = length(x)
if n<=1
return biginf
else
pdists = [i < j ? abs(x[i]-x[j]) : biginf for i=1:n,j=1:n]
return minimum(pdists)
end
end
# first, find all roots of p
rts = prec_roots(coeff)
# first, trivial case
if length(rts)==1
return convert(F, Inf)
end
# find minimum pairwise distance between roots;
# multiply by 0.5 safety factor
return 0.5*min_pairwise_dist(convert(Vector{Complex{F}},rts))
end
# simplify an algebraic number by reducing p to the minimal polynomial.
# This assumes that calc_precision! has already been called.
function simplify(an::AlgebraicNumber)
# for all factors of an.p, find the one that matches our roots
# If linear polynomial, then already irreducible.
if length(an.coeff)<=2
return an
end
# Otherwise, factor out.
R, x = polynomial_ring(Nemo.FlintZZ, "x")
p = R(map(Nemo.FlintZZ, an.coeff))
fctr_dict = Nemo.factor(p)
#fctrs = keys(fctr_dict)
fctrs = [p for (p,e) in fctr_dict]
# first, trivial case
if length(fctrs)==1
# irreducible case
if first(values(fctr_dict))==1
return AlgebraicNumber(get_coeffs(first(fctrs)),an.apprx,an.prec)
end
# reducible case
coeffs1 = get_coeffs(first(fctrs))
apprx1 = an.apprx
return AlgebraicNumber(coeffs1,apprx1,calc_precision(coeffs1,apprx1))
end
# case where more than one factor exists
mindists = [minimum(abs.(an.apprx .- prec_roots(get_coeffs(fctr)))) for fctr in fctrs]
(newprec, i) = findmin(mindists)
fctr = collect(fctrs)[i]
return AlgebraicNumber(get_coeffs(fctr),an.apprx,newprec)
end
function ==(an1::AlgebraicNumber,an2::AlgebraicNumber)
cf1 = an1.coeff
cf2 = an2.coeff
(cf1./cf1[end])==(cf2./cf2[end]) || return false
prec1 = calc_precision(an1.coeff, an1.apprx)
prec2 = calc_precision(an2.coeff, an2.apprx)
return abs(an1.apprx-an2.apprx)<min(prec1,prec2)
end
inv(an::AlgebraicNumber) = AlgebraicNumber(reverse(an.coeff), inv(an.apprx))
# interleave each element of a with n zeros
interleave(a,n) = vec(vcat(a',zeros(Int64,n,length(a))))
function root(an::AlgebraicNumber,n::Int64)
if n==0
throw(ArgumentError("n must be nonzero"))
end
if n==1
return an
end
if n < 0
an = inv(an)
n = -n
end
# TODO: quickly calculate precision
return AlgebraicNumber(interleave(an.coeff, n-1), an.apprx^(1/n))
end
import Base.sqrt
import Base.cbrt
sqrt(an::AlgebraicNumber) = root(an,2)
cbrt(an::AlgebraicNumber) = root(an,3)
# TODO: special, more efficient cases for ^2 and ^3
function pow2(an::AlgebraicNumber)
cfs = an.coeff
# first check if it is already in the form of a square root.
if all(cfs[2:2:end] .== 0)
pp_cfs = cfs
else
cfs2 = [iseven(i) ? -cfs[i] : cfs[i] for i=1:length(cfs)]
pp = poly_from_coeff(cfs)*poly_from_coeff(cfs2)
pp_cfs = get_coeffs(pp)
end
p2 = pp_cfs[1:2:end]
return AlgebraicNumber(p2, an.apprx*an.apprx)
end
# partially simplify a polynomial b
# eliminating repeated factors
reduce_repeated_factors(p::Nemo.ZZPolyRingElem) = prod([p for (p,e) in Nemo.factor(p)]; init=one(p))
# multiplication
function *(an1::AlgebraicNumber,an2::AlgebraicNumber)
if an1==0 || an2==0
# TODO: don't handle this explicitly
return zero(AlgebraicNumber)
end
# check if p==q, if then use a more optimized and correct routine
#if an1.coeff == an2.coeff
# return
#end
p = composed_product(an1.coeff, an2.coeff)
return AlgebraicNumber(p, an1.apprx * an2.apprx)
end
function +(an1::AlgebraicNumber,an2::AlgebraicNumber)
p = composed_sum(an1.coeff, an2.coeff)
return AlgebraicNumber(p, an1.apprx + an2.apprx)
end
function -(an1::AlgebraicNumber)
cfs = copy(an1.coeff)
for i=1:2:length(cfs)
cfs[i]=-cfs[i]
end
return AlgebraicNumber(cfs, -an1.apprx, an1.prec)
end
-(an1::AlgebraicNumber,an2::AlgebraicNumber) = an1+(-an2)
/(an1::AlgebraicNumber,an2::AlgebraicNumber) = an1*(inv(an2))
# the complex conjugate of an algebraic number has the same minimal polynomial
conj(an::AlgebraicNumber) = AlgebraicNumber(an.coeff,conj(an.apprx),an.prec)
abs(an::AlgebraicNumber) = sqrt(an*conj(an))
zero(::Type{AlgebraicNumber}) = AlgebraicNumber(BigInt[0, 1],Complex{BigFloat}(0.0),BigFloat(1.0))
one(::Type{AlgebraicNumber}) = AlgebraicNumber(BigInt[-1,1],Complex{BigFloat}(1.0),BigFloat(1.0))
real(an::AlgebraicNumber) = (an+conj(an))*AlgebraicNumber(BigInt[1,-2], BigFloat(0.5)+0im,BigFloat(0.5))
imag(an::AlgebraicNumber) = (an-conj(an))*AlgebraicNumber(BigInt[1,0,4],BigFloat(-0.5)*im,BigFloat(0.5))
# take roots of a polynomial,
# and return them as algebraic numbers
function alg_roots(coeff::Vector{Integer})
#TODO
end
confirm_algnumber(b) = sum(b.coeff .* [b.apprx^(i-1) for i=1:length(b.coeff)])
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 1594 | function all_divisors(factors::Nemo.Fac{T}) where T <: Integer
divisors = []
function generate_divisors(curIndex::Int, curDivisor::T, factors::Nemo.Fac)
result = iterate(factors, curIndex)
if isnothing(result)
push!(divisors, curDivisor)
return
else
(pf, nextIndex) = result
end
for i = 0:pf.second
generate_divisors(nextIndex, curDivisor, factors);
curDivisor *= pf.first
end
end
generate_divisors(1, T(1), factors)
divisors
end
# Find all inverses of the Euler Totient function.
# This implements the algorithm of Contini, Croot, and Shparlinski (2006)
function inv_totient(x::T) where T <: Integer
invs = Set{T}()
function totient_reps(x::T, factor_list::Vector{Tuple{T,Int}})
if x<1
return
end
if x==1
m = prod([p[1]^(p[2]+1) for p in factor_list[2:end]])
push!(invs, m)
end
# TODO: Don't need to recalcuate this each time.
factors = Nemo.factor(x)
# Find all divisors of x of the form (p^0)*(p-1), where p is a prime
for divisor in all_divisors(factors)
d = divisor+1
if Nemo.is_prime(d) && d > factor_list[end][1]
pair = (d, 0)
totient_reps(div(x, divisor), cat(factor_list, pair, dims=1))
end
end
# Find all divisors of x of the form (p^γ)*(p-1), γ>=1, where p is a prime
for factor in factors
for γ = 1:factor.second
d = (factor.first^γ)*(factor.first-1)
if divrem(x, d)[2]==0 && factor.first > factor_list[end][1]
pair = (factor.first, γ)
totient_reps(div(x, d), cat(factor_list, pair, dims=1))
end
end
end
end
totient_reps(x, [(T(0),0)])
return invs
end
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 3039 | # _Fast_ composed sums and composed products of polynomials,
# using the algorithm described in:
# "Fast computation of special resultants"
# by Bostan, Flajolet, Salvy, and Schost
# derivative of polynomial
derivative(c::Vector) = c[2:end] .* (1:length(c)-1)
function polyinv(coeffs::Vector, n)
R, x = Nemo.power_series_ring(Nemo.FlintQQ, n, "x")
a = R(map(Nemo.FlintQQ, coeffs), length(coeffs), n, 0)
ai = inv(a)
return Nemo.QQFieldElem[coeff(ai,i) for i=0:n-1]
end
# compute newton power series of polynomial given with coefficients coeff,
# in base field R,x.
# See fig.1 in reference
function to_newton(coeffs::Vector{BigInt},n,R,x)
# first, make monic.
coeffs = coeffs//coeffs[end]
d = length(coeffs)-1
a_cfs = reverse(derivative(coeffs))
b_cfs = reverse(coeffs)
# initialize power series polynomials
a = R(map(Nemo.FlintQQ, a_cfs))
b = R(map(Nemo.FlintQQ, b_cfs))
b0 = R(polyinv(b_cfs, n))
c = truncate(a*b0, d)
r = R()
x_power = R(1)
x_d = x^d
l = round(Int64,floor(n/d))
for j = 0 : l
r += c*x_power
x_power *= x_d
c = -mullow(shift_right(b*c,d),b0,d)
end
return r
end
to_array(p) = Rational{BigInt}[Rational(coeff(p,i)) for i=0:Nemo.degree(p)]
# tr: traces i.e. newton series
# This algorithm is based on the Leverrier-Faddeev algorithm
# see: http://math.stackexchange.com/questions/405822/what-is-the-fastest-way-to-find-the-characteristic-polynomial-of-a-matrix
function from_newton(tr::Vector{T}) where {T<:Number}
# special case
if tr==[1]
return T[0,1]
end
n = length(tr)
c = Array{T}(UndefInitializer(),n)
c[end] = one(T)
for k = 1 : n-1
next_c = -sum(tr[2:(k+1)].*c[end-k+1:end])/k
c[end-k] = next_c
end
return c
end
# Hadamard (element-wise) product of two polynomials
function hadm(p,q,R)
n = min(Nemo.degree(p),Nemo.degree(q))
R([Nemo.coeff(p,i)*Nemo.coeff(q,i) for i=0:n])
end
# composed product of two polynomials, given as coeffs p and q
function composed_product(p::Vector{BigInt},q::Vector{BigInt})
# compute newton series
n = (length(p)-1)*(length(q)-1)+1
R, x = Nemo.polynomial_ring(Nemo.FlintQQ, "x")
a = to_newton(p,n,R,x)
b = to_newton(q,n,R,x)
# multiply newton series and invert
pq = from_newton(to_array(hadm(a,b,R)))
# convert to integer and return
return map(numerator, pq*lcm(map(denominator, pq)))
end
# composed sum of two polynomials, given as coeffs p and q
function composed_sum(p::Vector{BigInt},q::Vector{BigInt})
# compute newton series
n = (length(p)-1)*(length(q)-1)+1
R, x = Nemo.polynomial_ring(Nemo.FlintQQ, "x")
a = to_newton(p,n,R,x)
b = to_newton(q,n,R,x)
# exp series
ee = R([Nemo.FlintQQ(1//factorial(BigInt(i))) for i=0:n])
eei = R([Nemo.FlintQQ( factorial(BigInt(i))) for i=0:n])
# multiply newton series and invert
m = mullow(hadm(a,ee,R),hadm(b,ee,R),n+1)
pq = from_newton(to_array(hadm(m,eei,R)))
# convert to integer and return
return map(numerator, pq*lcm(map(denominator, pq)))
end
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 1080 | # conversions and promotions from integer and rational types to algebraic number types
import Base.convert
import Base.promote_rule
# Algebraic number from integer
#convert{T<:Integer}(::Type{AlgebraicNumber}, x::T) =
# AlgebraicNumber(BigInt[-x,one(T)], Complex{BigFloat}(x))
# Algebraic number from rational
#convert{T<:Integer}(::Type{AlgebraicNumber}, x::Rational{T}) =
# AlgebraicNumber(BigInt[-num(x), den(x)], Complex{BigFloat}(x))
convert(::Type{T}, x::Number) where {T<:AlgebraicNumber} = AlgebraicNumber(x)
# promotions
promote_rule(x::Type{T}, y::Type{AlgebraicNumber{S,F}}) where {T<:Integer,S,F} = AlgebraicNumber
promote_rule(x::Type{Rational{T}},y::Type{AlgebraicNumber{S,F}}) where {T<:Integer,S,F} = AlgebraicNumber
promote_rule(::Type{Bool}, ::Type{AlgebraicNumber{S, F}}) where {S, F} = AlgebraicNumber
# conversions back
function convert(::Type{Int64},an::AlgebraicNumber)
c = an.coeff
if length(c)==2 && abs(c[2])==1
return convert(Int64, -c[1]*c[2])
else
throw(InexactError(:convert, Int64, an))
end
end
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 1758 | # compute exp(pi*i*q),
# which is algebraic if q is rational.
function exp_alg(q::Rational)
qh = q/2
# first, obtain minimal polynomial
s, x = polynomial_ring(AlgebraicNumbers.Nemo.ZZ, "x")
poly = cyclotomic(denominator(qh), x)
coeffs = [convert(BigInt, coeff(poly, i)) for i=0: denominator(qh)]
# now, select root.
apprx = exp(im*BigFloat(pi)*q)
# Finally, return polynomial w.r.t. that root
return AlgebraicNumber(coeffs, apprx)
end
cos_alg(q::Rational) = real(exp_alg(q))
sin_alg(q::Rational) = imag(exp_alg(q))
# checks if a polynomial is equal to any of the
# cyclotomic polynomials n in a list of candidates
function is_cyclotomic(poly, candidates, x)
for n in candidates
if poly == cyclotomic(n, x)
return (true, n)
end
end
return (false, 0)
end
# compute log(a)/(pi*i),
# which is rational if a is a root of unity.
# If a is not a root of unity, returns Nothing
function log_alg(a::AlgebraicNumber)
s, x = polynomial_ring(Nemo.ZZ, "x")
deg = length(a.coeff)-1
poly = s(map(Nemo.ZZ, a.coeff))
(is_cycl, denom) = is_cyclotomic(poly, inv_totient(deg), x)
if is_cycl
# TODO: should this be BigInt?
num = round(Int, denom*imag(log(a.apprx)/pi))
return num//denom
else
return Nothing
end
end
function acos_alg(x::AlgebraicNumber)
# TODO: check if the number can be made a root of unity.
# First, make the number a root of unity.
y = sqrt(1 - x^2)
z = x + sqrt(AlgebraicNumber(-1))*y
# Now take log
log_alg(z)
end
function asin_alg(x::AlgebraicNumber)
# TODO: check if the number can be made a root of unity.
# First, make the number a root of unity.
y = sqrt(1 - x^2)
z = y + sqrt(AlgebraicNumber(-1))*x
# Now take log
log_alg(z)
end
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | code | 4611 | using Test
using AlgebraicNumbers
import AlgebraicNumbers.inv_totient
function test1(n)
coeff = rand(1:10,n+1)
a = AlgebraicNumber(coeff, Complex{BigFloat}(0.0), BigFloat(0.0))
a.apprx = prec_roots(a.coeff)[rand(1:n)]
calc_precision!(a)
a = simplify(a)
@show a.coeff, convert(Complex{Float64},a.apprx), a.prec
b = root(a,2)
#b = a*a
@show b.coeff, convert(Complex{Float64},b.apprx), b.prec
#c = root(b,2)
c = b*b
#c = pow2(b)
@show c.coeff, convert(Complex{Float64},c.apprx), c.prec
end
function test2(n)
coeff = rand(1:10,n+1)
a = AlgebraicNumber(coeff, BigFloat(0.0), BigFloat(0.0))
a.apprx = roots(a.coeff)[rand(1:n)]
calc_precision!(a)
coeff = rand(1:10,n+1)
b = AlgebraicNumber(coeff, BigFloat(0.0), BigFloat(0.0))
b.apprx = roots(b.coeff)[rand(1:n)]
calc_precision!(b)
c = a*b
abs(roots(c.coeff) .- c.apprx)
end
function test3()
a = sqrt(AlgebraicNumber(2))
b = sqrt(AlgebraicNumber(3))
axb = a*b
# @show axb, axb.coeff
@test abs(AlgebraicNumbers.confirm_algnumber(axb)) < 1e-10
apb = a+b
# @show apb, apb.coeff
@test abs(AlgebraicNumbers.confirm_algnumber(apb)) < 1e-10
end
function test4()
# simple test
sqrt2 = sqrt(AlgebraicNumber(2))
@test sqrt2^2 == 2
# Golden ratio
ϕ = 1//2 + sqrt(AlgebraicNumber(5)/4)
# As we all know, this has the property that:
@test 1+1/ϕ == ϕ
end
function plastic_constant_test()
# see http://mathworld.wolfram.com/PlasticConstant.html
a = sqrt(AlgebraicNumber(69))
n = cbrt(9-a) + cbrt(9+a)
p = n*inv(cbrt(AlgebraicNumber(18)))
@test p-1==1/(p^4)
@test p+1==p^3
end
function test5()
# just an answer to a stackoverflow Q.
# http://math.stackexchange.com/questions/422233/how-to-find-a-minimal-polynomial-field-theory
n = sqrt(AlgebraicNumber(9*5))-sqrt(AlgebraicNumber(4*7))+sqrt(AlgebraicNumber(35))
d = 1-sqrt(AlgebraicNumber(5))+sqrt(AlgebraicNumber(7))
α=n/d
@test α.coeff == BigInt[3596, 2312, -280, -156, 19]
end
function test_abs()
ii = sqrt(AlgebraicNumber(-1))
@test conj(ii) == -ii
@test abs(ii) == 1
@test abs(AlgebraicNumber(-7//8))==7//8
end
function test_real_imag()
a = root(AlgebraicNumber(-1),5)
alg_im = sqrt(AlgebraicNumber(-1))
@test real(a) + alg_im*imag(a) == a
x = rand(0:20)//10
@test cos_alg(x - 1//2) == sin_alg(x)
x = 2//1
@test cos_alg(x - 1//2) == sin_alg(x)
end
# TODO: add some more tests
function test_pow2()
a = AlgebraicNumber(3//2)
@test pow2(a) == AlgebraicNumber(9//4)
end
function test_show()
a = IOBuffer()
show(a, sqrt(AlgebraicNumber(-1))+1)
@test String(take!(a)) == "1.0 + 1.0im"
a = IOBuffer()
show(a, sqrt(AlgebraicNumber(2)))
@test String(take!(a))[1] == '≈'
end
function test_log_alg()
@test log_alg(exp_alg(1//9)) == 1//9
@test log_alg(AlgebraicNumber(2)) == Nothing
end
function test_trig_alg()
@test acos_alg(cos_alg(3//7)) == 3//7
@test asin_alg(sin_alg(3//7)) == 3//7
@test acos_alg(AlgebraicNumber(1)) == 0//1
@test asin_alg(AlgebraicNumber(1)) == 1//2
@test asin_alg(AlgebraicNumber(3//2)) == Nothing
@test acos_alg(AlgebraicNumber(3//2)) == Nothing
end
function totient(x::T) where T <: Integer
prod([(fac.first^(fac.second-1))*(fac.first-1) for fac in Nemo.factor(x)])
end
# Test the correctness of inv_totient for all totients up to m
function check_inv_totient(m::T) where T <: Integer
# Lower bound on phi(n)==m,
# And thus worst-case maximum range we need to consider.
n = 2*m^2
tots = [totient(i) for i=1:n]
for i = 1:m
_gold = findall(==(i), tots)
_test = sort(collect(inv_totient(i)))
length(_gold) == length(_test) || return false
all(_gold .== _test) || return false
end
return true
end
function test_inv_totient(m::T) where T <: Integer
@test check_inv_totient(m)
end
test3()
test4()
test5()
plastic_constant_test()
test_abs()
test_real_imag()
test_pow2()
test_show()
test_log_alg()
test_trig_alg()
# testcase of issue #5
@test AlgebraicNumber(1)+sqrt(AlgebraicNumber(-1)) != AlgebraicNumber(2)
# sqrt2 = root(AlgebraicNumber(2),2)
# an.p = (x^2-2)*(x^2-3)
# calc_precision!(an)
# an = simplify(an)
# @show an.p (should be x^2-2)
# test multiplication of square roots
sqrt2 = root(AlgebraicNumber(2),2)
sqrt3 = root(AlgebraicNumber(3),2)
sqrt6=sqrt2*sqrt3
sqrt6_ = root(AlgebraicNumber(6),2)
@test sqrt6 == sqrt6_
#
an=root(root(AlgebraicNumber(3),2) + AlgebraicNumber(-1),2)
b = an*an
@test abs(AlgebraicNumbers.confirm_algnumber(b)) < 1e-10
| AlgebraicNumbers | https://github.com/anj1/AlgebraicNumbers.jl.git |
|
[
"MIT"
] | 0.1.11 | fbb7013194404c2c6c822af6259766f9ad245931 | docs | 7284 | AlgebraicNumbers.jl
------
[](https://github.com/anj1/AlgebraicNumbers.jl/actions)
[](https://coveralls.io/github/anj1/AlgebraicNumbers.jl?branch=master)
#### `sqrt(2)^2 == 2`
This package provides a way of dealing with real and complex numbers exactly and with infinite precision. To see how this works, it's useful to compare with familiar number types like integers and so on. Integer arithmetic (e.g. 2+2==4) is exact but is limited to the operations +, -, and \*. While adding, subtracting, or multiplying two integers always produces another integer, that's not always true with division. With [*rational* numbers](http://docs.julialang.org/en/release-0.4/manual/complex-and-rational-numbers/#rational-numbers), division is included as well. Since many numerical operations can be reduced to sequences of the four elementary operations, this allows a wider range of exact arithmetic to be carried out. *Algebraic* numbers take this further, including not only the four elementary operations, but also *root-taking* operations, for example sqrt() and cbrt(). More generally, the *n*th root of an algebraic number `x` can be taken with:
```julia
root(x, n)
```
And this will be represented exactly. For instance, you can see for yourself that:
```julia
# sqrt(x) is defined as root(x, 2)
sqrt(AlgebraicNumber(2))^2 == 2
```
And this is true for any integer:
```julia
# x = any integer
sqrt(AlgebraicNumber(x))^2 == x
```
Here, `AlgebraicNumber` is just a constructor that takes a number (either an integer or a rational number) and produces an algebraic number.
You can do arithmetic on algebraic numbers and all results will be represented exactly:
```julia
sqrt2 = sqrt(AlgebraicNumber(2))
sqrt3 = sqrt(AlgebraicNumber(3))
sqrt6 = sqrt2*sqrt3
# a simple example
@assert sqrt6 == sqrt(AlgebraicNumber(6))
# slightly more complicated
x = 1 + sqrt6
@assert (x - 1)^2 == 6
# even more complicated
@assert sqrt6 == sqrt(x^2 - 2*sqrt6 - 1)
# and here's another one
y = sqrt(x)
@assert (y^2 - 1)^2 == 6
```
Even *more* generally, arbitrary root-taking operations are possible. That is, you can represent the root of any polynomial (with integer, rational, or algebraic coefficients) as an algebraic number, even if that root doesn't have a representation in terms of a sequence of +, -, /, *, and root-taking operations.
#### Displaying algebraic numbers
Note that sometimes when displaying an algebraic number, you might get a '≈' symbol, like:
```julia
julia> sqrt(AlgebraicNumber(2))
≈1.4142135623730951 + 0.0im
```
That is, something that looks like an approximate complex number, not an exact number. This is *only* the library's way of *displaying* algebraic numbers, and it's simply because in general it is impossible to represent an algebraic number exactly in decimal notation no matter how many digits you display! Internally, algebraic numbers are represented exactly, but they are not represented using decimal or floating-point representation (more on internal representation below).
When displaying algebraic numbers that *can* be represented exactly, they are shown as-is:
```
julia> AlgebraicNumber(2)
2.0 + 0.0im
```
#### Internal implementation
Computer algebra systems (CASes) also allow you to represent algebraic numbers, but the method they use is somewhat different. In CAS systems, numbers are typically represented using the expressions used to generate them. So `sqrt(2)` would be literally represented as `sqrt(2)`. Thus `^2` and `sqrt` cancel out to give `2`. That approach is flexible but it has a fairly large computational cost. The way algebraic numbers are represented here is different - they are represented as discrete roots of minimal polynomials. This approach is a bit more limiting (for example, the `exp()` of an algebraic number is not necessarily an algebraic number) but it is more computationally efficient and allows doing things like equality testing very rapidly and in a way that is always guaranteed to give the correct result, no matter how complicated the algebraic number is. This is something that CAS systems often cannot do.
The tradeoff in using the minimal polynomial representation is that operations like addition and multiplication become non-trivial to compute, since we need to compute a new minimal polynomial, and this involves computation of [resultants](http://specfun.inria.fr/bostan/publications/BoFlSaSc06.pdf) and polynomial factoring. The code for computing resultants has been written in pure julia (in `newton.jl`) and the polynomial factorization is done using the FLINT library, wrapped with the excellent [Nemo.jl](https://github.com/wbhart/Nemo.jl) package. If you are just using this package, though, you usually do not need to worry about any of this.
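For instance, the stored data can be inspected directly. The `coeff` field (from `algebraic.jl` above) holds the coefficients of the minimal polynomial, constant term first; the values in the comments are what the representation described above implies:
```julia
using AlgebraicNumbers

a = AlgebraicNumber(2)   # stored as the root of x - 2
s = sqrt(a)              # stored as a root of its minimal polynomial, x^2 - 2

a.coeff                  # BigInt[-2, 1]
s.coeff                  # expected: BigInt[-2, 0, 1]
s^2 == a                 # true; equality is decided from minimal polynomials and the stored approximations
```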
See [this blog post](https://pseudoprofound.wordpress.com/2016/07/09/some-fun-with-algebraic-numbers/) for some more description and some neat examples.
#### Extra functions
There are a few extra utility functions. For example, `exp_alg(x)` returns exp(iπx), which, assuming x is a rational number, is algebraic. For example:
```julia
# calculate exp(im*pi*2/3) as an algebraic number
x = exp_alg(2//3)
@assert x == sqrt(AlgebraicNumber(-3))/2 - AlgebraicNumber(1)/2
```
Similarly, `cos_alg(x)` and `sin_alg(x)` return the cosine and sine of πx, which is algebraic if x is rational. These numbers are known as the [trigonometric](https://en.wikipedia.org/wiki/Trigonometric_number) [numbers](https://en.wikipedia.org/wiki/Trigonometric_constants_expressed_in_real_radicals#2.25.C2.B0:_regular_octacontagon_.2880-sided_polygon.29):
```julia
# An example trigonometric number
x = sin_alg(1//8)
y = sqrt(2 - sqrt(AlgebraicNumber(2)))/2
@assert x == y
# Another example
x = cos_alg(2//5)
y = (sqrt(AlgebraicNumber(5))-1)/4
@assert x == y
```
The inverses of these functions also exist. `log_alg(x)` returns log(x)/iπ, which, assuming x is a root of unity, is rational. If x is not a root of unity, this function returns `Nothing`.
```julia
x = exp_alg(3//7)
y = log_alg(x)
@assert y == 3//7
```
There are also inverse trigonometric functions `acos_alg` and `asin_alg`. If the input is a trigonometric number, the output will be a rational fraction of π, otherwise the return value will be `Nothing`. These functions are useful when doing various geometric computations.
```julia
x = sqrt(AlgebraicNumber(3))/2
@assert acos_alg(x) == 1//6
@assert asin_alg(x) == 1//3
# More complicated example.
x = sqrt(10 + 2*sqrt(AlgebraicNumber(5)))/4
@assert acos_alg(x) == 1//10
@assert asin_alg(x) == 4//10
```
Internally, these functions work by calling `log_alg`. `log_alg` checks whether the minimal polynomial of the algebraic number is a cyclotomic polynomial. If it is the nth cyclotomic polynomial, then the denominator of the result is simply n. The numerator can then be calculated by taking the imaginary part of the approximate floating-point log of the number, dividing by π, multiplying by the denominator, and rounding to the nearest integer.
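As a sketch of that last step (mirroring the expression used in `trig.jl` above; here the denominator 14 is written by hand instead of being recovered from the cyclotomic check):
```julia
using AlgebraicNumbers

a = exp_alg(3//7)    # exp(iπ·3/7), a primitive 14th root of unity
denom = 14           # index n of the matching cyclotomic polynomial Φₙ
num = round(Int, denom * imag(log(a.apprx)) / pi)
num // denom         # 6//14 == 3//7, the value returned by log_alg(a)
```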
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 859 | using TropicalCyclonePotentialIntensity
using Documenter
DocMeta.setdocmeta!(TropicalCyclonePotentialIntensity, :DocTestSetup, :(using TropicalCyclonePotentialIntensity); recursive=true)
makedocs(;
modules=[TropicalCyclonePotentialIntensity],
authors="Argel Ramirez Reyes <[email protected]> and contributors",
repo="https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl/blob/{commit}{path}#{line}",
sitename="TropicalCyclonePotentialIntensity.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://aramirezreyes.github.io/TropicalCyclonePotentialIntensity.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl",
devbranch="main",
)
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 727 | module TropicalCyclonePotentialIntensity
# Write your package code here.
using Unitful: @u_str, unit, ustrip, Quantity
export mixing_ratio_to_specific_humidity,
specific_humidity_to_mixing_ratio,
get_saturation_vapor_pressure,
get_partial_vapor_pressure,
get_mixing_ratio,
get_specific_entropy,
get_lifted_condensation_level,
get_potential_temperature,
get_virtual_temperature,
###Potential intensity
get_buoyancy_of_lifted_parcel,
get_cape_and_outflow_temp_from_sounding,
get_potential_intensity_of_tropical_cyclone
include("physicalconstants.jl")
include("physicsfunctions.jl")
include("rootfinding.jl")
include("potentialintensity.jl")
const ϵ = Dryair.R / Watervapor.R
end
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 759 |
const epsilon = 18.016/28.966
const g = 10u"m/s/s" #acceleration of gravity
struct Substance{T}
cp :: Union{Nothing,T}
cv :: Union{Nothing,T}
R :: Union{Nothing,T}
Lv :: Union{Nothing,T}
Lf :: Union{Nothing,T}
end
Substance{T}(;cp = nothing, cv = nothing, R = nothing, Lv = nothing, Lf = nothing) where T = Substance{T}(cp,cv,R,Lv,Lf)
const Dryair = Substance{Quantity}(
cp = 1005.7u"J/kg/K", #J/kg/k at 1013 hPa
cv = 718.0u"J/kg/K",
R = 287.05u"J/kg/K" # J/kg/k
)
const Liquidwater = Substance{Quantity}(
Lv = 2.501e6u"J/kg", #J/kg
Lf = 3.33e5u"J/kg",
cp = 4190.0u"J/kg/K" #j/kg/k
)
const Watervapor = Substance{Quantity}(
R = 461.52u"J/kg/K", #j/kg/K
cp = 1870.0u"J/kg/K"
)
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 7657 |
"""
get_potential_temperature(temperature, pressure, reference_pressure)
Compute potential temperature from temperature and pressure.
"""
function get_potential_temperature(temperature, pressure, reference_pressure)
exponent = ustrip(Dryair.R / Dryair.cp)
return temperature * (reference_pressure/pressure)^exponent
end
function get_potential_temperature(temperature :: Quantity, pressure :: Quantity, reference_pressure :: Quantity)
exponent = Dryair.R / Dryair.cp
return temperature * (reference_pressure/pressure)^exponent
end
"""
get_virtual_temperature(temperature, specific_humidity)
Compute virtual temperature from temperature and specific humidity.
Receive temperature (K) and specific humidity (unitless kg/kg) and compute the virtual temperature
"""
function get_virtual_temperature(temperature, specific_humidity)
return temperature*(one(specific_humidity) + specific_humidity*(one(specific_humidity)/epsilon - one(specific_humidity)))
end
function get_virtual_temperature(temperature :: Quantity, specific_humidity :: Quantity)
return temperature*(one(specific_humidity) + specific_humidity*(one(specific_humidity)/epsilon - one(specific_humidity)))
end
"""
get_virtual_temperature(temperature,mixing_ratio_total_water,mixing_ratio_water_vapor)
Receive temperature (K) and mixing ratios of total water and water vapor (unitless kg/kg) and compute the virtual temperature
"""
function get_virtual_temperature(temperature,mixing_ratio_total_water,mixing_ratio_water_vapor)
return temperature*(one(mixing_ratio_water_vapor) + mixing_ratio_water_vapor/epsilon)/(1 + mixing_ratio_total_water)
end
"""
specific_humidity_to_mixing_ratio(specific_humidity)
Take a specific humidity (unitless g/g) and return a mixing ratio
"""
function specific_humidity_to_mixing_ratio(specific_humidity)
return specific_humidity / (1 - specific_humidity)
end
"""
mixing_ratio_to_specific_humidity(mixing_ratio)
Take a mixing ratio (unitless g/g) and return a specific humidity
"""
function mixing_ratio_to_specific_humidity(mixing_ratio)
return mixing_ratio / (1 + mixing_ratio)
end
"""
get_saturation_vapor_pressure(T)
Receive temperature T in Kelvin and compute the saturation vapor pressure in hPa from the August-Roche-Magnus formula that approximates the solution to the Clausius-Clapeyron relationship (Wikipedia contributors. (2020, December 19). Clausius–Clapeyron relation. In Wikipedia, The Free Encyclopedia. Retrieved 06:57, December 20, 2020, from https://en.wikipedia.org/w/index.php?title=Clausius%E2%80%93Clapeyron_relation&oldid=995159175)
"""
function get_saturation_vapor_pressure(T)
return 6.112*exp(17.67 * (T-273.15) / (243.5 + (T - 273.15)))
end
function get_saturation_vapor_pressure(T :: Quantity)
return 6.112u"hPa"*exp(17.67 * (T-273.15u"K") / (243.5u"K" + (T - 273.15u"K")))
end
"""
get_partial_vapor_pressure(mixing_ratio,pressure)
Receive a water vapor mixing ratio (unitless g/g) and environmental pressure and compute the partial pressure of water vapor in the same units as the input pressure.
"""
function get_partial_vapor_pressure(mixing_ratio,pressure)
return mixing_ratio*pressure/(epsilon + mixing_ratio)
end
"""
get_mixing_ratio(water_vapor_partial_pressure,env_pressure)
Receive a water vapor partial pressure and the environmental pressure (in the same units) and compute the water vapor mixing ratio (unitless kg/kg).
"""
function get_mixing_ratio(water_vapor_partial_pressure,env_pressure)
return epsilon*water_vapor_partial_pressure/(env_pressure - water_vapor_partial_pressure)
end
"""
get_specific_entropy(temperature,mixing_ratio,pressure)
Receive temperature in Kelvin, water vapor mixing ratio (unitless g/g) and pressure (hPa) and compute the specific entropy of a parcel using the equation in Emanuel (E94, EQN. 4.5.9).
"""
function get_specific_entropy(temperature,mixing_ratio,pressure ; adjust_for_ice_phase = false)
# `adjust_for_ice_phase` uses a modified value of the liquid water specific heat capacity to compensate for the lack of an explicit ice phase when lifting a parcel (personal communication with Kerry Emanuel on April 22, 2022 - Argel Ramirez Reyes).
adjusted_cl = adjust_for_ice_phase ? Liquidwater.cp - 1690u"J/kg/K" : Liquidwater.cp
alv = Liquidwater.Lv + (Watervapor.cp - adjusted_cl)*(temperature - 273.15f0u"K")
vapor_pressure = get_partial_vapor_pressure(mixing_ratio,pressure)
saturation_vapor_pressure = get_saturation_vapor_pressure(temperature)
RH = min(vapor_pressure/saturation_vapor_pressure,1.0)
specific_entropy = (Dryair.cp + mixing_ratio * adjusted_cl) *
log(temperature/unit(temperature)) - Dryair.R * log((pressure - vapor_pressure)/unit(pressure)) +
alv * mixing_ratio / temperature - mixing_ratio * Watervapor.R * log(RH)
end
"""
get_lifted_condensation_level(temperature,relative_humidity,pressure)
Receive temperature in Kelvin, relative humidity (unitless) and pressure (hPa) and compute the lifted condensation level based on Emanuel's E94 "calcsound.f" code at http://texmex.mit.edu/pub/emanuel/BOOK/
"""
function get_lifted_condensation_level(temperature,relative_humidity,pressure)
return pressure * (relative_humidity^(temperature/(1669.0-122.0*relative_humidity-temperature)))
end
function get_lifted_condensation_level(temperature :: Quantity ,relative_humidity ,pressure :: Quantity)
return pressure * (relative_humidity^(temperature/(1669.0u"K"-122.0u"K"*relative_humidity-temperature)))
end
##### Specific to potential intensity
"""
"""
function ∂specific_entropy_∂temp(temperature, mixing_ratio)
∂specific_entropy_∂temp = (Dryair.cp + mixing_ratio * Liquidwater.cp)/temperature - Liquidwater.Lv * mixing_ratio / temperature^2
end
function ∂specific_entropy_∂temp_emanuel(temperature, mixing_ratio, pressure)
CL = Liquidwater.cp - 1690.0f0u"J/kg/K" # Modified value of the liquid water specific heat capacity to compensate for the lack of an explicit ice phase when lifting a parcel (personal communication with Kerry Emanuel on April 22, 2022 - Argel Ramirez Reyes).
alv = Liquidwater.Lv + (Watervapor.cp - CL)*(temperature - 273.15u"K")
saturation_vapor_pressure = get_saturation_vapor_pressure(temperature)
saturation_mixing_ratio = get_mixing_ratio(saturation_vapor_pressure, pressure)
∂specific_entropy_∂temp = (Dryair.cp + mixing_ratio * Liquidwater.cp + alv^2 * saturation_mixing_ratio /(Watervapor.R*temperature^2))/temperature
end
"""
get_specific_entropy_emanuel(temperature,mixing_ratio,pressure)
Receive temperature in Kelvin, water vapor mixing ratio (unitless g/g) and pressure (hPa) and compute the specific entropy of a parcel using the equation in Emanuel (E94, EQN. 4.5.9).
"""
function get_specific_entropy_emanuel(temperature,mixing_ratio,pressure)
CL = Liquidwater.cp - 1690.0f0u"J/kg/K" # Modified value of the liquid water specific heat capacity to compensate for the lack of an explicit ice phase (personal communication with Kerry Emanuel).
alv = Liquidwater.Lv + (Watervapor.cp - CL)*(temperature - 273.15u"K")
saturation_vapor_pressure = get_saturation_vapor_pressure(temperature)
#vapor_pressure = get_partial_vapor_pressure(saturation_vapor_pressure,pressure)
saturation_mixing_ratio = get_mixing_ratio(saturation_vapor_pressure, pressure)
specific_entropy = (Dryair.cp + mixing_ratio * CL) *
log(temperature/unit(temperature)) - Dryair.R * log((pressure - saturation_vapor_pressure)/unit(pressure)) +
alv * saturation_mixing_ratio / temperature
end | TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 10274 | """
get_buoyancy_of_lifted_parcel(tparcel, rparcel, pparcel, t, r, p, ptop=59u"hPa")
Get the buoyancy profile defined for each level `z` as the difference between the virtual temperature of a parcel lifted from the level `p = pparcel` to the level `p(z)` and the virtual temperature of the environment at that level:
`Buoyancy(z) = Tv_lifted(z) - Tv_env(z)`
This computes the lifting condensation level (LCL) to decide if the parcel should be lifted dry-adiabatically or not. Above the LCL it uses the Newton-Raphson method to find the temperature at each level while conserving specific entropy.
"""
function get_buoyancy_of_lifted_parcel(tparcel, rparcel, pparcel, t, r, p, ptop=59*u"hPa")
n_valid_levels = findfirst(<(ptop),p)
p = p[begin:n_valid_levels]
t = t[begin:n_valid_levels]
r = r[begin:n_valid_levels]
tvirtual_diff_parcel_env = zero(t)
parcel_sat_vapor_pressure = get_saturation_vapor_pressure(tparcel)
parcel_vapor_pressure = get_partial_vapor_pressure(rparcel,pparcel)
parcel_rh = min(parcel_vapor_pressure/parcel_sat_vapor_pressure , 1.0)
parcel_specific_entropy = get_specific_entropy(tparcel,rparcel,pparcel; adjust_for_ice_phase = true)
parcel_lcl = get_lifted_condensation_level(tparcel,parcel_rh,pparcel)
levels_below_lcl = findall(>=(parcel_lcl),p)
levels_above_lcl = findall(pres -> ptop < pres < parcel_lcl,p)
for level in levels_below_lcl
tlifted = tparcel*(p[level]/pparcel)^(Dryair.R/Dryair.cp)
tvirtual_lifted = get_virtual_temperature(tlifted,rparcel,rparcel)
tvirtual_env = get_virtual_temperature(t[level],r[level],r[level])
tvirtual_diff_parcel_env[level] = tvirtual_lifted - tvirtual_env
end
for level in levels_above_lcl
initial_guess = t[level]
target_value = parcel_specific_entropy
tlifted = find_root_newton_raphson(temp -> get_specific_entropy_emanuel(temp,rparcel,p[level]), temp -> ∂specific_entropy_∂temp_emanuel(temp,rparcel, p[level]); target_value, initial_guess)
saturation_vapor_pressure_lifted = get_saturation_vapor_pressure(tlifted)
mixing_ratio_lifted = get_mixing_ratio(saturation_vapor_pressure_lifted,p[level])
tvirtual_lifted = get_virtual_temperature(tlifted, rparcel, mixing_ratio_lifted)
tvirtual_env = get_virtual_temperature(t[level],r[level], r[level])
tvirtual_diff_parcel_env[level] = tvirtual_lifted - tvirtual_env
end
return tvirtual_diff_parcel_env
end
function get_buoyancy_of_lifted_parcel(tparcel :: Real, rparcel :: Real ,pparcel :: Real, t :: Array{ <: Real} ,r :: Array{ <: Real},p :: Array{ <: Real}, ptop=59)
ustrip.(get_buoyancy_of_lifted_parcel(u"K"*tparcel , u"kg/kg" * rparcel ,u"hPa" * pparcel, u"K" .* t , u"kg/kg" .* r, u"hPa" .* p, u"hPa" * ptop ))
end
"""
get_potential_intensity_of_tropical_cyclone(sea_surface_temperature, sea_surface_pressure,
                                            pressure, temperature, mixing_ratio;
                                            ck_over_cd = 0.9, reversible_ascent = true,
                                            dissipative_heating = true, vreduc = 0.8)
Compute the minimum pressure at the center and the maximum wind speed of a tropical cyclone using Emanuel's potential intensity theory.
"""
function get_potential_intensity_of_tropical_cyclone(sea_surface_temperature,sea_surface_pressure, pressure, temperature, mixing_ratio; ck_over_cd = 0.9, reversible_ascent=true, dissipative_heating = true, vreduc = 0.8)
initial_level_for_lifting = 1
exponent_central_pressure = 2.0
saturation_vapor_pressure_surface = get_saturation_vapor_pressure(sea_surface_temperature)
#
# *** Find environmental CAPE ***
#
tparcel=temperature[initial_level_for_lifting];
rparcel=mixing_ratio[initial_level_for_lifting]
pparcel=pressure[initial_level_for_lifting]
cape_env, ~ , index_level_of_neutral_buoyancy = get_cape_and_outflow_temp_from_sounding(tparcel,rparcel,pparcel,temperature,mixing_ratio,pressure)
pressure_at_rmax = 950.0u"hPa"
pressure_at_rmax_old = 0.0u"hPa"
saturation_cape_at_rmax = 0.0u"J/kg"
cape_at_rmax = 0.0u"J/kg"
average_virtual_temp = 0u"K"
temp_ratio = 0.0
niter = 1
mixing_ratio_lowest_level = mixing_ratio[initial_level_for_lifting]
temperature_lowest_level = temperature[initial_level_for_lifting]
virtual_temp_lowest_level = get_virtual_temperature(temperature_lowest_level, mixing_ratio_lowest_level, mixing_ratio_lowest_level)
#@show cape_env
while (abs(pressure_at_rmax_old-pressure_at_rmax)) > 0.2u"hPa" && niter < 200
#These three is where the iteration happens
pparcel_approx=min(pressure_at_rmax,1000.0u"hPa") #these two are the ones we are iterating over
rparcel_approx = ϵ*mixing_ratio_lowest_level*sea_surface_pressure / (pparcel_approx*(ϵ+mixing_ratio_lowest_level) - mixing_ratio_lowest_level*sea_surface_pressure) #what in the name of god is this? it is not documented
rparcel_sat=get_mixing_ratio(saturation_vapor_pressure_surface,pparcel_approx)
# *** Find CAPE at radius of maximum winds ***
cape_at_rmax, ~ , index_level_of_neutral_buoyancy = get_cape_and_outflow_temp_from_sounding(tparcel,rparcel_approx,pparcel_approx,temperature,mixing_ratio,pressure)
# *** Find saturation CAPE at radius of maximum winds ***
saturation_cape_at_rmax, temp_outflow, index_lnb =
get_cape_and_outflow_temp_from_sounding(sea_surface_temperature,
get_mixing_ratio(saturation_vapor_pressure_surface,pparcel_approx),
pparcel_approx,
temperature,
mixing_ratio,
pressure)
# *** Estimate of pressure at radius of maximum winds ***
virtual_temp_parcel_sst=get_virtual_temperature(sea_surface_temperature, rparcel_sat, rparcel_sat)
average_virtual_temp = 0.5 * (virtual_temp_lowest_level + virtual_temp_parcel_sst)
temp_ratio = dissipative_heating ? sea_surface_temperature/temp_outflow : 1.0
CAT=cape_at_rmax-cape_env + 0.5 * ck_over_cd * temp_ratio *(saturation_cape_at_rmax - cape_at_rmax)
CAT=max(CAT,0.0u"J/kg")
pressure_at_rmax_old = pressure_at_rmax
pressure_at_rmax = sea_surface_pressure*exp(-CAT / (Dryair.R * average_virtual_temp) )
niter = niter + 1
end
reduction_factor=0.5(1.0 + 1.0/exponent_central_pressure)
CAT=(cape_at_rmax-cape_env)+reduction_factor*ck_over_cd*temp_ratio*(saturation_cape_at_rmax-cape_at_rmax)
#@info CAT
CAT=max(CAT,0.0u"J/kg")
# Calculate the minimum pressure at the eye of the storm
# BE02 EQN. 4
min_pressure_at_center = sea_surface_pressure*exp(-CAT/(Dryair.R*average_virtual_temp))
# Calculate the potential intensity at the radius of maximum winds
# BE02 EQN. 3, reduced by some fraction (default 20%) to account for the reduction
# of 10-m winds from gradient wind speeds (Emanuel 2000, Powell 1980)
vmax=vreduc*sqrt(ck_over_cd*temp_ratio*(saturation_cape_at_rmax-cape_at_rmax))
return min_pressure_at_center, vmax
end
function get_potential_intensity_of_tropical_cyclone(sea_surface_temperature :: Real,sea_surface_pressure :: Real, pressure :: Array{<: Real}, temperature :: Array{<: Real}, mixing_ratio :: Array{<: Real}; ck_over_cd = 0.9, reversible_ascent=true, dissipative_heating = true, vreduc = 0.8)
return ustrip.(get_potential_intensity_of_tropical_cyclone(u"K" * sea_surface_temperature, u"hPa" * sea_surface_pressure, u"hPa" .* pressure, u"K" .* temperature, u"kg/kg" .* mixing_ratio; ck_over_cd, reversible_ascent, dissipative_heating, vreduc))
end
"""
get_cape_and_outflow_temp_from_sounding(tparcel, rparcel, pparcel, t, r, p, ptop=59u"hPa")
Compute cape, outflow temperature and index of neutral buoyancy from thermodynamic profiles.
"""
function get_cape_and_outflow_temp_from_sounding(tparcel, rparcel, pparcel, t, r, p, ptop=59u"hPa")
buoyancy_profile = get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,t,r,p,ptop)
negative_area=0.0*unit(buoyancy_profile[1]*Dryair.R)
positive_area=0.0*unit(buoyancy_profile[1]*Dryair.R)
level_neutral_buoyancy = searchsortedlast(buoyancy_profile, 0.0u"K"; rev=true)
for level in 2:(level_neutral_buoyancy + 1)
area=Dryair.R*(buoyancy_profile[level]+buoyancy_profile[level-1])*(p[level-1]-p[level])/(p[level]+p[level-1])
positive_area += max(area,0.0u"J/kg")
negative_area -= min(area,0.0u"J/kg")
end
outflow_temp = t[level_neutral_buoyancy]
#Add buoyancy of parcel with respect to first level? (lcl may not be a level in the profile)
parcel_buoyancy_area = Dryair.R *(pparcel - p[1])/(pparcel + p[1])
positive_area += parcel_buoyancy_area*max(buoyancy_profile[1],0.0*unit(buoyancy_profile[1]))
negative_area -= parcel_buoyancy_area*min(buoyancy_profile[1],0.0*unit(buoyancy_profile[1]))
#Add residual above inb and t0
## This is unsafe, need to check for bounds
pres_neutral_buoyancy = (p[level_neutral_buoyancy + 1]*buoyancy_profile[level_neutral_buoyancy] - p[level_neutral_buoyancy]*buoyancy_profile[level_neutral_buoyancy + 1]) / (buoyancy_profile[level_neutral_buoyancy] - buoyancy_profile[level_neutral_buoyancy + 1])
residual_area = Dryair.R * buoyancy_profile[level_neutral_buoyancy]*(p[level_neutral_buoyancy] - pres_neutral_buoyancy)/(p[level_neutral_buoyancy] + pres_neutral_buoyancy)
outflow_temp = (outflow_temp * (pres_neutral_buoyancy - p[level_neutral_buoyancy + 1]) + t[level_neutral_buoyancy + 1] * (p[level_neutral_buoyancy] - pres_neutral_buoyancy)) / (p[level_neutral_buoyancy] - p[level_neutral_buoyancy + 1])
return positive_area - negative_area + residual_area, outflow_temp, level_neutral_buoyancy
end
function get_cape_and_outflow_temp_from_sounding(tparcel :: Real,rparcel :: Real,pparcel :: Real,t :: Array{<: Real},r :: Array{<: Real},p :: Array{<: Real},ptop=59)
ustrip.(get_cape_and_outflow_temp_from_sounding(u"K" * tparcel,u"kg/kg" * rparcel,u"hPa" * pparcel, u"K" .* t, u"kg/kg" .*r,u"hPa" .* p,u"hPa" * ptop))
end | TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |
|
[
"MIT"
] | 0.2.0 | 3331d7ed85874e3e4d73a01980c08f31068997f6 | code | 1149 | """
find_root_newton_raphson(func, func_derivative; target_value, initial_guess, atol, max_iter)
Use the Newton-Raphson method to find `x⋆` such that `func(x⋆) = target_value`, given the derivative `func_derivative`. In this package it is used to find the temperature T⋆ at a pressure level P such that the specific entropy s(T⋆, P) equals a lifted parcel's entropy, assuming a reversible moist process.
Total entropy of the parcel before condensation:
s = (cpd + rt*cl)·ln(T) - Rd·ln(pd) + Lv·r/T - r·Rv·ln(H), where rt is the total water (vapor plus liquid)
"""
function find_root_newton_raphson(func, func_derivative; target_value = 0.0, initial_guess = 0.0, atol = 0.001*unit(initial_guess), max_iter = 500)
niter = 0
initial_err = atol + 5unit(initial_guess)
approximation = initial_guess
err = initial_err
step_size = 1.0
while abs(err) > atol
niter += 1
step_size = niter < 3 ? 0.3 : 1.0
approximation = approximation - step_size*(func(approximation) - target_value)/func_derivative(approximation)
err = step_size*(func(approximation) - target_value)/func_derivative(approximation)
if (niter > max_iter )
error("Function didn't converge after $max_iter iterations")
end
end
return approximation
end
| TropicalCyclonePotentialIntensity | https://github.com/aramirezreyes/TropicalCyclonePotentialIntensity.jl.git |