licenses | version | tree_hash | path | type | size | text | package_name | repo |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.1 | e819b0d814ad86f0b6edbd52278fe6f5660c7fec | code | 215 | include("../../src/IpyJL.jl")
using Main.IpyJL
using Test
# This doesn't properly test the conversion yet; ideally it would read the
# generated file afterward and verify its output.
ipynbjl("ipynbtestbook.ipynb", "example.jl")
| IPyCells | https://github.com/ChifiSource/IPyCells.jl.git |
|
[
"MIT"
] | 0.1.1 | e819b0d814ad86f0b6edbd52278fe6f5660c7fec | docs | 2205 | <div align="center" style = "box-pack: start;">
</br>
<img width = 300 src="https://github.com/ChifiSource/image_dump/blob/main/ipyjl/logo.png" >
</br>
</br>
<h1>IPyCells</h1>
</div>
`IPyCells` provides parametric cell-based functionality, along with readers and writers for different cell formats (Ipynb, JL), and offers the option to extend the cells via parametric typing. This module provides
###### cells
- `AbstractCell`
- `Cell(n::Int64, type::String, content::String, outputs::Any = ""; id::String = "")`
- `string(Cell{<:Any})`
- `string(cell::Cell{:md})`
- `string(cell::Cell{:doc})`
- `getindex(v::Vector{Cell{<:Any}}, s::String)`
###### read/write
- `read_plto(uri::String)`
- `read_jlcells(uri::String)`
- `read_jl(uri::String)`
- `save(cells::Vector{<:AbstractCell}, path::String)`
- `save_ipynb(cells::Vector{<:AbstractCell}, path::String)` (this **does not** work quite right yet: post-save, cells are readable by Olive but not by Jupyter)
- `read_ipynb(f::String)`
- `ipyjl(ipynb_path::String, output_path::String)`
###### (internal)
- `plto_cell_lines(uri::String)`
- `sep(content::Any)`
### Adding
```julia
julia> ]
pkg> add IPyCells
```
### Usage
There are many ways to use `IPyCells` -- this package can be used to convert Pluto notebooks into Olive notebooks, or IPython notebooks into Julia files. Currently, the ipynb save method will break your `.ipynb` files such that IJulia cannot read them (the cells remain readable by [Olive](https://github.com/ChifiSource/Olive.jl)); this is planned to be fixed eventually. In short, this package can read notebooks in any of these formats and save the result as (currently) ipynb or Julia files.
```julia
ipynbjl("ipynbtestbook.ipynb", "example.jl")
```
```julia
cells = read_ipynb("ipynbtestbook.ipynb")
save(cells, "example.jl")
```
This preserves both the output and markdown. Alternatively, you could write functions around cells, enabling different cell types to be read by this reader.
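Cells can also be constructed programmatically and written out. A minimal sketch, assuming `Cell` and `save` are exported as listed above (the cell `type` strings `"md"` and `"code"` follow the `Cell{:md}` methods listed earlier, but are otherwise assumptions):
```julia
using IPyCells

cells = [
    Cell(1, "md", "# My notebook"),
    Cell(2, "code", "println(\"hello\")")
]
save(cells, "example.jl")
```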
| IPyCells | https://github.com/ChifiSource/IPyCells.jl.git |
|
[
"MIT"
] | 0.1.3 | 483e5daedd6b57d74f951bab69f3cb68948fed55 | code | 760 | using SimpleDrawing, Plots, Clines
#resize_gr_window()
C = Circle(0, 0, 15)   # fixed wheel (radius 15)
D = Circle(23, 0, 8)   # rolling wheel (radius 8), centered at distance 15 + 8
newdraw()
draw(C, linewidth = 2);
draw(D, linewidth = 2);
x = 15 * cos(pi / 6)
y = 15 * sin(pi / 6)
draw_vector(x, y, linecolor = :black)
annotate!(x / 2, y / 2 + 1, "a")
x = 8 * cos(pi / 6)
y = 8 * sin(pi / 6)
draw_vector(x, y, 23, 0, color = :black)
annotate!(x + 19, y / 2 + 0.7, "b")
#draw_point(23+offset,0,marker=4,color=:red,linecolor=:white)
offset = 6
# red dot marking the pen location at the tip of the offset vector
draw(Circle(23 + offset + 0.2, 0, 0.2), fill = true, linecolor = :red, color = :red)
draw_vector(offset, 0, 23, 0, linecolor = :red)
annotate!(26, -0.5, "offset", annotationfontsize = 10)
draw_point(0, 0, color = :black, marker = 4)
draw_point(23, 0, color = :black, marker = 4)
finish()
| Spirograph | https://github.com/scheinerman/Spirograph.jl.git |
|
[
"MIT"
] | 0.1.3 | 483e5daedd6b57d74f951bab69f3cb68948fed55 | code | 1354 | using SimpleDrawing, Plots, ProgressMeter
"""
mechanism(a,b,offset,t)
Draw a picture of the spirograph mechanism. Draw with `spirograph` first, then follow
with `mechanism` using the same parameters.
"""
function mechanism(a::Int, b::Int, offset::Real, t::Real = 0)
draw_circle(0, 0, a, linecolor = :black)
θ = Spirograph.inner_angle(a, b, t)
x = a + b
z = (a + b) * exp(im * θ) * im
draw_circle(z, abs(b), linecolor = :black)
pen = Spirograph.spot(a, b, offset, t)
draw_segment(z,pen,color=:black)
draw_point(pen, color = :red, linecolor = :red, marker = 3, linewidth = 0)
finish()
end
# Build an animated version of the spirograph drawing; returns the animation
# object produced by `@animate` (render it with `Plots.gif`).
function spiro_movie(a, b, offset, step = Spirograph._DEFAULT_STEP)
zvals = Spirograph.spiro_points(a, b, offset, step)
T = Spirograph.stop_t(a, b)
tvals = collect(0:step:T)
npts = length(zvals)
x = real(zvals)
y = imag(zvals)
bound = 0
if b>0
bound = a+b+max(abs(offset), b)
else
bound = a + max(abs(offset)-abs(b), 0)
end
PM = Progress(npts)
movie = @animate for j = 1:npts
newdraw()
draw_rectangle(-bound, -bound, bound, bound, linecolor = :white)
xx = x[1:j]
yy = y[1:j]
plot!(xx, yy, linecolor = :blue)
mechanism(a, b, offset, tvals[j])
next!(PM)
finish()
end
return movie
end | Spirograph | https://github.com/scheinerman/Spirograph.jl.git |
|
[
"MIT"
] | 0.1.3 | 483e5daedd6b57d74f951bab69f3cb68948fed55 | code | 1665 | module Spirograph
using Plots
function lastly()
plot!(aspectratio = 1, legend = false, axis = false, grid = false, ticks = false)
end
@inline inner_angle(a::Int, b::Int, t::Real)::Float64 = b * t / a
@inline function spot(a::Int, b::Int, offset::Real, t::Real)::Complex{Float64}
θ = inner_angle(a, b, t)
return im * ((a + b) * exp(θ * im) + offset * exp(t * im))
end
@inline stop_t(a::Int, b::Int) = 2 * π * (lcm(a, b) ÷ abs(b))
function spiro_points(a::Int, b::Int, offset::Real, step::Real)
if a <= 0 || b == 0
error(
"Radius of fixed circle must be positive and radius of the rolling disk must be nonzero",
)
end
T = stop_t(a, b) #+ step
return [spot(a, b, offset, t) for t = 0:step:T]
end
_DEFAULT_STEP = 0.05
"""
spirograph(a,b,offset;args...)
Draw a spirograph picture where
+ `a` is the radius of the fixed wheel,
+ `b` is the radius of the moving wheel, and
+ `offset` is the distance from the center of the moving wheel to the pen.
The optional `args` are passed to `plot`.
## Example
`spirograph(20,-9,12,linecolor=:green)`
"""
function spirograph(a::Int, b::Int, offset::Real, step::Real = _DEFAULT_STEP; args...)
pts = spiro_points(a, b, offset, step)
plot(real(pts), imag(pts); args...)
lastly()
end
"""
spirograph!
Same as `spirograph` but does not clear the drawing screen first. This way spirograph
images can be combined.
"""
function spirograph!(a::Int, b::Int, offset::Real, step::Real = _DEFAULT_STEP; args...)
pts = spiro_points(a, b, offset, step)
plot!(real(pts), imag(pts); args...)
lastly()
end
export spirograph, spirograph!
end # module
| Spirograph | https://github.com/scheinerman/Spirograph.jl.git |
|
[
"MIT"
] | 0.1.3 | 483e5daedd6b57d74f951bab69f3cb68948fed55 | code | 61 | using Spirograph
using Test
spirograph(10, 3, 5)
@test true
| Spirograph | https://github.com/scheinerman/Spirograph.jl.git |
|
[
"MIT"
] | 0.1.3 | 483e5daedd6b57d74f951bab69f3cb68948fed55 | docs | 1822 | # Spirograph
Julia implementation of the classic [Spirograph](https://en.wikipedia.org/wiki/Spirograph) toy.
## Creating Spirograph Drawings
To make a spirograph drawing, use the function
```
spirograph(a,b,offset,args...)
```
where
+ `a` is the radius of the fixed wheel,
+ `b` is the radius of the moving wheel, and
+ `offset` is the distance from the center of the moving wheel to the pen location.
Here `a` and `b` are integers. The argument `b` may be negative, in which case the moving wheel glides along the inside of the fixed wheel. This is shown in the following diagram in which the red dot shows the pen location.

See also [this diagram](diagram2.pdf) that shows the spirograph with `a=14`, `b=-5`, and `offset=4` overlaid with a picture of the two wheels with a red dot for the pen location. (This picture was created using the `mechanism` function defined in the file `diagram/mechanism.jl`.) See also [this animation](animation.gif).
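That overlaid curve should be reproducible with a call along these lines (parameters taken from the caption above):
```
using Spirograph
spirograph(14, -5, 4)
```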
### Drawing Options
The optional `args` can be used to change aspects of the plot.
For example:
```
spirograph(25,11,18,linecolor=:red)
```
produces this image:

### Combining Drawings
We also provide the function `spirograph!` that operates exactly like `spirograph` but does not first clear the drawing window. In this way, multiple spirograph images can be combined.
```
julia> spirograph(40,-11,18,linecolor=:red)
julia> spirograph!(40,-11,16,linecolor=:blue)
julia> spirograph!(40,-11,14,linecolor=:green)
```

## Saving
To save an image, use the `savefig` function from [Plots](http://docs.juliaplots.org/latest/). For example:
```
julia> using Spirograph, Plots
julia> spirograph(80,-33,45,linewidth=0.5)
julia> savefig("my-spiro.pdf")
```
[Click here](my-spiro.pdf) to see the result. | Spirograph | https://github.com/scheinerman/Spirograph.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | code | 31839 | module StructUtils
using Dates, UUIDs
export @noarg, @defaults, @tags, Selectors
"""
StructUtils.StructStyle
Abstract type that all concrete struct styles must subtype.
Custom struct styles allow fine-grained control over various
StructUtils.jl interface methods like `fieldtags`, `fielddefaults`,
`lift`, `lower`, etc.
"""
abstract type StructStyle end
"""
StructUtils.DefaultStyle
Default struct style that all StructUtils.jl interface methods
are defined for by default.
"""
struct DefaultStyle <: StructStyle end
include("macros.jl")
"""
StructUtils.dictlike(::Type{T}) -> Bool
StructUtils.dictlike(::StructStyle, ::Type{T}) -> Bool
Returns `true` if `T` is a dictionary-like type, `false` otherwise.
When `StructUtils.make(T, source)` is called, if `dictlike(T)` is `true`,
an instance will be `initialize`d, and then `addkeyval!`ed for each
key-value pair in `source`.
"""
function dictlike end
dictlike(::Type{<:AbstractDict}) = true
dictlike(::Type{<:AbstractVector{<:Pair}}) = true
dictlike(_) = false
dictlike(_, ::Type{T}) where {T} = dictlike(T)
dictlike(st, ::T) where {T} = dictlike(st, T)
"""
StructUtils.noarg(::Type{T}) -> Bool
StructUtils.noarg(::StructStyle, ::Type{T}) -> Bool
Signals that `T` is a mutable type that can be constructed by calling an empty
constructor, like `t = T()`. Automatically overloaded when structs use the
`@noarg` macro in their struct definition. The default value is `false` unless
explicitly overloaded.
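For example, a mutable type that already defines an empty constructor can opt in manually (the `Config` type here is hypothetical):
```julia
mutable struct Config
    host::String
    port::Int
    Config() = new()
end
StructUtils.noarg(::Type{Config}) = true
```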
"""
function noarg end
noarg(_) = false
noarg(_, ::Type{T}) where {T} = noarg(T)
noarg(st, ::T) where {T} = noarg(st, T)
"""
StructUtils.kwdef(::Type{T}) -> Bool
StructUtils.kwdef(::StructStyle, ::Type{T}) -> Bool
Signals that `T` can be constructed by passing struct fields as keyword arguments
to the constructor, like `t = T(field1=a, field2=b, ...)`. Automatically overloaded
when structs use the `@kwdef` macro in their struct definition. The default value
is `false` unless explicitly overloaded.
Note that `StructUtils.@kwdef` currently has no relation to `Base.@kwdef`, yet should
be a drop-in replacement for it.
"""
function kwdef end
kwdef(_) = false
kwdef(_, ::Type{T}) where {T} = kwdef(T)
kwdef(st, ::T) where {T} = kwdef(st, T)
"""
StructUtils.fieldtagkey(::Type{<:StructStyle}) -> Symbol
Field tags defined on struct fields can be grouped by keys that are associated with
a particular struct style. This function returns the key that should be used to
retrieve field tags for a given struct style. By default, this function returns
`nothing`. An example overload might look like:
```julia
struct MySQLStyle <: StructStyle end
StructUtils.fieldtagkey(::Type{MySQLStyle}) = :mysql
@tags struct Foo
a::Int &(mysql=(name="foo_a",),)
b::String
end
```
In this example, when `StructUtils.make` is called on `Foo` with the `MySQLStyle` style,
only `(name="foo_a",)` will be retrieved from the field tags for `a` because the
`mysql` key is associated with the `MySQLStyle` struct style.
"""
function fieldtagkey end
fieldtagkey(::Type{T}) where {T} = nothing
"""
StructUtils.fieldtags(::Type{T}) -> NamedTuple
StructUtils.fieldtags(::StructStyle, ::Type{T}) -> NamedTuple
StructUtils.fieldtags(::StructStyle, ::Type{T}, fieldname) -> NamedTuple
Returns a `NamedTuple` of field tags for the struct `T`. Field tags can be
added manually by overloading `fieldtags`, or included via convenient syntax
using the StructUtils.jl macros: `@tags`, `@noarg`, `@defaults`, or `@kwdef`.
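A minimal sketch of a manual overload (the `Item` struct here is hypothetical); the `ignore` tag causes the field to be skipped by `applyeach` and `make`:
```julia
struct Item
    id::Int
    secret::String
end
StructUtils.fieldtags(::Type{Item}) = (secret=(ignore=true,),)
```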
"""
function fieldtags end
fieldtags(::Type{T}) where {T} = (;)
fieldtags(_, ::Type{T}) where {T} = fieldtags(T)
fieldtags(::Type{T}, field) where {T} = get(() -> (;), fieldtags(T), field)
function fieldtags(st::StructStyle, ::Type{T}, field) where {T}
ft = fieldtags(st, T)
fft = get(() -> (;), ft, field)
ftk = fieldtagkey(typeof(st))
return ftk === nothing ? fft : get(() -> (;), fft, ftk)
end
"""
StructUtils.fielddefaults(::Type{T}) -> NamedTuple
StructUtils.fielddefaults(::StructStyle, ::Type{T}) -> NamedTuple
StructUtils.fielddefault(::StructStyle, ::Type{T}, fieldname) -> NamedTuple
Returns a `NamedTuple` of field defaults for the struct `T`. Field defaults can be
added manually by overloading `fielddefaults`, or included via convenient syntax
using the StructUtils.jl macros: `@tags`, `@noarg`, `@defaults`, or `@kwdef`.
"""
function fielddefaults end
fielddefaults(::Type{T}) where {T} = (;)
fielddefaults(_, ::Type{T}) where {T} = fielddefaults(T)
fielddefault(::Type{T}, key) where {T} = haskey(fielddefaults(T), key) ? fielddefaults(T)[key] : nothing
fielddefault(st, ::Type{T}, key) where {T} = haskey(fielddefaults(st, T), key) ? fielddefaults(st, T)[key] : nothing
@doc (@doc fielddefaults) fielddefault
"""
StructUtils.initialize(T) -> T
StructUtils.initialize(T, dims) -> T
StructUtils.initialize(::StructStyle, T) -> T
StructUtils.initialize(::StructStyle, T, dims) -> T
In `StructUtils.make`, this function is called to initialize a new instance of `T`,
when `T` is `dictlike`, `arraylike`, or `noarg`. For `arraylike`, if the `source`
in `make` is discovered to have multiple dimensions, `dims` will be passed to
`initialize`. The default implementation of `initialize` is to call `T()` or `T(undef, dims...)`
for `<:AbstractArray` types.
"""
function initialize end
initialize(::Type{T}) where {T} = T()
initialize(AT::Type{<:AbstractArray{T,0}}) where {T} = AT(undef)
initialize(::Type{T}) where {T<:AbstractVector} = T(undef, 0)
initialize(::Type{T}, dims) where {T<:AbstractArray} = T(undef, dims)
initialize(_, ::Type{T}) where {T} = initialize(T)
initialize(_, ::Type{T}, dims) where {T} = initialize(T, dims)
"""
StructUtils.addkeyval!(d, k, v)
Add a key-value pair to a dictionary-like object `d`. This function is called
by `StructUtils.make` when `d` is `dictlike`. The default implementation is to
call `d[k] = v` for `AbstractDict`.
"""
function addkeyval! end
addkeyval!(d::AbstractDict, k, v) = d[k] = v
addkeyval!(d::AbstractVector, k, v) = push!(d, k => v)
_keytype(d) = keytype(d)
_keytype(::AbstractVector{Pair{A,B}}) where {A,B} = A
_valtype(d) = valtype(d)
_valtype(::AbstractVector{Pair{A,B}}) where {A,B} = B
"""
StructUtils.arraylike(::Type{T}) -> Bool
StructUtils.arraylike(::StructStyle, ::Type{T}) -> Bool
Returns `true` if `T` is an array-like type, `false` otherwise. This function is
called by `StructUtils.make` to determine if `T` is array-like. The default
implementation returns `true` for `<:AbstractArray`, `<:AbstractSet`, `<:Tuple`,
`<:Base.Generator`, and `<:Core.SimpleVector` types, and `false` for `<:AbstractArray{T,0}`.
Once `initialize` is called, `StructUtils.make` will call `push!` to add values
to the array-like object.
"""
function arraylike end
arraylike(::Type{T}) where {T} = false
arraylike(::T) where {T} = arraylike(T)
arraylike(_, ::Type{T}) where {T} = arraylike(T)
arraylike(_, ::T) where {T} = arraylike(T)
arraylike(::Type{<:Union{AbstractArray,AbstractSet,Tuple,Base.Generator,Core.SimpleVector}}) = true
arraylike(::Type{<:AbstractArray{T,0}}) where {T} = false
"""
StructUtils.structlike(::Type{T}) -> Bool
StructUtils.structlike(::StructStyle, ::Type{T}) -> Bool
Returns `true` if `T` is a struct-like type, `false` otherwise. This function is
called by `StructUtils.make` to determine if `T` is struct-like. The default
implementation returns `true` for `isstructtype(T)` and `!Base.issingletontype(T)`.
`structlike` structs are expected to be able to be constructed by the default constructor
like `T(field1, field2, ...)`.
Due to how `StructUtils.make` works, `structlike` is often overloaded to `false` by "unit" types
whose fields should be considered private; the `make` process should instead attempt to
`lift` the `source` object into the `unit` type.
"""
function structlike end
structlike(::Type{T}) where {T} = isstructtype(T) && !Base.issingletontype(T)
structlike(_, ::Type{T}) where {T} = structlike(T)
structlike(::T) where {T} = structlike(T)
structlike(_, ::T) where {T} = structlike(T)
structlike(::Type{<:Dates.TimeType}) = false
structlike(::Type{<:AbstractString}) = false
structlike(::Type{Symbol}) = false
structlike(::Type{<:Number}) = false
structlike(::Type{<:AbstractChar}) = false
structlike(::Type{Nothing}) = false
structlike(::Type{Missing}) = false
structlike(::Type{UUID}) = false
structlike(::Type{VersionNumber}) = false
structlike(::Type{Regex}) = false
structlike(::Type{<:AbstractArray{T,0}}) where {T} = false
structlike(::Module) = false
structlike(::Function) = false
"""
StructUtils.nulllike(T) -> Bool
StructUtils.nulllike(::StructStyle, T) -> Bool
Returns `true` if `T` is a null-like type, `false` otherwise. This function is
mainly used in the default `choosetype` implementation to determine if a
`Union` type can be narrowed by excluding `nulllike` types like `Nothing` and `Missing`.
"""
function nulllike end
nulllike(::Type{T}) where {T} = false
nulllike(::T) where {T} = nulllike(T)
nulllike(_, ::Type{T}) where {T} = nulllike(T)
nulllike(_, ::T) where {T} = nulllike(T)
nulllike(::Type{Nothing}) = true
nulllike(::Type{Missing}) = true
"""
StructUtils.lower(x) -> x
StructUtils.lower(::StructStyle, x) -> x
Domain value transformation function. This function is called by
`StructUtils.applyeach` on each value in the `source` object before
calling the apply function. By default, `lower` is the identity function.
This allows a domain transformation of values according to the
style used.
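For example, a custom style could lower all strings to uppercase before they are passed to the apply function (the `UpperStyle` here is hypothetical):
```julia
struct UpperStyle <: StructUtils.StructStyle end
StructUtils.lower(::UpperStyle, x::AbstractString) = uppercase(x)
```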
"""
function lower end
lower(x) = x
lower(::StructStyle, x) = lower(x)
function lower(st::StructStyle, x, tags)
if x isa Dates.TimeType && haskey(tags, :dateformat)
return Dates.format(x, tags.dateformat)
elseif haskey(tags, :lower)
return tags.lower(x)
else
return lower(st, x)
end
end
"""
StructUtils.lift(::Type{T}, x) -> T
StructUtils.lift(::StructStyle, ::Type{T}, x) -> T
Lifts a value `x` to a type `T`. This function is called by `StructUtils.make`
to lift values to the appropriate type. The default implementation is
the identity function for most types, but it also includes special cases
for `Symbol`, `Char`, `UUID`, `VersionNumber`, `Regex`, and `TimeType` types.
Allows transforming a "domain value" that may be some primitive representation
into a more complex Julia type.
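For example, a simple wrapper type could be lifted from a raw number; `structlike` is also overloaded so that `make` treats it as a unit value rather than a struct (the `Percent` type here is hypothetical):
```julia
struct Percent
    value::Float64
end
StructUtils.structlike(::Type{Percent}) = false
StructUtils.lift(::Type{Percent}, x::Number) = Percent(Float64(x))
```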
"""
function lift end
lift(::Type{Symbol}, x) = Symbol(x)
lift(::Type{T}, x) where {T} = Base.issingletontype(T) ? T() : convert(T, x)
lift(::Type{>:Missing}, ::Nothing) = missing
lift(::Type{>:Nothing}, ::Nothing) = nothing
lift(::Type{>:Union{Missing,Nothing}}, ::Nothing) = nothing
lift(::Type{>:Union{Missing,Nothing}}, ::Missing) = missing
lift(::Type{Char}, x::AbstractString) = length(x) == 1 ? x[1] : throw(ArgumentError("expected single character, got $x"))
lift(::Type{UUID}, x::AbstractString) = UUID(x)
lift(::Type{VersionNumber}, x::AbstractString) = VersionNumber(x)
lift(::Type{Regex}, x::AbstractString) = Regex(x)
lift(::Type{T}, x::AbstractString) where {T<:Dates.TimeType} = T(x)
function lift(::Type{T}, x, tags) where {T<:Dates.TimeType}
if haskey(tags, :dateformat)
return T(x, tags.dateformat)
else
return T(x)
end
end
# bit of an odd case, but support 0-dimensional array lifting from scalar value
function lift(::Type{A}, x) where {A<:AbstractArray{T,0}} where {T}
m = A(undef)
m[1] = lift(T, x)
return m
end
@inline lift(::Type{T}, x, _) where {T} = lift(T, x)
@inline lift(::StructStyle, ::Type{T}, x) where {T} = lift(T, x)
@inline function lift(st::StructStyle, ::Type{T}, x, tags) where {T}
if haskey(tags, :lift)
return tags.lift(x)
elseif !isempty(tags)
return lift(T, x, tags)
else
return lift(st, T, x)
end
end
@inline lift(f::F, st::StructStyle, ::Type{T}, x, tags) where {F,T} = f(lift(st, T, x, tags))
"""
StructUtils.choosetype(::Type{T}, x) -> T
StructUtils.choosetype(::StructStyle, ::Type{T}, x) -> T
Chooses a concrete type from an abstract or union type `T` based on the value `x`, where
`x` is the "source" object in the context of `StructUtils.make`.
This allows a runtime decision to be made around a concrete subtype
that should be used for `StructUtils.make` based on potentially dynamic values
of the source object.
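For example, an abstract field type could pick a concrete subtype based on a key present in a `Dict`-like source (the `Vehicle` hierarchy here is hypothetical):
```julia
abstract type Vehicle end
struct Car <: Vehicle
    speed::Float64
end
struct Truck <: Vehicle
    payload::Float64
end
StructUtils.choosetype(::Type{Vehicle}, source) =
    haskey(source, "payload") ? Truck : Car
```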
"""
function choosetype end
choosetype(::Type{T}, x) where {T} = (T >: Missing && !nulllike(x)) ? nonmissingtype(T) :
(T >: Nothing && !nulllike(x)) ? Base.nonnothingtype(T) : T
@inline choosetype(::Type{T}, x, tags) where {T} = choosetype(T, x)
@inline choosetype(::StructStyle, ::Type{T}, x) where {T} = choosetype(T, x)
@inline function choosetype(st::StructStyle, ::Type{T}, x, tags) where {T}
if haskey(tags, :choosetype)
return tags.choosetype(x)
elseif !isempty(tags)
return choosetype(T, x, tags)
else
return choosetype(st, T, x)
end
end
@inline choosetype(f, style::StructStyle, ::Type{T}, x, tags) where {T} = f(style, choosetype(style, T, x, tags), x, tags)
"""
StructUtils.applyeach(style, f, x) -> Union{StructUtils.EarlyReturn, Nothing}
A custom `foreach`-like function that operates specifically on `(key, val)` or `(ind, val)` pairs,
and supports short-circuiting (via `StructUtils.EarlyReturn`). It also supports a `StructStyle` argument
to allow for style-specific behavior for non-owned types.
For each key-value or index-value pair in `x`, call `f(k, v)`.
If `f` returns a `StructUtils.EarlyReturn` instance, `applyeach` should
return the `EarlyReturn` immediately and stop iterating (i.e. short-circuit).
Otherwise, the return value of `f` is ignored and iteration continues.
Key types are generally expected to be Symbols, Strings, or Integers.
An example overload of `applyeach` for a generic iterable would be:
```julia
function StructUtils.applyeach(style::StructUtils.StructStyle, f, x::MyIterable)
for (i, v) in enumerate(x)
ret = f(i, StructUtils.lower(style, v))
# if `f` returns EarlyReturn, return immediately
ret isa StructUtils.EarlyReturn && return ret
end
return
end
```
Note that `applyeach` must include the `style` argument when overloading,
even though it can be _called_ without it, in which case the `DefaultStyle` will be used.
Also note that before applying `f`, the value `v` is passed through `StructUtils.lower(style, v)`.
If a value is `#undef` or otherwise not defined, the `f` function should be called with `nothing`.
"""
function applyeach end
"""
StructUtils.EarlyReturn{T}
A wrapper type that can be used in function arguments to `applyeach`
to short-circuit iteration and return a value from `applyeach`.
Example usage:
```julia
function find_needle_in_haystack(haystack, needle)
ret = applyeach(haystack) do k, v
k == needle && return StructUtils.EarlyReturn(v)
end
ret isa StructUtils.EarlyReturn && return ret.value
throw(ArgumentError("needle not found in haystack"))
end
```
"""
struct EarlyReturn{T}
value::T
end
@inline applyeach(f, x) = applyeach(DefaultStyle(), f, x)
@inline applyeach(f, st::StructStyle, x) = applyeach(st, f, x)
@inline function applyeach(st::StructStyle, f, x::AbstractArray)
for i in eachindex(x)
ret = if @inbounds(isassigned(x, i))
f(i, lower(st, @inbounds(x[i])))
else
f(i, lower(st, nothing))
end
ret isa EarlyReturn && return ret
end
return length(x)
end
# special-case Pair vectors to act like Dicts
@inline function applyeach(st::StructStyle, f, x::AbstractVector{Pair{K,V}}) where {K,V}
for (k, v) in x
ret = f(k, lower(st, v))
ret isa EarlyReturn && return ret
end
return
end
# appropriate definition for iterables that
# can't have #undef values
@inline function applyeach(st::StructStyle, f, x::Union{AbstractSet,Base.Generator,Core.SimpleVector})
for (i, v) in enumerate(x)
ret = f(i, lower(st, v))
ret isa EarlyReturn && return ret
end
return
end
# generic definition for Tuple, NamedTuple, structs
function applyeach(st::StructStyle, f, x::T) where {T}
if @generated
N = fieldcount(T)
ex = quote
Base.@_inline_meta
defs = fielddefaults(st, T)
end
for i = 1:N
fname = Meta.quot(fieldname(T, i))
push!(ex.args, quote
ftags = fieldtags(st, T, $fname)
if !haskey(ftags, :ignore) || !ftags.ignore
fname = get(ftags, :name, $fname)
ret = if isdefined(x, $i)
f(fname, lower(st, getfield(x, $i), ftags))
elseif haskey(defs, $fname)
# this branch should be really rare because we should
# have applied a field default in the struct constructor
f(fname, lower(st, defs[$fname], ftags))
else
f(fname, lower(st, nothing, ftags))
end
ret isa EarlyReturn && return ret
end
end)
end
push!(ex.args, :(return))
return ex
else
defs = fielddefaults(st, T)
for i = 1:fieldcount(T)
fname = fieldname(T, i)
ftags = fieldtags(st, T, fname)
if !haskey(ftags, :ignore) || !ftags.ignore
fname = get(ftags, :name, fname)
ret = if isdefined(x, i)
f(fname, lower(st, getfield(x, i), ftags))
elseif haskey(defs, fname)
f(fname, lower(st, defs[fname], ftags))
else
f(fname, lower(st, nothing, ftags))
end
ret isa EarlyReturn && return ret
end
end
return
end
end
@inline function applyeach(st::StructStyle, f, x::AbstractDict)
for (k, v) in x
ret = f(k, lower(st, v))
ret isa EarlyReturn && return ret
end
return
end
struct KeyValStructClosure{T,S,V}
style::S
val::V # either the actual `T` instance (for `noarg` structs) or a Tuple of `FieldRef`s (for other structs and tuples)
i::Union{Base.RefValue{Int},Nothing}
end
KeyValStructClosure{T}(style::S, val::V) where {T,S,V} = KeyValStructClosure{T,S,V}(style, val, T <: Tuple ? Ref(0) : nothing)
struct NoArgFieldRef{T}
val::T
i::Int
end
@static if VERSION < v"1.10"
function _isfieldatomic(@nospecialize(t::Type), s::Int)
t = Base.unwrap_unionall(t)
# TODO: what to do for `Union`?
isa(t, DataType) || return false # uncertain
ismutabletype(t) || return false # immutable structs are never atomic
1 <= s <= length(t.name.names) || return false # OOB reads are not atomic (they always throw)
atomicfields = t.name.atomicfields
atomicfields === C_NULL && return false
s -= 1
return unsafe_load(Ptr{UInt32}(atomicfields), 1 + s÷32) & (1 << (s%32)) != 0
end
else
const _isfieldatomic = Base.isfieldatomic
end
(f::NoArgFieldRef{T})(val::S) where {T,S} = setfield!(f.val, f.i, val, _isfieldatomic(T, f.i) ? :sequentially_consistent : :not_atomic)
mutable struct FieldRef{T}
set::Bool
val::T
FieldRef{T}() where {T} = new{T}(false)
FieldRef{T}(x) where {T} = new{T}(true, x)
end
(f::FieldRef{T})(val) where {T} = (f.set = true; f.val = val)
Base.getindex(f::FieldRef) = f.set ? f.val : nothing
keyeq(a, b::String) = string(a) == b
keyeq(a::AbstractString, b::String) = String(a) == b
keyeq(a, b) = isequal(a, b)
keyeq(x) = y -> keyeq(x, y)
function (f::KeyValStructClosure{T,S,V})(key::K, val::VV) where {T,S,V,K,VV}
if @generated
N = fieldcount(T)
ex = quote
Base.@_inline_meta
if T <: Tuple
f.i[] += 1
end
end
for i = 1:N
ftype = fieldtype(T, i)
if T <: Tuple
push!(ex.args, quote
if f.i[] == $i
return make(f.val[$i], f.style, $ftype, val)
end
end)
else
fname = Meta.quot(fieldname(T, i))
mexpr = quote
if noarg(f.style, T)
return make(NoArgFieldRef(f.val, $i), f.style, $ftype, val, ftags)
else
return make(f.val[$i], f.style, $ftype, val, ftags)
end
end
if K == Int
push!(ex.args, quote
if key == $i
ftags = fieldtags(f.style, T, $fname)
$mexpr
end
end)
elseif K == Symbol
push!(ex.args, quote
ftags = fieldtags(f.style, T, $fname)
fname = get(ftags, :name, $fname)
if key == fname
$mexpr
end
end)
else
fstr = String(fieldname(T, i))
push!(ex.args, quote
ftags = fieldtags(f.style, T, $fname)
fstr = String(get(ftags, :name, $fstr))
if keyeq(key, fstr)
$mexpr
end
end)
end
end
end
# Core.println(ex)
return ex
else
if T <: Tuple
i = f.i[] += 1
if i <= fieldcount(T)
return make(f.val[i], f.style, fieldtype(T, i), val)
end
else
for i = 1:fieldcount(T)
ftype = fieldtype(T, i)
if K == Int
if key == i
fname = fieldname(T, key)
ftags = fieldtags(f.style, T, fname)
if noarg(f.style, T)
return make(NoArgFieldRef(f.val, i), f.style, ftype, val, ftags)
else
return make(f.val[i], f.style, ftype, val, ftags)
end
end
elseif K == Symbol
fname = fieldname(T, i)
ftags = fieldtags(f.style, T, fname)
fname = get(ftags, :name, fname)
if key == fname
if noarg(f.style, T)
return make(NoArgFieldRef(f.val, i), f.style, ftype, val, ftags)
else
return make(f.val[i], f.style, ftype, val, ftags)
end
end
else # K == String
fname = fieldname(T, i)
ftags = fieldtags(f.style, T, fname)
fstr = String(fname)
fstr = String(get(ftags, :name, fstr))
if keyeq(key, fstr)
if noarg(f.style, T)
return make(NoArgFieldRef(f.val, i), f.style, ftype, val, ftags)
else
return make(f.val[i], f.style, ftype, val, ftags)
end
end
end
end
end
end
end
function getfielddefault(style::S, ::Type{T}, key) where {S,T}
fd = fielddefault(style, T, key)
if fd !== nothing
return FieldRef{fieldtype(T, key)}(fd)
else
return FieldRef{fieldtype(T, key)}()
end
end
function makestruct(f::F, style::S, ::Type{T}, source) where {F,S,T}
if @generated
N = fieldcount(T)
ex = quote
Base.@_inline_meta
end
syms = Symbol[]
for i = 1:N
nm = gensym(T <: Tuple ? :tuple : fieldname(T, i))
push!(syms, nm)
fname = Meta.quot(fieldname(T, i))
FT = fieldtype(T, i)
push!(ex.args, quote
fd = fielddefault(style, $T, $fname)
$nm = fd === nothing ? FieldRef{$FT}() : FieldRef{$FT}(fd)
end)
end
if T <: Tuple
vals = Expr(:tuple, Any[:($(syms[i])) for i = 1:N]...)
push!(ex.args, :(st = applyeach(style, KeyValStructClosure{$T}(style, $vals), source)))
push!(ex.args, :(x = $(Expr(:tuple, Any[:($(syms[i])[]) for i = 1:N]...))))
else
vals = Expr(:tuple, Any[:($(syms[i])) for i = 1:N]...)
push!(ex.args, :(st = applyeach(style, KeyValStructClosure{$T}(style, $vals), source)))
push!(ex.args, :(x = $(Expr(:new, :($T), Any[:($(syms[i])[]) for i = 1:N]...))))
end
push!(ex.args, :(f(x)))
push!(ex.args, :(return st))
# println(ex)
return ex
else
# println("non-generated makestruct")
vals = ntuple(i -> getfielddefault(style, T, fieldname(T, i)), fieldcount(T))
st = applyeach(style, KeyValStructClosure{T}(style, vals), source)
if T <: Tuple
f(ntuple(i -> vals[i][], fieldcount(T)))
elseif T <: NamedTuple
f(T(Tuple(vals[i][] for i = 1:fieldcount(T))))
else
f(T((vals[i][] for i = 1:fieldcount(T))...))
end
return st
end
end
struct DictClosure{S,T}
style::S
dict::T
end
mutable struct DictKeyClosure{K}
key::K
DictKeyClosure{K}() where {K} = new{K}()
end
@inline (f::DictKeyClosure{K})(x) where {K} = setfield!(f, :key, x)
struct DictValClosure{D,K}
dict::D # Dict instance
key::DictKeyClosure{K}
end
@inline (f::DictValClosure{D,K})(x) where {D,K} = addkeyval!(f.dict, f.key.key, x)
@inline function (f::DictClosure{S,T})(k, v) where {S,T}
KT = _keytype(f.dict)
VT = _valtype(f.dict)
kc = DictKeyClosure{KT}()
make(kc, f.style, KT, k)
return make(DictValClosure(f.dict, kc), f.style, VT, v)
end
struct ArrayClosure{S,T}
style::S
arr::T
end
struct ArrayPushClosure{T}
arr::T
end
@inline (f::ArrayPushClosure{T})(x) where {T} = push!(f.arr, x)
function (f::ArrayClosure{S,T})(k, v) where {S,T}
return make(ArrayPushClosure(f.arr), f.style, eltype(f.arr), v)
end
struct LengthClosure
len::Ptr{Int}
end
@inline (f::LengthClosure)(_, _) = unsafe_store!(f.len, unsafe_load(f.len) + 1)
function applylength(x)
ref = Ref(0)
lc = LengthClosure(Base.unsafe_convert(Ptr{Int}, ref))
GC.@preserve ref begin
StructUtils.applyeach(lc, x)
return unsafe_load(lc.len)
end
end
# recursively build up multidimensional array dimensions
# "[[1.0],[2.0]]" => (1, 2)
# "[[1.0,2.0]]" => (2, 1)
# "[[[1.0]],[[2.0]]]" => (1, 1, 2)
# "[[[1.0],[2.0]]]" => (1, 2, 1)
# "[[[1.0,2.0]]]" => (2, 1, 1)
# length of innermost array is 1st dim
function discover_dims(x)
@assert arraylike(x)
len = applylength(x)
ret = applyeach(x) do _, v
return arraylike(v) ? EarlyReturn(discover_dims(v)) : EarlyReturn(())
end
return (ret.value..., len)
end
struct MultiDimClosure{S,A}
style::S
arr::A
dims::Vector{Int}
cur_dim::Base.RefValue{Int}
end
@inline function (f::MultiDimClosure{S,A})(i, val) where {S,A}
f.dims[f.cur_dim[]] = i
if arraylike(val)
f.cur_dim[] -= 1
st = applyeach(f, val)
f.cur_dim[] += 1
else
st = make(MultiDimValFunc(f.style, f.arr, f.dims), f.style, eltype(f.arr), val)
end
return st
end
struct MultiDimValFunc{S,A}
style::S
arr::A
dims::Vector{Int}
end
@inline (f::MultiDimValFunc{S,A})(x) where {S,A} = setindex!(f.arr, x, f.dims...)
"""
StructUtils.make(T, source) -> T
StructUtils.make(style, T, source) -> T
StructUtils.make(f, style, T, source) -> nothing
StructUtils.make!(style, x::T, source)
Construct a struct of type `T` from `source` using the given `style`. The `source` can be any
type of object, and the `style` can be any `StructStyle` subtype.
`make` will use any knowledge of `noarg`, `arraylike`, or `dictlike` in order to
determine how to construct an instance of `T`. The fallback for structs is to rely on
the automatic "all argument" constructor that structs have defined by default.
`make` calls `applyeach` on the `source` object, where the key-value pairs
from `source` will be used in constructing `T`.
The 3rd definition above allows passing in an "applicator" function that is
applied to the constructed struct. This is useful when the initial `T` is
abstract or a union type and `choosetype` is used to determine the concrete
runtime type to construct.
The 4th definition allows passing in an already-constructed instance of `T` (`x`),
which must be mutable, and source key-value pairs will be applied as
appropriate to `x`.
For structs, `fieldtags` will be accounted for and certain tags can be used
to influence the construction of the struct.
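A minimal example of constructing a struct from a `Dict` source (the `Point` struct here is hypothetical):
```julia
struct Point
    x::Int
    y::Int
end
p = StructUtils.make(Point, Dict("x" => 1, "y" => 2))
# p == Point(1, 2)
```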
"""
function make end
make(::Type{T}, source; style::StructStyle=DefaultStyle()) where {T} = make(style, T, source)
mutable struct ValueClosure{T}
x::T
ValueClosure{T}() where {T} = new{T}()
end
@inline (f::ValueClosure{T})(x) where {T} = setfield!(f, :x, x)
struct MakeClosure{F}
f::F
end
@inline (f::MakeClosure)(style::S, ::Type{T}, source::V, tags) where {S,T,V} = _make(f.f, style, T, source, tags)
@inline function make(style::StructStyle, ::Type{T}, source, tags=(;)) where {T}
vc = ValueClosure{T}()
choosetype(MakeClosure(vc), style, T, source, tags)
return vc.x
end
@inline function make(f::F, style::StructStyle, ::Type{T}, source, tags=(;)) where {F,T}
return choosetype(MakeClosure(f), style, T, source, tags)
end
# assume choosetype has been applied to T
# f is function to be applied to made value
# returns state from applyeach if any
function _make(f::F, style::StructStyle, ::Type{T}, source, tags=(;)) where {F,T}
if dictlike(style, T)
x = initialize(style, T)
st = applyeach(style, DictClosure(style, x), source)
f(x)
elseif arraylike(style, T)
if T <: Tuple # special-case Tuple since it's arraylike, but we want to "make" it like a struct
st = makestruct(f, style, T, source)
elseif ndims(T) > 1
# multidimensional arrays
dims = discover_dims(source)
x = initialize(style, T, dims)
n = ndims(T)
st = applyeach(style, MultiDimClosure(style, x, ones(Int, n), Ref(n)), source)
f(x)
else
x = initialize(style, T)
st = applyeach(style, ArrayClosure(style, x), source)
f(x)
end
elseif noarg(style, T)
x = initialize(style, T)
st = applyeach(style, KeyValStructClosure{T}(style, x), source)
f(x)
elseif structlike(style, T)
st = makestruct(f, style, T, source)
else
st = lift(f, style, T, source, tags)
end
return st
end
make!(x::T, source; style::StructStyle=DefaultStyle()) where {T} = make!(style, x, source)
make!(::Type{T}, source; style::StructStyle=DefaultStyle()) where {T} = make!(style, T, source)
function make!(style::StructStyle, x::T, source) where {T}
if dictlike(style, T)
st = applyeach(DictClosure(style, x), source)
return x
elseif arraylike(style, T)
st = applyeach(ArrayClosure(style, x), source)
return x
elseif noarg(style, T)
st = applyeach(KeyValStructClosure{T}(style, x), source)
return x
else
throw(ArgumentError("Type `$T` does not support in-place `make!`"))
end
end
make!(style::StructStyle, ::Type{T}, source) where {T} = make!(style, initialize(style, T), source)
@doc (@doc make) make!
include("selectors.jl")
end
| StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | code | 13066 | struct None end
const none = None()
struct FieldExpr
isconst::Bool
isatomic::Bool
name::Symbol
type::Any # none or Symbol or Expr
default::Any # literal or Expr or none
tags::Union{None, Expr}
end
function _expr(f::FieldExpr)
nm_typ = f.type === none ? f.name : :($(f.name)::$(f.type))
if f.isconst
return Expr(:const, nm_typ)
elseif f.isatomic
return :(@atomic($nm_typ))
else
return nm_typ
end
end
function _kw(f::FieldExpr)
# don't include type to allow for more flexible inputs
# and we can rely on implicit convert call in setfield!
nm_typ = f.name # f.type === none ? f.name : :($(f.name)::$(f.type))
return f.default === none ? nm_typ : Expr(:kw, nm_typ, f.default)
end
# parse a single-line field expression
# we progressively peel off layers of the expression to extract field information
# supported field expression types include:
# mutable struct Foo
# no_type
# with_type::Int
# with_default = 1
# with_type_default::Int = 1
# with_tag &(xml=(key="with-tag",),)
# with_tag_type::Int &(xml=(key="with-tag-type",),)
# with_tag_default = 1 &(xml=(key="with-tag-default",),)
# with_tag_type_default::Int = 1 &(xml=(key="with-tag-default",),)
# @atomic no_type_atomic
# @atomic with_type_atomic::Int
# @atomic(with_default_atomic) = 1
# @atomic(with_type_default_atomic::Int) = 1
# @atomic(with_tag_atomic) &(xml=(key="with-tag-atomic",),)
# @atomic(with_tag_type_atomic::Int) &(xml=(key="with-tag-type-atomic",),)
# @atomic(with_tag_default_atomic) = 1 &(xml=(key="with-tag-default-atomic",),)
# @atomic(with_tag_type_default_atomic::Int) = 1 &(xml=(key="with-tag-default-atomic",),)
# const no_type_const
# const with_type_const::Int
# const with_default_const = 1
# const with_type_default_const::Int = 1
# const with_tag_const &(xml=(key="with-tag-const",),)
# const with_tag_type_const::Int &(xml=(key="with-tag-type-const",),)
# const with_tag_default_const = 1 &(xml=(key="with-tag-default-const",),)
# const with_tag_type_default_const::Int = 1 &(xml=(key="with-tag-default-const",),)
# end
function FieldExpr(ex)
isconst = isatomic = false
name = Symbol()
type = none
default = none
tags = none
if Meta.isexpr(ex, :const)
isconst = true
ex = ex.args[1]
end
if Meta.isexpr(ex, :(=))
def_and_tags = ex.args[2]
if Meta.isexpr(def_and_tags, :call) && def_and_tags.args[1] == :&
default = def_and_tags.args[2]
tags = def_and_tags.args[3]
else
default = def_and_tags
end
ex = ex.args[1]
end
if Meta.isexpr(ex, :call) && ex.args[1] == :&
tags = ex.args[3]
ex = ex.args[2]
end
if Meta.isexpr(ex, :atomic)
isatomic = true
ex = ex.args[1]
end
if ex isa Symbol
name = ex
elseif Meta.isexpr(ex, :(::))
name, type = ex.args
else
return nothing
end
name = Meta.isexpr(name, :escape) ? name.args[1] : name
return FieldExpr(isconst, isatomic, name, type, default, tags)
end
function parsefields!(field_exprs::Vector{Any})
fields = FieldExpr[]
for (i, fex) in enumerate(field_exprs)
fex isa LineNumberNode && continue
f = FieldExpr(fex)
if f !== nothing
push!(fields, f)
# replace field expression that may include defaults/tags w/ just the field name/type
# + const/atomic annotations
field_exprs[i] = _expr(f)
else
# ignore lines that aren't fields
end
end
return fields
end
function parse_struct_def(kind, src, mod, expr)
expr = macroexpand(mod, expr)
Meta.isexpr(expr, :struct) || throw(ArgumentError("Invalid usage of @$kind macro"))
ismutable, T, fieldsblock = expr.args
if Meta.isexpr(T, :<:)
T = T.args[1]
end
if Meta.isexpr(T, :curly)
T_with_typeparams = copy(T)
# sanitize T_with_typeparams to remove any type param bounds
for i = 2:length(T_with_typeparams.args)
if T_with_typeparams.args[i] isa Expr
T_with_typeparams.args[i] = T_with_typeparams.args[i].args[1]
end
end
typeparams = T.args[2:end]
T = T.args[1]
end
# kind is: :noarg, :kwdef, :defaults, :tags
ret = Expr(:block)
# we always want to return original struct definition expression
push!(ret.args, :($Base.@__doc__ $expr))
# parse field exprs and sanitize field definitions
fields = parsefields!(fieldsblock.args)
if kind == :noarg
ismutable || throw(ArgumentError("@noarg structs must be mutable"))
if any(f.isconst for f in fields)
#TODO: we could allow non-trailing const fields if they have default values
# by setting the default value in the initial new() call
# or if all fields have default values, we could allow const fields
# because we'd have a value for each field to pass to new()
throw(ArgumentError("const fields are not allowed in @noarg structs"))
end
# generate noarg constructor
if @isdefined(T_with_typeparams)
sig = Expr(:where, Expr(:call, T_with_typeparams), typeparams...)
new_expr = Expr(:(=), :x, Expr(:call, Expr(:curly, :new, T_with_typeparams.args[2:end]...)))
else
sig = Expr(:call, T)
new_expr = :(x = new())
end
cexpr = Expr(:function, sig, Expr(:block, src, new_expr))
defs = [:(setfield!(x, $(Meta.quot(f.name)), $(f.default), $(f.isatomic ? Meta.quot(:sequentially_consistent) : Meta.quot(:not_atomic)))) for f in fields if f.default !== none]
append!(cexpr.args[2].args, defs)
push!(cexpr.args[2].args, Expr(:return, :x))
# add inner constructor right after field definitions
push!(expr.args[3].args, cexpr)
#TODO: should we also generate an all-arg constructor like default struct constructors
# that call convert to the field type for each field?
# override StructUtils.noarg(::Type{nm}) = true and add outside struct definition
push!(ret.args, :(StructUtils.noarg(::Type{<:$T}) = true))
generate_field_defaults_and_tags!(ret, T, fields)
elseif kind == :kwdef
if !isempty(fields)
# generate outer kwdef constructor, like: Foo(; a=1, b=2, ...) = Foo(a, b, ...)
params = Expr(:parameters, (_kw(fex) for fex in fields)...)
sig = Expr(:call, T, params)
fexpr = Expr(:function, sig, Expr(:block, src, :(return $T($((f.name for f in fields)...)))))
push!(ret.args, fexpr)
if @isdefined(T_with_typeparams)
# generate another kwdef constructor with type parameters
sig = Expr(:where, Expr(:call, T_with_typeparams, params), typeparams...)
fexpr = Expr(:function, sig, Expr(:block, src, :(return $T_with_typeparams($((f.name for f in fields)...)))))
push!(ret.args, fexpr)
end
end
# override StructUtils.kwdef(::Type{T}) = true and add outside struct definition
push!(ret.args, :(StructUtils.kwdef(::Type{<:$T}) = true))
generate_field_defaults_and_tags!(ret, T, fields)
else
# if any default are specified, ensure all trailing fields have defaults
# then generate an outer constructor with leading non-default fields as arguments
# passing defaults to inner constructor
anydefaults = false
for (i, f) in enumerate(fields)
anydefaults |= f.default !== none
if anydefaults && f.default === none
throw(ArgumentError("All trailing fields must have default values in @$kind structs"))
end
end
if anydefaults
sig = Expr(:call, T, (f.name for f in fields if f.default === none)...)
fexpr = Expr(:function, sig, Expr(:block, src, :(return $T($((f.default === none ? f.name : f.default for f in fields)...)))))
push!(ret.args, fexpr)
end
generate_field_defaults_and_tags!(ret, T, fields)
end
return esc(ret)
end
function generate_field_defaults_and_tags!(ret, T, fields)
# generate fielddefaults override if applicable
if any(f.default !== none for f in fields)
defs_nt = Expr(:tuple, Expr(:parameters, [:(($(f.name)=$(f.default))) for f in fields if f.default !== none]...))
push!(ret.args, :(StructUtils.fielddefaults(::Type{<:$T}) = $defs_nt))
end
# generate fieldtags override if applicable
if any(f.tags !== none for f in fields)
tags_nt = Expr(:tuple, Expr(:parameters, [:($(f.name)=$(f.tags)) for f in fields if f.tags !== none]...))
push!(ret.args, :(StructUtils.fieldtags(::Type{<:$T}) = $tags_nt))
end
end
const SHARED_MACRO_DOCS = """
The `@noarg`, `@kwdef`, `@defaults`, and `@tags` macros all support
specifying "field tags" for each field in a struct. Field tags are
a NamedTuple prefixed by `&` and are a way to attach metadata to a field. The
field tags are accessible via the `StructUtils.fieldtags` function, and certain
field tags are used by the `StructUtils.make` function to control how fields are
constructed, including:
* `dateformat`: a `DateFormat` object to use when parsing or formatting a `Dates.TimeType` field
* `lower`: a function to apply to a field when `applyeach` is called on a struct
* `lift`: a function to apply to a field when `StructUtils.make` is called on for a struct
* `ignore`: a `Bool` to indicate if a field should be skipped/ignored when `applyeach` or `make` is called
* `name`: a `Symbol` to be used instead of a defined field name in `applyeach` or used to match a field in `make`
* `choosetype`: a function to apply to a field when `StructUtils.make` is called to determine the concrete type of an abstract or Union typed field
For example, the following struct definition includes a field with a `dateformat` tag:
```julia
@tags struct MyStruct
date::Date &(dateformat=dateformat"yyyy-mm-dd",)
end
```
"""
"""
@noarg mutable struct T
...
end
Macro to enhance a `mutable struct` definition by automatically
generating an empty or "no-argument" constructor. Similar to the
`@kwdef` macro, default values can be specified for fields, which will
be set in the generated constructor. `StructUtils.noarg` trait is also
overridden to return `true` for the struct type. This allows
structs to easily participate in programmatic construction via
`StructUtils.make`.
Note that `const` fields are currently not allowed in `@noarg` structs.
$SHARED_MACRO_DOCS
Example
```julia
@noarg mutable struct Foo
a::Int
b::String
c::Float64 = 1.0
d::Vector{Int} = [1, 2, 3]
end
```
In the above example, the `@noarg` macro generates the following inner constructor:
```julia
function Foo()
x = new()
x.c = 1.0
x.d = [1, 2, 3]
return x
end
```
"""
macro noarg(expr)
parse_struct_def(:noarg, __source__, __module__, expr)
end
"""
@kwdef struct T
...
end
Macro to enhance a `struct` definition by automatically generating a
keyword argument constructor. Default values can be specified for fields,
which will be set in the generated constructor. `StructUtils.kwdef` trait is
also overridden to return `true` for the struct type. This allows structs
to easily participate in programmatic construction via `StructUtils.make`.
$SHARED_MACRO_DOCS
Example
```julia
@kwdef struct Foo
a::Int
b::String = "foo"
c::Float64 = 1.0
d::Vector{Int} = [1, 2, 3]
end
```
In the above example, the `@kwdef` macro generates the following inner constructor:
```julia
function Foo(; a, b="foo", c=1.0, d=[1, 2, 3])
return Foo(a, b, c, d)
end
```
"""
macro kwdef(expr)
parse_struct_def(:kwdef, __source__, __module__, expr)
end
"""
@defaults struct T
...
end
Macro to enhance a `struct` definition by automatically generating an
outer constructor with default values for trailing fields. The generated
constructor will accept arguments for non-default fields and pass default
values to the inner constructor. `StructUtils.fielddefaults` trait is also
overridden to return a `NamedTuple` of default values for the struct type.
$SHARED_MACRO_DOCS
Example
```julia
@defaults struct Foo
a::Int
b::String = "foo"
c::Float64 = 1.0
d::Vector{Int} = [1, 2, 3]
end
```
In the above example, the `@defaults` macro generates the following outer constructor:
```julia
function Foo(a)
return Foo(a, "foo", 1.0, [1, 2, 3])
end
```
"""
macro defaults(expr)
parse_struct_def(:defaults, __source__, __module__, expr)
end
"""
@tags struct T
...
end
Macro to enhance a `struct` definition by allowing field tags to be
specified for each field.
$SHARED_MACRO_DOCS
"""
macro tags(expr)
parse_struct_def(:tags, __source__, __module__, expr)
end
| StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | code | 7457 | """
Selection syntax
Special "selection syntax" is provided that allows easy querying of objects/arrays that implement `StructUtils.applyeach` using a syntax similar to XPath or CSS selectors,
applied using common Julia syntax.
This syntax mainly uses various forms of `getindex` to select elements of an object or array.
Supported syntax includes:
* `x["key"]` / `x.key` / `x[:key]` / `x[1]` - select the value associated for a key in object `x` (key can be a String, Symbol, or Integer for an array)
* `x[:]` - select all values in object or array `x`, returned as a `Selectors.List`, which is a custom array type that supports the selection syntax
* `x.key` - when `x` is a `List`, select the value for `key` in each element of the `List` (like a broadcasted `getindex`)
* `x[~, key]` - recursively select all values in object or array `x` that have `key`
* `x[~, :]` - recursively select all values in object or array `x`, returned as a flattened `List`
* `x[:, (k, v) -> Bool]` - apply a key-value function `f` to each key-value/index-value in object or array `x`, and return a `List` of all values for which `f` returns `true`
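A sketch of hooking a custom wrapper type into this syntax via the `@selectors` macro (the `JSONDoc` wrapper here is hypothetical; it only needs to support `StructUtils.applyeach`):
```julia
using StructUtils, StructUtils.Selectors

struct JSONDoc
    data::Dict{Symbol, Any}
end
# delegate to the wrapped Dict; use getfield because @selectors overrides getproperty
StructUtils.applyeach(st::StructUtils.StructStyle, f, d::JSONDoc) =
    StructUtils.applyeach(st, f, getfield(d, :data))
@selectors JSONDoc

doc = JSONDoc(Dict(:a => 1, :b => [1, 2, 3]))
doc.a    # 1
doc[:]   # List of all values
```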
"""
module Selectors
using ..StructUtils
export List, @selectors
"""
List(...)
A custom array wrapper that supports the Selectors selection syntax.
"""
struct List{T} <: AbstractVector{T}
items::Vector{T}
end
items(x::List) = getfield(x, :items)
Base.getindex(x::List) = map(getindex, items(x))
List(T=Any) = List(T[])
Base.size(x::List) = size(items(x))
Base.eltype(::List{T}) where {T} = T
Base.isassigned(x::List, args::Integer...) = isassigned(items(x), args...)
Base.push!(x::List, item) = push!(items(x), item)
Base.append!(x::List, items_to_append) = append!(items(x), items_to_append)
StructUtils.arraylike(::List) = true
function StructUtils.applyeach(f, x::List)
# note that there should *never* be #undef
# values in a list, since we only ever initialize empty
# then push!/append! to it
for (i, v) in enumerate(items(x))
ret = f(i, v)
ret isa StructUtils.EarlyReturn && return ret
end
return
end
const KeyInd = Union{AbstractString, Symbol}
const Inds = Union{AbstractVector{<:KeyInd}, NTuple{N, <:KeyInd} where {N},
AbstractVector{<:Integer}, NTuple{N, <:Integer} where {N}}
function _getindex(x, key::Union{KeyInd, Integer})
if StructUtils.arraylike(x) && key isa KeyInd
# indexing an array with a key, so we check
# each element if it's an object and if the
# object has the key
# like a broadcasted getindex over x
values = List()
StructUtils.applyeach(x) do _, item
if StructUtils.structlike(item)
# if array elements are objects, we do a broadcasted getproperty with `key`
# should we try-catch and ignore KeyErrors?
push!(values, _getindex(item, key))
else
# non-objects are just ignored
end
return
end
return values
elseif StructUtils.structlike(x) || StructUtils.arraylike(x)
# indexing object w/ key or array w/ index
# returns a single value
ret = StructUtils.applyeach(x) do k, v
StructUtils.keyeq(k, key) && return StructUtils.EarlyReturn(v)
return
end
ret isa StructUtils.EarlyReturn || throw(KeyError(key))
return ret.value
else
noselection(x)
end
end
# return all values of an object or elements of an array as a List
function _getindex(x, ::Colon)
selectioncheck(x)
values = List()
StructUtils.applyeach(x) do _, v
push!(values, v)
return
end
return values
end
# a list is already a list of all its elements
_getindex(x::List, ::Colon) = x
# indexing object or array w/ a list of keys/indexes
function _getindex(x, inds::Inds)
selectioncheck(x)
values = List()
StructUtils.applyeach(x) do k, v
i = findfirst(StructUtils.keyeq(k), inds)
i !== nothing && push!(values, v)
return
end
return values
end
# return all values of an object or elements of an array as a List
# that satisfy a key-value function
function _getindex(x, S::Union{typeof(~), Colon}, f::Base.Callable)
selectioncheck(x)
values = List()
StructUtils.applyeach(x) do k, v
f(k, v) && push!(values, v)
if S == ~
if StructUtils.structlike(v)
ret = _getindex(v, ~, f)
append!(values, ret)
elseif StructUtils.arraylike(v)
ret = _getindex(v, ~, f)
append!(values, ret)
end
end
return
end
return values
end
# recursively return all values of an object or elements of an array as a List (:)
# as a single flattened List; or all properties that match key
function _getindex(x, ::typeof(~), key::Union{KeyInd, Colon})
values = List()
if StructUtils.structlike(x)
StructUtils.applyeach(x) do k, v
if key === Colon()
push!(values, v)
elseif StructUtils.keyeq(k, key)
if StructUtils.arraylike(v)
StructUtils.applyeach(v) do _, vv
push!(values, vv)
return
end
else
push!(values, v)
end
end
if StructUtils.structlike(v)
ret = _getindex(v, ~, key)
append!(values, ret)
elseif StructUtils.arraylike(v)
ret = _getindex(v, ~, key)
append!(values, ret)
end
return
end
elseif StructUtils.arraylike(x)
StructUtils.applyeach(x) do _, item
if StructUtils.structlike(item)
ret = _getindex(item, ~, key)
append!(values, ret)
elseif StructUtils.arraylike(item)
ret = _getindex(item, ~, key)
append!(values, ret)
end
return
end
else
noselection(x)
end
return values
end
selectioncheck(x) = StructUtils.structlike(x) || StructUtils.arraylike(x) || noselection(x)
@noinline noselection(x) = throw(ArgumentError("Selection syntax not defined for this object of type: `$(typeof(x))`"))
tosymbol(x::Symbol) = x
tosymbol(x) = Symbol(x)
# build up propertynames by iterating over each key-value pair
function _propertynames(x)
selectioncheck(x)
nms = Symbol[]
StructUtils.applyeach(x) do k, _
push!(nms, tosymbol(k))
return
end
return nms
end
# convenience macro for defining high-level getindex/getproperty methods
macro selectors(T)
esc(quote
Base.getindex(x::$T, arg) = StructUtils.Selectors._getindex(x, arg)
Base.getindex(x::$T, ::Colon, arg) = StructUtils.Selectors._getindex(x, :, arg)
Base.getindex(x::$T, ::typeof(~), arg) = StructUtils.Selectors._getindex(x, ~, arg)
Base.getindex(x::$T, ::typeof(~), key, val) = StructUtils.Selectors._getindex(x, ~, key, val)
Base.getproperty(x::$T, key::Symbol) = StructUtils.Selectors._getindex(x, key)
Base.propertynames(x::$T) = StructUtils.Selectors._propertynames(x)
Base.hasproperty(x::$T, key::Symbol) = key in propertynames(x)
Base.length(x::$T) = StructUtils.applylength(x)
end)
end
@selectors List
end # module | StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | code | 10621 | @noarg mutable struct NoArg
end
@noarg mutable struct NoArg2
no_type
with_type::Int
with_default = 1
with_type_default::Int = 1
with_tag &(xml=(key="with-tag",),)
with_tag_type::Int &(xml=(key="with-tag-type",),)
with_tag_default = 1 &(xml=(key="with-tag-default",),)
with_tag_type_default::Int = 1 &(xml=(key="with-tag-default",),)
@atomic no_type_atomic
@atomic with_type_atomic::Int
# atomic fields w/ defaults *must* use parens to disambiguate from atomic
# variable assignment syntax: https://github.com/JuliaLang/julia/issues/53893
@atomic(with_default_atomic) = 1
@atomic(with_type_default_atomic::Int) = 1
@atomic(with_tag_atomic) &(xml=(key="with-tag-atomic",),)
@atomic(with_tag_type_atomic::Int) &(xml=(key="with-tag-type-atomic",),)
@atomic(with_tag_default_atomic) = 1 &(xml=(key="with-tag-default-atomic",),)
@atomic(with_tag_type_default_atomic::Int) = 1 &(xml=(key="with-tag-default-atomic",),)
end
abstract type AbstractNoArg end
@noarg mutable struct NoArg3{T, S <: IO} <: AbstractNoArg
a::T = 10 * 20
io::S
end
"""
Documentation for NoArg4
"""
@noarg mutable struct NoArg4
a::String
end
StructUtils.@kwdef mutable struct KwDef1
no_type
with_type::Int
with_default = 1
with_type_default::Int = 1
with_tag &(xml=(key="with-tag",),)
with_tag_type::Int &(xml=(key="with-tag-type",),)
with_tag_default = 1 &(xml=(key="with-tag-default",),)
with_tag_type_default::Int = 1 &(xml=(key="with-tag-default",),)
@atomic no_type_atomic
@atomic with_type_atomic::Int
@atomic(with_default_atomic) = 1
@atomic(with_type_default_atomic::Int) = 1
@atomic(with_tag_atomic) &(xml=(key="with-tag-atomic",),)
@atomic(with_tag_type_atomic::Int) &(xml=(key="with-tag-type-atomic",),)
@atomic(with_tag_default_atomic) = 1 &(xml=(key="with-tag-default-atomic",),)
@atomic(with_tag_type_default_atomic::Int) = 1 &(xml=(key="with-tag-default-atomic",),)
const no_type_const
const with_type_const::Int
const with_default_const = 1
const with_type_default_const::Int = 1
const with_tag_const &(xml=(key="with-tag-const",),)
const with_tag_type_const::Int &(xml=(key="with-tag-type-const",),)
const with_tag_default_const = 1 &(xml=(key="with-tag-default-const",),)
const with_tag_type_default_const::Int = 1 &(xml=(key="with-tag-default-const",),)
end
StructUtils.@kwdef struct NoFields
end
StructUtils.@kwdef struct KwDef2{T, S <: IO} <: AbstractNoArg
a::T = 10 * 20
io::S
end
# tests from Base.@kwdef misc.jl
StructUtils.@kwdef struct Test27970Typed
a::Int
b::String = "hi"
end
StructUtils.@kwdef struct Test27970Untyped
a
end
StructUtils.@kwdef struct Test27970Empty end
abstract type AbstractTest29307 end
StructUtils.@kwdef struct Test29307{T<:Integer} <: AbstractTest29307
a::T=2
end
StructUtils.@kwdef struct TestInnerConstructor
a = 1
TestInnerConstructor(a::Int) = (@assert a>0; new(a))
function TestInnerConstructor(a::String)
@assert length(a) > 0
new(a)
end
end
const outsidevar = 7
StructUtils.@kwdef struct TestOutsideVar
a::Int=outsidevar
end
@test TestOutsideVar() == TestOutsideVar(7)
StructUtils.@kwdef mutable struct Test_kwdef_const_atomic
a
b::Int
c::Int = 1
const d
const e::Int
const f = 1
const g::Int = 1
@atomic h::Int
end
StructUtils.@kwdef struct Test_kwdef_lineinfo
a::String
end
# @testset "StructUtils.@kwdef constructor line info" begin
# for method in methods(Test_kwdef_lineinfo)
# @test method.file === Symbol(@__FILE__)
# @test ((@__LINE__)-6) ≤ method.line ≤ ((@__LINE__)-5)
# end
# end
StructUtils.@kwdef struct Test_kwdef_lineinfo_sparam{S<:AbstractString}
a::S
end
# @testset "@kwdef constructor line info with static parameter" begin
# for method in methods(Test_kwdef_lineinfo_sparam)
# @test method.file === Symbol(@__FILE__)
# @test ((@__LINE__)-6) ≤ method.line ≤ ((@__LINE__)-5)
# end
# end
module KwdefWithEsc
using StructUtils
const Int1 = Int
const val1 = 42
macro define_struct()
quote
StructUtils.@kwdef struct $(esc(:Struct))
a
b = val1
c::Int1
d::Int1 = val1
$(esc(quote
e
f = val2
g::Int2
h::Int2 = val2
end))
$(esc(:(i = val2)))
$(esc(:(j::Int2)))
$(esc(:(k::Int2 = val2)))
l::$(esc(:Int2))
m::$(esc(:Int2)) = val1
n = $(esc(:val2))
o::Int1 = $(esc(:val2))
$(esc(:p))
$(esc(:q)) = val1
$(esc(:s))::Int1
$(esc(:t))::Int1 = val1
end
end
end
end
module KwdefWithEsc_TestModule
using ..KwdefWithEsc
const Int2 = Int
const val2 = 42
KwdefWithEsc.@define_struct()
end
@defaults struct Defaults1
a::Int
b::String
c::Float64 = 3.14
d::Int = 42
end
@defaults struct Defaults2{S <: IO, T} <: AbstractNoArg
io::S
a::T = 10 * 20
end
@testset "macros" begin
@test NoArg() isa NoArg
x = NoArg2()
x.no_type = :hey
@test x isa NoArg2
@test StructUtils.noarg(NoArg2)
fd = StructUtils.fielddefaults(NoArg2)
@test fd.with_default == 1 && fd.with_type_default == 1 && fd.with_tag_default == 1 && fd.with_tag_type_default == 1 && fd.with_default_atomic == 1 && fd.with_type_default_atomic == 1 && fd.with_tag_default_atomic == 1 && fd.with_tag_type_default_atomic == 1
ft = StructUtils.fieldtags(NoArg2)
@test ft.with_tag == (xml=(key="with-tag",),) && ft.with_tag_type == (xml=(key="with-tag-type",),) && ft.with_tag_default == (xml=(key="with-tag-default",),) && ft.with_tag_type_default == (xml=(key="with-tag-default",),) && ft.with_tag_atomic == (xml=(key="with-tag-atomic",),) && ft.with_tag_type_atomic == (xml=(key="with-tag-type-atomic",),) && ft.with_tag_default_atomic == (xml=(key="with-tag-default-atomic",),) && ft.with_tag_type_default_atomic == (xml=(key="with-tag-default-atomic",),)
@static if VERSION >= v"1.11-DEV"
@test @doc(NoArg4).text[1] == "Documentation for NoArg4\n"
else
@test string(@doc(NoArg4)) == "Documentation for NoArg4\n"
end
@test_throws ArgumentError @macroexpand StructUtils.@noarg struct NonMutableNoArg
with_type::Int = 1
end
# can't use @noarg w/ struct w/ const fields
@test_throws ArgumentError @macroexpand StructUtils.@noarg mutable struct NoArgConst
const a::Int = 1
end
x = KwDef1(no_type=1, with_type=1, with_tag=1, with_tag_type=1, no_type_atomic=1, with_type_atomic=1, with_tag_atomic=1, with_tag_type_atomic=1, no_type_const=1, with_type_const=1, with_tag_const=1, with_tag_type_const=1)
@test x.no_type == 1
@test StructUtils.kwdef(KwDef1)
fd = StructUtils.fielddefaults(KwDef1)
@test fd.with_default == 1 && fd.with_type_default == 1 && fd.with_tag_default == 1 && fd.with_tag_type_default == 1 && fd.with_default_atomic == 1 && fd.with_type_default_atomic == 1 && fd.with_tag_default_atomic == 1 && fd.with_tag_type_default_atomic == 1 && fd.with_default_const == 1 && fd.with_type_default_const == 1 && fd.with_tag_default_const == 1 && fd.with_tag_type_default_const == 1
ft = StructUtils.fieldtags(KwDef1)
@test ft.with_tag == (xml=(key="with-tag",),) && ft.with_tag_type == (xml=(key="with-tag-type",),) && ft.with_tag_default == (xml=(key="with-tag-default",),) && ft.with_tag_type_default == (xml=(key="with-tag-default",),) && ft.with_tag_atomic == (xml=(key="with-tag-atomic",),) && ft.with_tag_type_atomic == (xml=(key="with-tag-type-atomic",),) && ft.with_tag_default_atomic == (xml=(key="with-tag-default-atomic",),) && ft.with_tag_type_default_atomic == (xml=(key="with-tag-default-atomic",),) && ft.with_tag_const == (xml=(key="with-tag-const",),) && ft.with_tag_type_const == (xml=(key="with-tag-type-const",),) && ft.with_tag_default_const == (xml=(key="with-tag-default-const",),) && ft.with_tag_type_default_const == (xml=(key="with-tag-default-const",),)
@test NoFields() isa NoFields
@testset "No default values in @kwdef" begin
@test Test27970Typed(a=1) == Test27970Typed(1, "hi")
# Implicit type conversion (no assertion on kwarg)
@test Test27970Typed(a=0x03) == Test27970Typed(3, "hi")
@test_throws UndefKeywordError Test27970Typed()
@test Test27970Untyped(a=1) == Test27970Untyped(1)
@test_throws UndefKeywordError Test27970Untyped()
# Just checking that this doesn't stack overflow on construction
@test Test27970Empty() == Test27970Empty()
end
@testset "subtyped @kwdef" begin
@test Test29307() == Test29307{Int}(2)
@test Test29307(a=0x03) == Test29307{UInt8}(0x03)
@test Test29307{UInt32}() == Test29307{UInt32}(2)
@test Test29307{UInt32}(a=0x03) == Test29307{UInt32}(0x03)
end
@testset "@kwdef inner constructor" begin
@test TestInnerConstructor() == TestInnerConstructor(1)
@test TestInnerConstructor(a=2) == TestInnerConstructor(2)
@test_throws AssertionError TestInnerConstructor(a=0)
@test TestInnerConstructor(a="2") == TestInnerConstructor("2")
@test_throws AssertionError TestInnerConstructor(a="")
end
@testset "const and @atomic fields in @kwdef" begin
x = Test_kwdef_const_atomic(a = 1, b = 1, d = 1, e = 1, h = 1)
for f in fieldnames(Test_kwdef_const_atomic)
@test getfield(x, f) == 1
end
@testset "const fields" begin
@test_throws ErrorException x.d = 2
@test_throws ErrorException x.e = 2
@test_throws MethodError x.e = "2"
@test_throws ErrorException x.f = 2
@test_throws ErrorException x.g = 2
end
@testset "atomic fields" begin
@test_throws ConcurrencyViolationError x.h = 1
@atomic x.h = 1
@test @atomic(x.h) == 1
@atomic x.h = 2
@test @atomic(x.h) == 2
end
end
@test isdefined(KwdefWithEsc_TestModule, :Struct)
@test Defaults1(1, "hey") == Defaults1(1, "hey", 3.14, 42)
io = IOBuffer()
@test Defaults2(io) == Defaults2(io, 200)
@test_throws ArgumentError @macroexpand @defaults struct FooFoo
a::Int = 1
b::String
end
end # @testset "macros" | StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | code | 7167 | using Test, Dates, UUIDs, StructUtils
struct TestStyle <: StructUtils.StructStyle end
include(joinpath(dirname(pathof(StructUtils)), "../test/macros.jl"))
include(joinpath(dirname(pathof(StructUtils)), "../test/struct.jl"))
@testset "StructUtils" begin
# Dict{Symbol, Int}, NamedTuple, A struct, Vector Pair
d = Dict(:a => 1, :b => 2, :c => 3, :d => 4)
ds = Dict("a" => 1, "b" => 2, "c" => 3, "d" => 4)
nt = (a = 1, b = 2, c = 3, d = 4)
a = A(1, 2, 3, 4)
aa = AA(1, 2, 3, 4, 5)
b = B(); b.a = 1; b.b = 2; b.c = 3; b.d = 4; b
bb = BB()
vp = [:a => 1, :b => 2, :c => 3, :d => 4]
v = [1, 2, 3, 4]
t = (1, 2, 3, 4)
println("Dict{Symbol, Int}")
@test StructUtils.make(Dict{Symbol, Int}, d) == d
@test StructUtils.make(Dict{Symbol, Int}, ds) == d
@test StructUtils.make(Dict{Symbol, Int}, nt) == d
@test StructUtils.make(Dict{Symbol, Int}, a) == d
@test StructUtils.make(Dict{Symbol, Int}, vp) == d
# @test StructUtils.make(Dict{Symbol, Int}, aa) == d # fails because of extra field
@test StructUtils.make(Dict{Symbol, Int}, v) == Dict(Symbol(1) => 1, Symbol(2) => 2, Symbol(3) => 3, Symbol(4) => 4)
@test StructUtils.make(Dict{Symbol, Int}, t) == Dict(Symbol(1) => 1, Symbol(2) => 2, Symbol(3) => 3, Symbol(4) => 4)
@test StructUtils.make(Dict{Symbol, Int}, b) == d
@test StructUtils.make(Dict{Symbol, Int}, bb) == d
println("NamedTuple")
@test StructUtils.make(typeof(nt), d) == nt
@test StructUtils.make(typeof(nt), ds) == nt
@test StructUtils.make(typeof(nt), nt) == nt
@test StructUtils.make(typeof(nt), a) == nt
@test StructUtils.make(typeof(nt), vp) == nt
@test StructUtils.make(typeof(nt), v) == nt
@test StructUtils.make(typeof(nt), t) == nt
@test StructUtils.make(typeof(nt), aa) == nt # extra field is ignored
@test StructUtils.make(typeof(nt), b) == nt
@test StructUtils.make(typeof(nt), bb) == nt
println("A struct")
@test StructUtils.make(A, d) == a
@test StructUtils.make(A, ds) == a # slower
@test StructUtils.make(A, nt) == a
@test StructUtils.make(A, a) == a
@test StructUtils.make(A, vp) == a
@test StructUtils.make(A, v) == a # relies on order of vector elements
@test StructUtils.make(A, t) == a # relies on order of tuple elements
@test StructUtils.make(A, aa) == a # extra field is ignored
@test StructUtils.make(A, b) == a
@test StructUtils.make(A, bb) == a
println("AA struct")
@test StructUtils.make(AA, d) == aa # works because of AA field e default
@test StructUtils.make(AA, ds) == aa
@test StructUtils.make(AA, nt) == aa
@test StructUtils.make(AA, a) == aa
@test StructUtils.make(AA, vp) == aa
@test StructUtils.make(AA, v) == aa # relies on order of vector elements
@test StructUtils.make(AA, t) == aa # relies on order of tuple elements
@test StructUtils.make(AA, aa) == aa # extra field is ignored
@test StructUtils.make(AA, b) == aa
@test StructUtils.make(AA, bb) == aa
println("B struct")
@test StructUtils.make(B, d) == b
@test StructUtils.make(B, ds) == b
@test StructUtils.make(B, nt) == b
@test StructUtils.make(B, a) == b
@test StructUtils.make(B, vp) == b
@test StructUtils.make(B, v) == b # relies on order of vector elements
@test StructUtils.make(B, t) == b # relies on order of tuple elements
@test StructUtils.make(B, aa) == b # extra field is ignored
@test StructUtils.make(B, b) == b
@test StructUtils.make(B, bb) == b
println("BB struct")
@test StructUtils.make(BB, d) == bb
@test StructUtils.make(BB, ds) == bb
@test StructUtils.make(BB, nt) == bb
@test StructUtils.make(BB, a) == bb
@test StructUtils.make(BB, vp) == bb
@test StructUtils.make(BB, v) == bb # relies on order of vector elements
@test StructUtils.make(BB, t) == bb # relies on order of tuple elements
@test StructUtils.make(BB, aa) == bb # extra field is ignored
@test StructUtils.make(BB, b) == bb
@test StructUtils.make(BB, bb) == bb
println("Vector Pair")
# @test StructUtils.make(typeof(vp), d) == vp # unordered Dict doesn't work
# @test StructUtils.make(typeof(vp), ds) == vp # unordered Dict doesn't work
@test StructUtils.make(typeof(vp), nt) == vp
@test StructUtils.make(typeof(vp), a) == vp
@test StructUtils.make(typeof(vp), vp) == vp
println("Vector")
# @test StructUtils.make(typeof(v), d) == v # relies on order of Dict elements
# @test StructUtils.make(typeof(v), ds) == v # relies on order of Dict elements
@test StructUtils.make(typeof(v), nt) == v
@test StructUtils.make(typeof(v), a) == v
@test StructUtils.make(typeof(v), vp) == v
@test StructUtils.make(typeof(v), v) == v
@test StructUtils.make(typeof(v), t) == v
# @test StructUtils.make(typeof(v), aa) == v # fails because of extra field
println("Tuple")
# @test StructUtils.make(typeof(t), d) == t # relies on order of Dict elements
# @test StructUtils.make(typeof(t), ds) == t # relies on order of Dict elements
@test StructUtils.make(typeof(t), nt) == t
@test StructUtils.make(typeof(t), a) == t
@test StructUtils.make(typeof(t), vp) == t
@test StructUtils.make(typeof(t), v) == t
@test StructUtils.make(typeof(t), t) == t
@test StructUtils.make(typeof(t), aa) == t
println("C")
@test StructUtils.make(C, ()) == C()
@test StructUtils.make(C, (1,)) == C()
println("D")
@test StructUtils.make(D, (1, 3.14, "hey")) == D(1, 3.14, "hey")
println("Wrapper")
@test StructUtils.make(Wrapper, Dict("x" => Dict("a" => 1, "b" => "hey"))) == Wrapper((a=1, b="hey"))
println("UndefGuy")
x = StructUtils.make(UndefGuy, (id=1, name="2"))
@test x.id == 1 && x.name == "2"
x = StructUtils.make(UndefGuy, (id=1,))
@test x.id == 1 && !isdefined(x, :name)
println("E")
@test StructUtils.make(E, Dict("id" => 1, "a" => (a=1, b=2, c=3, d=4))) == E(1, A(1, 2, 3, 4))
println("G")
@test StructUtils.make(G, Dict("id" => 1, "rate" => 3.14, "name" => "Jim", "f" => Dict("id" => 2, "rate" => 6.28, "name" => "Bob"))) == G(1, 3.14, "Jim", F(2, 6.28, "Bob"))
println("H")
x = StructUtils.make(H, (id=0, name="", properties=Dict("a" => 1), addresses=["a", "b", "c"]))
@test x.id == 0 && x.name == "" && x.properties == Dict("a" => 1) && x.addresses == ["a", "b", "c"]
println("I")
@test StructUtils.make(I, (id=2, name="Aubrey", fruit=apple)) == I(2, "Aubrey", apple)
println("Vehicle")
StructUtils.choosetype(::Type{Vehicle}, source) = source["type"] == "car" ? Car : Truck
x = StructUtils.make(Vehicle, Dict("type" => "car", "make" => "Toyota", "model" => "Corolla", "seatingCapacity" => 4, "topSpeed" => 120.5))
@test x == Car("Toyota", "Corolla", 4, 120.5)
println("J")
@test StructUtils.make(J, (id=1, name=nothing, rate=3.14)) == J(1, nothing, 3.14)
@test StructUtils.make(J, (id=nothing, name=nothing, rate=3)) == J(nothing, nothing, 3)
println("Recurs")
@test StructUtils.make(Recurs, (id=0, value=(id=1, value=(id=2, value=(id=3, value=(id=4, value=nothing)))))) == Recurs(0, Recurs(1, Recurs(2, Recurs(3, Recurs(4, nothing)))))
println("O")
@test StructUtils.make(O, (id=0, name=missing)) == O(0, missing)
@test StructUtils.make(O, (id=0, name=nothing)) == O(0, nothing)
@test StructUtils.make(O, (id=0, name=(id=2, first_name="Jim", rate=3.14))) == O(0, L(2, "Jim", 3.14))
@test StructUtils.make(O, (id=0, name=(id=2, name="Jane", fruit=banana))) == O(0, I(2, "Jane", banana))
println("P")
p = StructUtils.make(P, (id=0, name="Jane"))
@test p.id == 0 && p.name == "Jane"
end | StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | code | 3020 | struct A
a::Int
b::Int
c::Int
d::Int
end
struct AA
a::Int
b::Int
c::Int
d::Int
e::Int
end
StructUtils.fielddefaults(::Type{AA}) = (e=5,)
mutable struct B
a::Int
b::Int
c::Int
d::Int
B() = new()
end
StructUtils.noarg(::Type{B}) = true
Base.:(==)(b1::B, b2::B) = b1.a == b2.a && b1.b == b2.b && b1.c == b2.c && b1.d == b2.d
Base.@kwdef struct BB
a::Int = 1
b::Int = 2
c::Int = 3
d::Int = 4
end
StructUtils.kwdef(::Type{BB}) = true
struct C
end
struct D
a::Int
b::Float64
c::String
end
struct LotsOfFields
x1::String
x2::String
x3::String
x4::String
x5::String
x6::String
x7::String
x8::String
x9::String
x10::String
x11::String
x12::String
x13::String
x14::String
x15::String
x16::String
x17::String
x18::String
x19::String
x20::String
x21::String
x22::String
x23::String
x24::String
x25::String
x26::String
x27::String
x28::String
x29::String
x30::String
x31::String
x32::String
x33::String
x34::String
x35::String
end
struct Wrapper
x::NamedTuple{(:a, :b), Tuple{Int, String}}
end
mutable struct UndefGuy
id::Int
name::String
UndefGuy() = new()
end
StructUtils.noarg(::Type{UndefGuy}) = true
struct E
id::Int
a::A
end
Base.@kwdef struct F
id::Int
rate::Float64
name::String
end
StructUtils.kwdef(::Type{F}) = true
Base.@kwdef struct G
id::Int
rate::Float64
name::String
f::F
end
StructUtils.kwdef(::Type{G}) = true
struct H
id::Int
name::String
properties::Dict{String, Any}
addresses::Vector{String}
end
@enum Fruit apple banana
struct I
id::Int
name::String
fruit::Fruit
end
abstract type Vehicle end
struct Car <: Vehicle
make::String
model::String
seatingCapacity::Int
topSpeed::Float64
end
struct Truck <: Vehicle
make::String
model::String
payloadCapacity::Float64
end
struct J
id::Union{Int, Nothing}
name::Union{String, Nothing}
rate::Union{Int64, Float64}
end
struct K
id::Int
value::Union{Float64, Missing}
end
Base.@kwdef struct System
duration::Real = 0 # mandatory
cwd::Union{Nothing, String} = nothing
environment::Union{Nothing, Dict} = nothing
batch::Union{Nothing, Dict} = nothing
shell::Union{Nothing, Dict} = nothing
end
StructUtils.kwdef(::Type{System}) = true
struct L
id::Int
first_name::String
rate::Float64
end
struct ThreeDates
date::Date
datetime::DateTime
time::Time
end
struct M
id::Int
value::Union{Nothing,K}
end
struct Recurs
id::Int
value::Union{Nothing,Recurs}
end
struct N
id::Int
uuid::UUID
end
@tags struct O
id::Int
name::Union{I,L,Missing,Nothing} &(choosetype=x->isnothing(x) ? Nothing : ismissing(x) ? Missing : haskey(x, :fruit) ? I : L,)
end
@noarg mutable struct P
id::Int
@atomic(name::String) = "Jim"
end
| StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 1.1.0 | d039f3ef7481270be1584a50edea804ae3c83953 | docs | 3491 | # StructUtils
[](https://github.com/quinnj/StructUtils.jl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/quinnj/StructUtils.jl)
[](https://juliahub.com/ui/Packages/StructUtils/HHBkp?t=2)
[](https://juliahub.com/ui/Packages/StructUtils/HHBkp)
[](https://juliahub.com/ui/Packages/StructUtils/HHBkp)
## Installation
The package is registered in the [`General`](https://github.com/JuliaRegistries/General) registry and so can be installed at the REPL with `] add StructUtils`.
## Documentation
The primary interface provided by StructUtils.jl is in the form of the exported `@noarg`, `@defaults`, and `@tags` macros, along with `StructUtils.@kwdef` (left unexported to avoid clashing with the Base definition). These macros can be used on struct definitions to provide a more ergonomic and flexible way to define structs with default values, keyword constructors, and more.
The `@noarg` macro can be used to define a "no argument" constructor, and must be used with mutable structs. This allows
instances to be constructed programmatically (empty, then filled in field by field) and marks the type (via `StructUtils.noarg`) so generic code can discover that it supports this behavior. Default values and field tags can also be defined in `@noarg` structs.
The `@defaults` macro can be used to define default values for fields in any kind of struct, and constructors will be defined that allow for the omission of fields with default values. Note that all fields with default values must be defined after any fields without default values.
The `@tags` macro can be used to define tags for fields in any kind of struct.
The `@kwdef` macro mirrors the functionality of the Base definition, while also allowing for the inclusion of field tags.
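For illustration, here is a minimal `@defaults` sketch, borrowed from the package's own test suite (the struct name `Defaults1` comes from those tests):

```julia
using StructUtils

@defaults struct Defaults1
    a::Int
    b::String
    c::Float64 = 3.14
    d::Int = 42
end

Defaults1(1, "hey") == Defaults1(1, "hey", 3.14, 42) # trailing defaulted fields may be omitted
```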
The other major interface StructUtils.jl provides is the `StructUtils.make(T, source)` function. It allows programmatic construction of a type `T` from a variety of source objects.
For example, I could have a custom struct `Foo` and be able to construct an instance from an array of values, a dictionary, a database cursor, a JSON object, etc. This is done by allowing source objects to implement interfaces for how fields should be provided programmatically (the primary means being the `StructUtils.applyeach` function), while `StructUtils.make` uses the programmatic knowledge from the above-mentioned macros, along with potential field tags, to construct the object.
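As a rough sketch of what that looks like in practice (mirroring the package's tests, where `A` is a plain four-field struct):

```julia
using StructUtils

struct A
    a::Int
    b::Int
    c::Int
    d::Int
end

StructUtils.make(A, Dict(:a => 1, :b => 2, :c => 3, :d => 4)) # A(1, 2, 3, 4)
StructUtils.make(A, (a = 1, b = 2, c = 3, d = 4))             # A(1, 2, 3, 4)
StructUtils.make(A, [1, 2, 3, 4])                             # relies on field order
```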
Additional documentation is forthcoming on how package developers can use the "under the hood" machinery of StructUtils.jl to provide a more flexible and ergonomic interface to their users, such as custom serialization/deserialization, database interaction, etc.
## Contributing and Questions
Contributions are very welcome, as are feature requests and suggestions. Please open an
[issue][issues-url] if you encounter any problems or would just like to ask a question.
[ci-img]: https://github.com/quinnj/StructUtils.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/quinnj/StructUtils.jl/actions?query=workflow%3ACI+branch%3Amaster
[codecov-img]: https://codecov.io/gh/quinnj/StructUtils.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/quinnj/StructUtils.jl
[issues-url]: https://github.com/quinnj/StructUtils.jl/issues
| StructUtils | https://github.com/quinnj/StructUtils.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 227 | using Documenter, AxisArrays
makedocs(
modules = [AxisArrays],
sitename = "AxisArrays",
)
deploydocs(
deps = Deps.pip("mkdocs", "python-markdown-math"),
repo = "github.com/JuliaArrays/AxisArrays.jl.git"
)
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 539 | VERSION < v"0.7.0-beta2.199" && __precompile__()
module AxisArrays
using Base: tail
import Base.Iterators: repeated
using RangeArrays, IntervalSets
using IterTools
using Dates
function axes end
export AxisArray, Axis, AxisMatrix, AxisVector
export axisnames, axisvalues, axisdim, axes, atindex, atvalue, collapse
# From IntervalSets:
export ClosedInterval, ..
include("core.jl")
include("intervals.jl")
include("search.jl")
include("indexing.jl")
include("sortedvector.jl")
include("categoricalvector.jl")
include("combine.jl")
end
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 2489 | """
A CategoricalVector is an AbstractVector which is treated as a categorical axis regardless
of the element type. Duplicate values are not allowed but are not filtered out.
A CategoricalVector axis can be indexed with a ClosedInterval, with a value, or with a
vector of values. Use of a CategoricalVector{Tuple} axis allows indexing similar to the
hierarchical index of the Python Pandas package or the R data.table package.
In general, indexing into a CategoricalVector will be much slower than the corresponding
SortedVector or another sorted axis type, as linear search is required.
### Constructors
```julia
CategoricalVector(x::AbstractVector)
```
### Arguments
* `x::AbstractVector` : the wrapped vector
### Examples
```julia
v = CategoricalVector(collect([1; 8; 10:15]))
A = AxisArray(reshape(1:16, 8, 2), v, [:a, :b])
A[Axis{:row}(1), :]
A[Axis{:row}(10), :]
A[Axis{:row}([1, 10]), :]
## Hierarchical index example with three key levels
data = reshape(1.:40., 20, 2)
v = collect(zip([:a, :b, :c][rand(1:3,20)], [:x,:y][rand(1:2,20)], [:x,:y][rand(1:2,20)]))
A = AxisArray(data, CategoricalVector(v), [:a, :b])
A[:b, :]
A[[:a,:c], :]
A[(:a,:x), :]
A[(:a,:x,:x), :]
```
"""
struct CategoricalVector{T, A<:AbstractVector{T}} <: AbstractVector{T}
data::A
end
Base.getindex(v::CategoricalVector, idx::Int) = v.data[idx]
Base.getindex(v::CategoricalVector, idx::AbstractVector) = CategoricalVector(v.data[idx])
Base.length(v::CategoricalVector) = length(v.data)
Base.size(v::CategoricalVector) = size(v.data)
Base.size(v::CategoricalVector, i) = size(v.data, i)
Base.axes(v::CategoricalVector) = Base.axes(v.data)
axistrait(::Type{CategoricalVector{T,A}}) where {T,A} = Categorical
checkaxis(::CategoricalVector) = nothing
## Add some special indexing for CategoricalVector{Tuple}'s to achieve something like
## Panda's hierarchical indexing
axisindexes(ax::Axis{S,CategoricalVector{T,A}}, idx) where {T<:Tuple,S,A} = axisindexes(ax, (idx,))
function axisindexes(ax::Axis{S,CategoricalVector{T,A}}, idx::Tuple) where {T<:Tuple,S,A}
collect(filter(ax_idx->_tuple_matches(ax.val[ax_idx], idx), Base.axes(ax.val)...))
end
function _tuple_matches(element::Tuple, idx::Tuple)
length(idx) <= length(element) || return false
for (x, y) in zip(element, idx)
x == y || return false
end
return true
end
axisindexes(ax::Axis{S,CategoricalVector{T,A}}, idx::AbstractArray) where {T<:Tuple,S,A} =
vcat([axisindexes(ax, i) for i in idx]...)
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 11854 | function equalvalued(X::NTuple)
n = length(X)
allequal = true
i = 2
while allequal && i <= n
allequal = X[i] == X[i-1]
i += 1
end #while
return allequal
end #equalvalued
sizes(As::AxisArray...) = tuple(zip(map(a -> map(length, Base.axes(a)), As)...)...)
matchingdims(As::Tuple{Vararg{AxisArray}}) = all(equalvalued, sizes(As...))
matchingdimsexcept(As::Tuple{Vararg{AxisArray}}, n::Int) = all(equalvalued, sizes(As...)[[1:n-1; n+1:end]])
Base.cat(A1::AxisArray{T}, As::AxisArray{T}...; dims) where {T} = _cat(dims, A1, As...)
_cat(::Val{n}, As...) where {n} = _cat(n, As...)
@inline function _cat(n::Integer, As...)
if n <= ndims(As[1])
matchingdimsexcept(As, n) || error("All non-concatenated axes must be identically-valued")
newaxis = Axis{axisnames(As[1])[n]}(vcat(map(A -> A.axes[n].val, As)...))
checkaxis(newaxis)
return AxisArray(cat(map(A->A.data, As)..., dims=n), (As[1].axes[1:n-1]..., newaxis, As[1].axes[n+1:end]...))
else
matchingdims(As) || error("All axes must be identically-valued")
return AxisArray(cat(map(A->A.data, As)..., dims=n), As[1].axes)
end #if
end
function axismerge(method::Symbol, axes::Axis{name,T}...) where {name,T}
axisvals = if method == :inner
intersect(axisvalues(axes...)...)
elseif method == :left
axisvalues(axes[1])[1]
elseif method == :right
axisvalues(axes[end])[1]
elseif method == :outer
union(axisvalues(axes...)...)
else
error("Join method must be one of :inner, :left, :right, :outer")
end #if
isa(axistrait(axisvals), Dimensional) && sort!(axisvals)
return Axis{name}(collect(axisvals))
end
function indexmappings(oldaxes::NTuple{N,Axis}, newaxes::NTuple{N,Axis}) where N
oldvals = axisvalues(oldaxes...)
newvals = axisvalues(newaxes...)
return collect(zip(indexmapping.(oldvals, newvals)...))
end
function indexmapping(old::AbstractVector, new::AbstractVector)
before = Int[]
after = Int[]
oldperm = sortperm(old)
newperm = sortperm(new)
oldsorted = old[oldperm]
newsorted = new[newperm]
oldlength = length(old)
newlength = length(new)
oi = ni = 1
while oi <= oldlength && ni <= newlength
oldval = oldsorted[oi]
newval = newsorted[ni]
if oldval == newval
push!(before, oldperm[oi])
push!(after, newperm[ni])
oi += 1
ni += 1
elseif oldval < newval
oi += 1
else
ni += 1
end
end
return before, after
end
"""
merge(As::AxisArray...)
Combines AxisArrays with matching axis names into a single AxisArray spanning all of the axis values of the inputs. If a coordinate is defined in more than one of the inputs, it takes its value from the last input in which it appears. If a coordinate in the output array is not defined in any of the input arrays, it takes the value of the optional `fillvalue` keyword argument (default zero).
"""
function Base.merge(As::AxisArray{T,N,D,Ax}...; fillvalue::T=zero(T)) where {T,N,D,Ax}
resultaxes = map(as -> axismerge(:outer, as...), map(tuple, axes.(As)...))
resultdata = fill(fillvalue, length.(resultaxes)...)
result = AxisArray(resultdata, resultaxes...)
for A in As
before_idxs, after_idxs = indexmappings(A.axes, result.axes)
result.data[after_idxs...] = A.data[before_idxs...]
end
return result
end #merge
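# Illustrative sketch (hypothetical data, not from the package tests): merging two
# 1-d AxisArrays with the same axis name spans the union of their axis values.
#   A = AxisArray([1, 2],   Axis{:t}([1, 2]))
#   B = AxisArray([20, 30], Axis{:t}([2, 3]))
#   merge(A, B)   # axis :t == [1, 2, 3]; data == [1, 20, 30] (t=2 taken from B, the
#                 # last input; positions absent from every input take `fillvalue`)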
"""
join(As::AxisArray...)
Combines AxisArrays with matching axis names into a single AxisArray. Unlike `merge`, the inputs are joined along a newly created axis (optionally specified with the `newaxis` keyword argument). The `method` keyword argument can be used to specify the join type:
`:inner` - keep only those array values at axis values common to all AxisArrays to be joined
`:left` - keep only those array values at axis values present in the first AxisArray passed
`:right` - keep only those array values at axis values present in the last AxisArray passed
`:outer` (default) - keep all array values: create an AxisArray spanning all of the input axis values
If an array value in the output array is not defined in any of the input arrays (i.e. in the case of a left, right, or outer join), it takes the value of the optional `fillvalue` keyword argument (default zero).
"""
function Base.join(As::AxisArray{T,N,D,Ax}...; fillvalue::T=zero(T),
newaxis::Axis=_default_axis(1:length(As), ndims(As[1])+1),
method::Symbol=:outer) where {T,N,D,Ax}
prejoin_resultaxes = map(as -> axismerge(method, as...), map(tuple, axes.(As)...))
resultaxes = (prejoin_resultaxes..., newaxis)
resultdata = fill(fillvalue, length.(resultaxes)...)
result = AxisArray(resultdata, resultaxes...)
for (i, A) in enumerate(As)
before_idxs, after_idxs = indexmappings(A.axes, prejoin_resultaxes)
result.data[(after_idxs..., i)...] = A.data[before_idxs...]
end #for
return result
end #join
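# Illustrative sketch (hypothetical data, not from the package tests): unlike `merge`,
# `join` stacks the inputs along a new axis instead of overwriting overlaps.
#   A = AxisArray([1, 2],   Axis{:t}([1, 2]))
#   B = AxisArray([20, 30], Axis{:t}([2, 3]))
#   J = join(A, B)          # 3×2, axes (:t == [1, 2, 3], default new axis 1:2)
#   J[:, 1] == [1, 2, 0]    # values of A, `fillvalue` where A has no entry
#   J[:, 2] == [0, 20, 30]  # values of B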
function _collapse_array_axes(array_name, array_axes...)
((array_name, (idx isa Tuple ? idx : (idx,))...) for idx in Iterators.product((Ax.val for Ax in array_axes)...))
end
function _collapse_axes(array_names, array_axes)
collect(Iterators.flatten(map(array_names, array_axes) do tup_name, tup_array_axes
_collapse_array_axes(tup_name, tup_array_axes...)
end))
end
function _splitall(::Val{N}, As...) where N
tuple((Base.IteratorsMD.split(A, Val(N)) for A in As)...)
end
function _reshapeall(::Val{N}, As...) where N
tuple((reshape(A, Val(N)) for A in As)...)
end
function _check_common_axes(common_axis_tuple)
if !all(axisname(first(common_axis_tuple)) .=== axisname.(common_axis_tuple[2:end]))
throw(ArgumentError("Leading common axes must have the same name in each array"))
end
return nothing
end
function _collapsed_axis_eltype(LType, trailing_axes)
eltypes = map(trailing_axes) do array_trailing_axes
Tuple{LType, eltype.(array_trailing_axes)...}
end
return typejoin(eltypes...)
end
function collapse(::Val{N}, As::Vararg{AxisArray, AN}) where {N, AN}
collapse(Val(N), ntuple(identity, AN), As...)
end
function collapse(::Val{N}, ::Type{NewArrayType}, As::Vararg{AxisArray, AN}) where {N, AN, NewArrayType<:AbstractArray}
collapse(Val(N), NewArrayType, ntuple(identity, AN), As...)
end
@generated function collapse(::Val{N}, labels::NTuple{AN, LType}, As::Vararg{AxisArray, AN}) where {N, AN, LType}
collapsed_dim_int = Int(N) + 1
new_eltype = Base.promote_eltype(As...)
quote
collapse(Val(N), Array{$new_eltype, $collapsed_dim_int}, labels, As...)
end
end
"""
collapse(::Val{N}, As::AxisArray...) -> AxisArray
collapse(::Val{N}, labels::Tuple, As::AxisArray...) -> AxisArray
collapse(::Val{N}, ::Type{NewArrayType}, As::AxisArray...) -> AxisArray
collapse(::Val{N}, ::Type{NewArrayType}, labels::Tuple, As::AxisArray...) -> AxisArray
Collapses `AxisArray`s with `N` equal leading axes into a single `AxisArray`.
All additional axes in any of the arrays are collapsed into a single additional
axis of type `Axis{:collapsed, CategoricalVector{Tuple}}`.
### Arguments
* `::Val{N}`: the greatest common dimension to share between all input
arrays. The remaining axes are collapsed. All `N` axes must be common
to each input array, at the same dimension. Values from `0` up to the
minimum number of dimensions across all input arrays are allowed.
* `labels::Tuple`: (optional) an index for each array in `As` used as the leading element in
the index tuples in the `:collapsed` axis. Defaults to `1:length(As)`.
* `::Type{NewArrayType<:AbstractArray{_, N+1}}`: (optional) the desired underlying array
type for the returned `AxisArray`.
* `As::AxisArray...`: `AxisArray`s to be collapsed together.
### Examples
```
julia> price_data = AxisArray(rand(10), Axis{:time}(Date(2016,01,01):Day(1):Date(2016,01,10)))
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 2016-01-01:1 day:2016-01-10
And data, a 10-element Array{Float64,1}:
0.885014
0.418562
0.609344
0.72221
0.43656
0.840304
0.455337
0.65954
0.393801
0.260207
julia> size_data = AxisArray(rand(10,2), Axis{:time}(Date(2016,01,01):Day(1):Date(2016,01,10)), Axis{:measure}([:area, :volume]))
2-dimensional AxisArray{Float64,2,...} with axes:
:time, 2016-01-01:1 day:2016-01-10
:measure, Symbol[:area, :volume]
And data, a 10×2 Array{Float64,2}:
0.159434 0.456992
0.344521 0.374623
0.522077 0.313256
0.994697 0.320953
0.95104 0.900526
0.921854 0.729311
0.000922581 0.148822
0.449128 0.761714
0.650277 0.135061
0.688773 0.513845
julia> collapsed = collapse(Val(1), (:price, :size), price_data, size_data)
2-dimensional AxisArray{Float64,2,...} with axes:
:time, 2016-01-01:1 day:2016-01-10
:collapsed, Tuple{Symbol,Vararg{Symbol,N} where N}[(:price,), (:size, :area), (:size, :volume)]
And data, a 10×3 Array{Float64,2}:
0.885014 0.159434 0.456992
0.418562 0.344521 0.374623
0.609344 0.522077 0.313256
0.72221 0.994697 0.320953
0.43656 0.95104 0.900526
0.840304 0.921854 0.729311
0.455337 0.000922581 0.148822
0.65954 0.449128 0.761714
0.393801 0.650277 0.135061
0.260207 0.688773 0.513845
julia> collapsed[Axis{:collapsed}(:size)] == size_data
true
```
"""
@generated function collapse(::Val{N},
::Type{NewArrayType},
labels::NTuple{AN, LType},
As::Vararg{AxisArray, AN}) where {N, AN, LType, NewArrayType<:AbstractArray}
if N < 0
throw(ArgumentError("collapse dimension N must be at least 0"))
end
if N > minimum(ndims.(As))
throw(ArgumentError(
"""
            collapse dimension N must not be greater than the minimum number of dimensions
across all input arrays
"""
))
end
collapsed_dim = Val(N + 1)
collapsed_dim_int = Int(N) + 1
common_axes, trailing_axes = zip(_splitall(Val(N), axisparams.(As)...)...)
foreach(_check_common_axes, zip(common_axes...))
new_common_axes = first(common_axes)
collapsed_axis_eltype = _collapsed_axis_eltype(LType, trailing_axes)
collapsed_axis_type = CategoricalVector{collapsed_axis_eltype, Vector{collapsed_axis_eltype}}
new_axes_type = Tuple{new_common_axes..., Axis{:collapsed, collapsed_axis_type}}
new_eltype = Base.promote_eltype(As...)
quote
common_axes, trailing_axes = zip(_splitall(Val(N), axes.(As)...)...)
for common_axis_tuple in zip(common_axes...)
if !isempty(common_axis_tuple)
for common_axis in common_axis_tuple[2:end]
if !all(axisvalues(common_axis) .== axisvalues(common_axis_tuple[1]))
throw(ArgumentError(
"""
Leading common axes must be identical across
all input arrays"""
))
end
end
end
end
array_data = cat(_reshapeall($collapsed_dim, As...)..., dims=$collapsed_dim)
axis_array_type = AxisArray{
$new_eltype,
$collapsed_dim_int,
$NewArrayType,
$new_axes_type
}
new_axes = (
first(common_axes)...,
Axis{:collapsed, $collapsed_axis_type}($collapsed_axis_type(_collapse_axes(labels, trailing_axes))),
)
return axis_array_type(array_data, new_axes)
end
end
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 25497 | # Core types and definitions
using Base: @pure
const Symbols = Tuple{Symbol,Vararg{Symbol}}
# We absolutely need ntuple of small numbers (ndims) to inline, but it might not in Base. Do it here:
@inline function ntuple(f::F, n::Integer) where F
t = n == 0 ? () :
n == 1 ? (f(1),) :
n == 2 ? (f(1), f(2)) :
n == 3 ? (f(1), f(2), f(3)) :
n == 4 ? (f(1), f(2), f(3), f(4)) :
n == 5 ? (f(1), f(2), f(3), f(4), f(5)) :
n == 6 ? (f(1), f(2), f(3), f(4), f(5), f(6)) :
n == 7 ? (f(1), f(2), f(3), f(4), f(5), f(6), f(7)) :
n == 8 ? (f(1), f(2), f(3), f(4), f(5), f(6), f(7), f(8)) :
n == 9 ? (f(1), f(2), f(3), f(4), f(5), f(6), f(7), f(8), f(9)) :
n == 10 ? (f(1), f(2), f(3), f(4), f(5), f(6), f(7), f(8), f(9), f(10)) :
Base._ntuple(f, n)
return t
end
"""
Type-stable axis-specific indexing and identification with a
parametric type.
### Type parameters
```julia
struct Axis{name,T}
```
* `name` : the name of the axis, a Symbol
* `T` : the type of the axis
### Constructors
```julia
Axis{name}(I)
```
### Arguments
* `name` : the axis name Symbol or integer dimension
* `I` : the indexer, any indexing type that the axis supports
### Examples
Here is an example with a Dimensional axis representing a time
sequence along rows and a Categorical axis of Symbols for column
headers.
```julia
A = AxisArray(reshape(1:60, 12, 5), .1:.1:1.2, [:a, :b, :c, :d, :e])
A[Axis{:col}(2)] # grabs the second column
A[Axis{:col}(:b)] # Same as above, grabs column :b (the second column)
A[Axis{:row}(2)] # grabs the second row
A[Axis{2}(2:5)] # grabs the second through 5th columns
```
"""
struct Axis{name,T}
val::T
end
# Constructed exclusively through Axis{:symbol}(...) or Axis{1}(...)
Axis{name}(I::T=()) where {name,T} = Axis{name,T}(I)
Base.:(==)(A::Axis{name}, B::Axis{name}) where {name} = A.val == B.val
Base.hash(A::Axis{name}, hx::UInt) where {name} = hash(A.val, hash(name, hx))
axistype(::Axis{name,T}) where {name,T} = T
axistype(::Type{Axis{name,T}}) where {name,T} = T
# Pass indexing and related functions straight through to the wrapped value
# TODO: should Axis be an AbstractArray? AbstractArray{T,0} for scalar T?
Base.getindex(A::Axis, i...) = A.val[i...]
Base.eltype(::Type{Axis{name,T}}) where {name,T} = eltype(T)
Base.size(A::Axis) = size(A.val)
VERSION < v"0.7.0" && (Base.endof(A::Axis) = length(A))
Base.lastindex(A::Axis) = length(A)
Base.axes(A::Axis) = Base.axes(A.val)
Base.axes(A::Axis, d) = Base.axes(A.val, d)
Base.length(A::Axis) = length(A.val)
(A::Axis{name})(i) where {name} = Axis{name}(i)
Base.convert(::Type{Axis{name,T}}, ax::Axis{name,T}) where {name,T} = ax
Base.convert(::Type{Axis{name,T}}, ax::Axis{name}) where {name,T} = Axis{name}(convert(T, ax.val))
Base.iterate(A::Axis, i...) = Base.iterate(A.val, i...)
Base.IteratorSize(::Type{<:Axis}) = Base.HasShape{1}()
Base.IteratorEltype(::Type{<:Axis}) = Base.HasEltype()
Base.iterate(::Type{T}) where {T<:Axis} = (T, nothing)
Base.iterate(::Type{T}, ::Any) where {T<:Axis} = nothing
Base.first(A::Axis) = first(A.val)
Base.last(A::Axis) = last(A.val)
Base.step(A::Axis{name,<:AbstractRange}) where {name} = step(A.val)
"""
An AxisArray is an AbstractArray that wraps another AbstractArray and
adds axis names and values to each array dimension. AxisArrays can be indexed
by using the named axes as an alternative to positional indexing by
dimension. Other advanced indexing along axis values are also provided.
### Type parameters
The AxisArray contains several type parameters:
```julia
struct AxisArray{T,N,D,Ax} <: AbstractArray{T,N}
```
* `T` : the elemental type of the AbstractArray
* `N` : the number of dimensions
* `D` : the type of the wrapped AbstractArray
* `Ax` : the names and types of the axes, as a (specialized) NTuple{N, Axis}
### Constructors
```julia
AxisArray(A::AbstractArray, axes::Axis...)
AxisArray(A::AbstractArray, names::Symbol...)
AxisArray(A::AbstractArray, vectors::AbstractVector...)
AxisArray(A::AbstractArray, (names...,), (steps...,), [(offsets...,)])
```
### Arguments
* `A::AbstractArray` : the wrapped array data
* `axes` or `names` or `vectors` : dimensional information for the wrapped array
The dimensional information may be passed in one of three ways and is
entirely optional. When the axis name or value is missing for a
dimension, a default is substituted. The default axis names for
dimensions `(1, 2, 3, 4, 5, ...)` are `(:row, :col, :page, :dim_4,
:dim_5, ...)`. The default axis values are `Base.axes(A, d)` for each
missing dimension `d`.
### Indexing
Indexing returns a view into the original data. The returned view is a
new AxisArray that wraps a SubArray. Indexing should be type
stable. Use `Axis{axisname}(idx)` to index based on a specific
axis. `axisname` is a Symbol specifying the axis to index/slice, and
`idx` is a normal indexing object (`Int`, `Array{Int,1}`, etc.) or a
custom indexing type for that particular type of axis.
Two main types of axes supported by default include:
* Categorical axis -- These are vectors of labels, normally Symbols or
strings. Elements or slices can be indexed by elements or vectors
of elements.
* Dimensional axis -- These are sorted vectors or iterators that can
be indexed by `ClosedInterval()`. These are commonly used for sequences of
times or date-times. For regular sample rates, ranges can be used.
User-defined axis types can be added along with custom indexing
behaviors. To add a custom type as a Categorical or Dimensional
axis, add a trait using [`AxisArrays.axistrait`](@ref).
For more advanced indexing, you can define custom methods for
[`AxisArrays.axisindexes`](@ref).
### Examples
Here is an example with a Dimensional axis representing a time
sequence along rows (it's a FloatRange) and a Categorical axis of
Symbols for column headers.
```julia
A = AxisArray(reshape(1:15, 5, 3), Axis{:time}(.1:.1:0.5), Axis{:col}([:a, :b, :c]))
A[Axis{:time}(1:3)] # equivalent to A[1:3,:]
A[Axis{:time}(ClosedInterval(.2,.4))] # restrict the AxisArray along the time axis
A[ClosedInterval(0.,.3), [:a, :c]] # select an interval and two columns
```
"""
struct AxisArray{T,N,D,Ax} <: AbstractArray{T,N}
data::D # D <:AbstractArray, enforced in constructor to avoid dispatch bugs (https://github.com/JuliaLang/julia/issues/6383)
axes::Ax # Ax<:NTuple{N, Axis}, but with specialized Axis{...} types
AxisArray{T,N,D,Ax}(data::AbstractArray{T,N}, axs::Tuple{Vararg{Axis,N}}) where {T,N,D,Ax} = new{T,N,D,Ax}(data, axs)
end
"""
AxisMatrix{T}
Alias for [`AxisArray{T,2,D,Ax}`](@ref AxisArray).
"""
const AxisMatrix{T,D,Ax} = AxisArray{T,2,D,Ax}
const AxisVector{T,D,Ax} = AxisArray{T,1,D,Ax}
# Helper functions: Default axis names (if not provided)
@inline _defaultdimname(i) = i == 1 ? (:row) : i == 2 ? (:col) : i == 3 ? (:page) : i == 4 ? :dim_4 : Symbol(:dim_, i)
_merge(::Tuple{}, ::Tuple{}) = ()
_merge(array_axes::Tuple{}, given_axes) = given_axes
_merge(array_axes, given_axes::Tuple{}) = array_axes
@inline _merge(array_axes, given_axes) = (given_axes[1], _merge(tail(array_axes), tail(given_axes))...)
@inline _default_axis(ax, i) = Axis{_defaultdimname(i)}(ax)
_default_axis(ax::Axis, i) = ax
"""
default_axes(A::AbstractArray)
default_axes(A::AbstractArray, axs)
Return a tuple of Axis objects that appropriately index into the array A.
The optional second argument can take a tuple of vectors or axes, which will be
wrapped with the appropriate axis name.
"""
@inline function default_axes(A::AbstractArray, given_axs=())
axs = _merge(Base.axes(A), given_axs)
ntuple(i->(Base.@_inline_meta; _default_axis(axs[i], i)), length(axs))
end
# Axis consistency checks — ensure sizes match and the names are unique
@inline checksizes(axs, sz) =
(length(axs[1]) == sz[1]) & checksizes(tail(axs), tail(sz))
checksizes(::Tuple{}, sz) = true
@inline function checknames(name::Symbol, names...)
matches = false
for n in names
matches |= name == n
end
matches && throw(ArgumentError("axis name :$name is used more than once"))
checknames(names...)
end
checknames(name, names...) = throw(ArgumentError("the Axis names must be Symbols"))
checknames() = ()
# The primary AxisArray constructors — specify an array to wrap and the axes
AxisArray(A::AbstractArray, vects::Union{AbstractVector, Axis}...) = AxisArray(A, vects)
AxisArray(A::AbstractArray, vects::Tuple{Vararg{Union{AbstractVector, Axis}}}) = AxisArray(A, default_axes(A, vects))
function AxisArray(A::AbstractArray, vects::Tuple{Vararg{Axis}})
length(vects) > ndims(A) && throw(ArgumentError("too many axes ($(length(vects))) given for a $(ndims(A))-dimensional array"))
AxisArray(A, default_axes(A, vects))
end
function AxisArray(A::D, axs::Ax) where {T,N,D<:AbstractArray{T,N},Ax<:NTuple{N,Axis}}
checksizes(axs, _size(A)) || throw(ArgumentError("the length of each axis must match the corresponding size of data"))
checknames(axisnames(axs...)...)
AxisArray{T,N,D,Ax}(A, axs)
end
# Simple non-type-stable constructors to specify names as symbols
AxisArray(A::AbstractArray) = AxisArray(A, ()) # Disambiguation
AxisArray(A::AbstractArray, names::Symbol...) = (inds = Base.axes(A); AxisArray(A, ntuple(i->Axis{names[i]}(inds[i]), length(names))))
function AxisArray(A::AbstractArray{T,N}, names::NTuple{N,Symbol}, steps::NTuple{N,Number}, offsets::NTuple{N,Number}=map(zero, steps)) where {T,N}
axs = ntuple(i->Axis{names[i]}(range(offsets[i], step=steps[i], length=size(A,i))), N)
AxisArray(A, axs...)
end
# Alternative constructor, takes names as keywords:
AxisArray(A; kw...) = AxisArray(A, nt_to_axes(values(kw)))
@generated nt_to_axes(nt::NamedTuple{names}) where {names} =
Expr(:tuple, (:(Axis{$(QuoteNode(n))}(getfield(nt, $(QuoteNode(n))))) for n in names)...)
AxisArray(A::AxisArray) = A
AxisArray(A::AxisArray, ax::Vararg{Axis, N}) where N =
AxisArray(A.data, ax..., last(Base.IteratorsMD.split(axes(A), Val(N)))...)
AxisArray(A::AxisArray, ax::NTuple{N, Axis}) where N =
AxisArray(A.data, ax..., last(Base.IteratorsMD.split(axes(A), Val(N)))...)
# Traits
struct HasAxes{B} end
HasAxes(::Type{<:AxisArray}) = HasAxes{true}()
HasAxes(::Type{<:AbstractArray}) = HasAxes{false}()
HasAxes(::A) where A<:AbstractArray = HasAxes(A)
"""
axisnames(A::AxisArray) -> (Symbol...)
axisnames(::Type{AxisArray{...}}) -> (Symbol...)
axisnames(ax::Axis...) -> (Symbol...)
axisnames(::Type{Axis{...}}...) -> (Symbol...)
Returns the axis names of an AxisArray or list of Axis objects as a tuple of Symbols.
"""
axisnames(::AxisArray{T,N,D,Ax}) where {T,N,D,Ax} = _axisnames(Ax)
axisnames(::Type{AxisArray{T,N,D,Ax}}) where {T,N,D,Ax} = _axisnames(Ax)
axisnames(::Type{Ax}) where {Ax<:Tuple{Vararg{Axis}}} = _axisnames(Ax)
@pure _axisnames(Ax) = axisnames(Ax.parameters...)
axisnames() = ()
@inline axisnames(::Axis{name}, B::Axis...) where {name} = tuple(name, axisnames(B...)...)
@inline axisnames(::Type{<:Axis{name}}, B::Type...) where {name} = tuple(name, axisnames(B...)...)
axisname(::Union{Type{<:Axis{name}},Axis{name}}) where {name} = name
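# Illustrative sketch (hypothetical array): for A = AxisArray(rand(2, 3), :time, :col),
# axisnames(A) == (:time, :col) and axisvalues(A) == (Base.OneTo(2), Base.OneTo(3)).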
# Axis definitions
"""
axisdim(::AxisArray, ::Axis) -> Int
axisdim(::AxisArray, ::Type{Axis}) -> Int
Given an AxisArray and an Axis, return the integer dimension of
the Axis within the array.
"""
axisdim(A::AxisArray, ax::Axis) = axisdim(A, typeof(ax))
axisdim(A::AxisArray, ax::Type{Ax}) where Ax<:Axis = axisdim(typeof(A), Ax)
# The actual computation is done in the type domain, which is a little tricky
# due to type invariance.
@generated function axisdim(::Type{AxisArray{T,N,D,Ax}}, ::Type{<:Axis{name,S} where S}) where {T,N,D,Ax,name}
isa(name, Int) && return name <= N ? name : error("axis $name greater than array dimensionality $N")
names = axisnames(Ax)
idx = findfirst(isequal(name), names)
idx === nothing && error("axis $name not found in array axes $names")
idx
end
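# Illustrative sketch (hypothetical array): for A = AxisArray(rand(2, 3), :time, :col),
# axisdim(A, Axis{:col}) == 2, while axisdim(A, Axis{:page}) errors because :page is
# not one of A's axes.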
# Base definitions that aren't provided by AbstractArray
@inline Base.size(A::AxisArray) = size(A.data)
@inline Base.size(A::AxisArray, Ax::Axis) = size(A.data, axisdim(A, Ax))
@inline Base.size(A::AxisArray, ::Type{Ax}) where {Ax<:Axis} = size(A.data, axisdim(A, Ax))
@inline Base.axes(A::AxisArray) = Base.axes(A.data)
@inline Base.axes(A::AxisArray, Ax::Axis) = Base.axes(A.data, axisdim(A, Ax))
@inline Base.axes(A::AxisArray, ::Type{Ax}) where {Ax<:Axis} = Base.axes(A.data, axisdim(A, Ax))
Base.convert(::Type{Array{T,N}}, A::AxisArray{T,N}) where {T,N} = convert(Array{T,N}, A.data)
Base.parent(A::AxisArray) = A.data
# Similar is tricky. If we're just changing the element type, it can stay as an
# AxisArray. But if we're changing dimensions, there's no way it can know how
# to keep track of the axes, so just punt and return a regular old Array.
# TODO: would it feel more consistent to return an AxisArray without any axes?
Base.similar(A::AxisArray, ::Type{S}) where {S} = (d = similar(A.data, S); AxisArray(d, A.axes))
Base.similar(A::AxisArray, ::Type{S}, dims::Dims{N}) where {S,N} = similar(A.data, S, dims)
# If, however, we pass Axis objects containing the new axis for that dimension,
# we can return a similar AxisArray with an appropriately modified size
Base.similar(A::AxisArray{T}, ax1::Axis, axs::Axis...) where {T} = similar(A, T, (ax1, axs...))
Base.similar(A::AxisArray, ::Type{S}, ax1::Axis, axs::Axis...) where {S} = similar(A, S, (ax1, axs...))
@generated function Base.similar(A::AxisArray{T,N}, ::Type{S}, axs::Tuple{Axis,Vararg{Axis}}) where {T,S,N}
inds = Expr(:tuple)
ax = Expr(:tuple)
for d=1:N
push!(inds.args, :(Base.axes(A, Axis{$d})))
push!(ax.args, :(axes(A, Axis{$d})))
end
to_delete = Int[]
for i=1:length(axs.parameters)
a = axs.parameters[i]
d = axisdim(A, a)
axistype(a) <: Tuple{} && push!(to_delete, d)
inds.args[d] = :(Base.axes(axs[$i].val, 1))
ax.args[d] = :(axs[$i])
end
sort!(to_delete)
deleteat!(inds.args, to_delete)
deleteat!(ax.args, to_delete)
quote
d = similar(A.data, S, $inds)
AxisArray(d, $ax)
end
end
const AxisUnitRange{T,N,D<:AbstractUnitRange,Ax} = AxisArray{T,N,D,Ax}
Base.similar(A::AxisArray{T}, ax1::AxisUnitRange, axs::AxisUnitRange...) where {T} = similar(A, T, (ax1, axs...))
Base.similar(A::AxisArray, ::Type{S}, ax1::AxisUnitRange, axs::AxisUnitRange...) where {S} = similar(A, S, (ax1, axs...))
Base.similar(A::AxisArray, ::Type{S}, axs::Tuple{AxisUnitRange,Vararg{AxisUnitRange}}) where {S} = similar(A, S, map(x->x.axes[1], axs))
# These methods allow us to preserve the AxisArray under reductions
# Note that we only extend the following two methods, and then have it
# dispatch to package-local `reduced_indices` and `reduced_indices0`
# methods. This avoids a whole slew of ambiguities.
Base.reduced_indices(A::AxisArray, region) = map(ax->AxisArray(Base.axes(ax.val, 1), ax), reduced_indices(axes(A), region))
Base.reduced_indices0(A::AxisArray, region) = map(ax->AxisArray(Base.axes(ax.val, 1), ax), reduced_indices0(axes(A), region))
reduced_indices(axs::Tuple{Vararg{Axis}}, ::Tuple{}) = axs
reduced_indices0(axs::Tuple{Vararg{Axis}}, ::Tuple{}) = axs
reduced_indices(axs::Tuple{Vararg{Axis}}, region::Integer) =
reduced_indices(axs, (region,))
reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Integer) =
reduced_indices0(axs, (region,))
reduced_indices(axs::Tuple{Vararg{Axis,N}}, region::Dims) where {N} =
map((ax,d)->d∈region ? reduced_axis(ax) : ax, axs, ntuple(identity, N))
reduced_indices0(axs::Tuple{Vararg{Axis,N}}, region::Dims) where {N} =
map((ax,d)->d∈region ? reduced_axis0(ax) : ax, axs, ntuple(identity, N))
@inline reduced_indices(axs::Tuple{Vararg{Axis}}, region::Type{<:Axis}) =
_reduced_indices(reduced_axis, (), region, axs...)
@inline reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Type{<:Axis}) =
_reduced_indices(reduced_axis0, (), region, axs...)
@inline reduced_indices(axs::Tuple{Vararg{Axis}}, region::Axis) =
_reduced_indices(reduced_axis, (), region, axs...)
@inline reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Axis) =
_reduced_indices(reduced_axis0, (), region, axs...)
reduced_indices(axs::Tuple{Vararg{Axis}}, region::Tuple) =
reduced_indices(reduced_indices(axs, region[1]), tail(region))
reduced_indices(axs::Tuple{Vararg{Axis}}, region::Tuple{Vararg{Axis}}) =
reduced_indices(reduced_indices(axs, region[1]), tail(region))
reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Tuple) =
reduced_indices0(reduced_indices0(axs, region[1]), tail(region))
reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Tuple{Vararg{Axis}}) =
reduced_indices0(reduced_indices0(axs, region[1]), tail(region))
@pure samesym(::Type{Axis{n1}}, ::Type{Axis{n2}}) where {n1,n2} = Val(n1==n2)
samesym(::Type{<:Axis{n1}}, ::Type{<:Axis{n2}}) where {n1,n2} = samesym(Axis{n1},Axis{n2})
samesym(::Type{Axis{n1}}, ::Axis{n2}) where {n1,n2} = samesym(Axis{n1}, Axis{n2})
samesym(::Axis{n1}, ::Type{Axis{n2}}) where {n1,n2} = samesym(Axis{n1}, Axis{n2})
samesym(::Axis{n1}, ::Axis{n2}) where {n1,n2} = samesym(Axis{n1}, Axis{n2})
@inline _reduced_indices(f, out, chosen::Type{<:Axis}, ax::Axis, axs...) =
__reduced_indices(f, out, samesym(chosen, ax), chosen, ax, axs)
@inline _reduced_indices(f, out, chosen::Axis, ax::Axis, axs...) =
__reduced_indices(f, out, samesym(chosen, ax), chosen, ax, axs)
_reduced_indices(f, out, chosen) = out
@inline __reduced_indices(f, out, ::Val{true}, chosen, ax, axs) =
_reduced_indices(f, (out..., f(ax)), chosen, axs...)
@inline __reduced_indices(f, out, ::Val{false}, chosen, ax, axs) =
_reduced_indices(f, (out..., ax), chosen, axs...)
reduced_axis( ax::Axis{name,<:AbstractArray{T}}) where {name,T<:Number} = ax(oftype(ax.val, Base.OneTo(1)))
reduced_axis0(ax::Axis{name,<:AbstractArray{T}}) where {name,T<:Number} = ax(oftype(ax.val, length(ax.val) == 0 ? Base.OneTo(0) : Base.OneTo(1)))
reduced_axis( ax) = ax(Base.OneTo(1))
reduced_axis0(ax) = ax(length(ax.val) == 0 ? Base.OneTo(0) : Base.OneTo(1))
function Base.permutedims(A::AxisArray, perm)
p = permutation(perm, axisnames(A))
AxisArray(permutedims(A.data, p), axes(A)[[p...]])
end
Base.transpose(A::AxisArray{T,2}) where {T} = AxisArray(transpose(A.data), A.axes[2], A.axes[1])
Base.adjoint(A::AxisArray{T,2}) where {T} = AxisArray(adjoint(A.data), A.axes[2], A.axes[1])
Base.transpose(A::AxisArray{T,1}) where {T} = AxisArray(transpose(A.data), Axis{:transpose}(Base.OneTo(1)), A.axes[1])
Base.adjoint(A::AxisArray{T,1}) where {T} = AxisArray(adjoint(A.data), Axis{:transpose}(Base.OneTo(1)), A.axes[1])
Base.map!(f::F, A::AxisArray) where {F} = (map!(f, A.data); A)
Base.map(f, A::AxisArray) = AxisArray(map(f, A.data), A.axes...)
function Base.map!(f::F, dest::AxisArray{T,N,D,Ax}, As::AxisArray{T,N,D,Ax}...) where {F,T,N,D,Ax<:Tuple{Vararg{Axis}}}
matchingdims((dest, As...)) || error("All axes must be identically-valued")
data = map(a -> a.data, As)
map!(f, dest.data, data...)
return dest
end
function Base.map(f, As::AxisArray{T,N,D,Ax}...) where {T,N,D,Ax<:Tuple{Vararg{Axis}}}
matchingdims(As) || error("All axes must be identically-valued")
data = map(a -> a.data, As)
return AxisArray(map(f, data...), As[1].axes...)
end
permutation(to::Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}}, from::Symbols) = to
"""
permutation(to, from) -> p
Calculate the permutation of labels in `from` to produce the order in
`to`. Any entries in `to` that are missing in `from` will receive an
index of 0. Any entries in `from` that are missing in `to` will have
their indices appended to the end of the permutation. Consequently,
the length of `p` is equal to the longer of `to` and `from`.
"""
function permutation(to::Symbols, from::Symbols)
n = length(to)
nf = length(from)
li = eachindex(from)
d = Dict(from[i]=>i for i in li)
covered = Array(falses(length(li)))
ind = Array{Int}(undef, max(n, nf))
for (i,toi) in enumerate(to)
j = get(d, toi, 0)
ind[i] = j
if j != 0
covered[j] = true
end
end
k = n
for i in li
if !covered[i]
d[from[i]] != i && throw(ArgumentError("$(from[i]) is a duplicated argument"))
k += 1
k > nf && throw(ArgumentError("no incomplete containment allowed in $to and $from"))
ind[k] = i
end
end
ind
end
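# Illustrative sketch: permutation((:a, :c, :b), (:a, :b, :c)) == [1, 3, 2], and a label
# present in `to` but missing from `from` maps to 0, e.g.
# permutation((:a, :d, :b), (:a, :b)) == [1, 0, 2].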
@inline Base.dropdims(A::AxisArray; dims) = _dropdims(A, dims)
function _dropdims(A::AxisArray, dims)
keepdims = setdiff(1:ndims(A), dims)
AxisArray(dropdims(A.data; dims=dims), axes(A)[keepdims])
end
# This version attempts to be type-stable
function _dropdims(A::AxisArray, ::Type{Ax}) where {Ax<:Axis}
dim = axisdim(A, Ax)
AxisArray(dropdims(A.data; dims=dim), dropax(Ax, axes(A)...))
end
@inline dropax(ax, ax1, axs...) = (ax1, dropax(ax, axs...)...)
@inline dropax(ax::Axis{name}, ax1::Axis{name}, axs...) where {name} = dropax(ax, axs...)
@inline dropax(ax::Type{<:Axis{name}}, ax1::Axis{name}, axs...) where {name} = dropax(ax, axs...)
dropax(ax) = ()
# A simple display method to include axis information. It might be nice to
# eventually display the axis labels alongside the data array, but that is
# much more difficult.
function Base.summary(io::IO, A::AxisArray)
_summary(io, A)
for (name, val) in zip(axisnames(A), axisvalues(A))
print(io, " :$name, ")
show(IOContext(io, :limit=>true), val)
println(io)
end
print(io, "And data, a ", summary(A.data))
end
_summary(io, A::AxisArray{T,N}) where {T,N} = println(io, "$N-dimensional AxisArray{$T,$N,...} with axes:")
# Custom methods specific to AxisArrays
"""
axisvalues(A::AxisArray) -> (AbstractVector...)
axisvalues(ax::Axis...) -> (AbstractVector...)
Returns the axis values of an AxisArray or list of Axis objects as a tuple of vectors.
"""
axisvalues(A::AxisArray) = axisvalues(A.axes...)
axisvalues() = ()
axisvalues(ax::Axis, axs::Axis...) = tuple(ax.val, axisvalues(axs...)...)
"""
axes(A::AxisArray) -> (Axis...)
axes(A::AxisArray, ax::Axis) -> Axis
axes(A::AxisArray, dim::Int) -> Axis
Returns the tuple of axis vectors for an AxisArray. If a specific `Axis` is
specified, then only that axis vector is returned. Note that when extracting a
single axis vector, `axes(A, Axis{1})` is type-stable and will perform better
than `axes(A)[1]`.
For an AbstractArray without `Axis` information, `axes` returns the
default axes, i.e., those that would be produced by `AxisArray(A)`.
"""
axes(A::AxisArray) = A.axes
axes(A::AxisArray, dim::Int) = A.axes[dim]
axes(A::AxisArray, ax::Axis) = axes(A, typeof(ax))
@generated function axes(A::AxisArray, ax::Type{T}) where T<:Axis
dim = axisdim(A, T)
:(A.axes[$dim])
end
axes(A::AbstractArray) = default_axes(A)
axes(A::AbstractArray, dim::Int) = default_axes(A)[dim]
"""
axisparams(::AxisArray) -> Vararg{::Type{Axis}}
axisparams(::Type{AxisArray}) -> Vararg{::Type{Axis}}
Returns the axis parameters for an AxisArray.
"""
axisparams(::AxisArray{T,N,D,Ax}) where {T,N,D,Ax} = (Ax.parameters...,)
axisparams(::Type{AxisArray{T,N,D,Ax}}) where {T,N,D,Ax} = (Ax.parameters...,)
### Axis traits ###
abstract type AxisTrait end
struct Dimensional <: AxisTrait end
struct Categorical <: AxisTrait end
struct Unsupported <: AxisTrait end
"""
axistrait(ax::Axis) -> Type{<:AxisTrait}
    axistrait(::Type{T}) -> Type{<:AxisTrait}
Returns the indexing type of an `Axis`, any subtype of `AxisTrait`.
The default is `Unsupported`, meaning there is no special indexing behaviour for this axis
and indexes into this axis are passed directly to the underlying array.
Two main types of axes supported by default are `Categorical` and `Dimensional`; see
[Indexing](@ref) for more information on these types.
User-defined axis types can be added along with custom indexing behaviors by defining new
methods of this function. Here is an example of adding a custom Dimensional axis:
```julia
AxisArrays.axistrait(::Type{MyCustomAxis}) = AxisArrays.Dimensional
```
"""
axistrait(::T) where {T} = axistrait(T)
axistrait(::Type{T}) where {T} = Unsupported
axistrait(::Type{Axis{name,T}}) where {name,T} = axistrait(T)
axistrait(::Type{T}) where {T<:AbstractVector} = _axistrait_el(eltype(T))
_axistrait_el(::Type{<:Union{Number, Dates.AbstractTime}}) = Dimensional
_axistrait_el(::Type{<:Union{Symbol, AbstractString}}) = Categorical
_axistrait_el(::Type{T}) where {T} = Categorical
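# Sketch of the default trait assignments:
#   axistrait(Axis{:t}(0.0:0.1:1.0))     # == Dimensional (numeric element type)
#   axistrait(Axis{:id}(["a", "b"]))     # == Categorical (strings)
#   axistrait(Axis{:z}([(1,2), (3,4)]))  # == Categorical (fallback for other element types)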
checkaxis(ax::Axis) = checkaxis(ax.val)
checkaxis(ax) = checkaxis(axistrait(ax), ax)
checkaxis(::Type{Unsupported}, ax) = nothing # TODO: warn or error?
# Dimensional axes must be monotonically increasing
checkaxis(::Type{Dimensional}, ax) = issorted(ax) || throw(ArgumentError("Dimensional axes must be monotonically increasing"))
# Categorical axes must simply be unique
function checkaxis(::Type{Categorical}, ax)
seen = Set{eltype(ax)}()
for elt in ax
if elt in seen
throw(ArgumentError("Categorical axes must be unique"))
end
push!(seen, elt)
end
end
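# Sketch of the validation performed by checkaxis:
#   checkaxis(Axis{:t}(1:10))           # passes: monotonically increasing
#   checkaxis(Axis{:t}(10:-1:1))        # throws ArgumentError
#   checkaxis(Axis{:id}([:a, :b, :a]))  # throws ArgumentError: duplicate :a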
_length(A::AbstractArray) = length(eachindex(A))
_length(A) = length(A)
_size(A::AbstractArray) = map(length, Base.axes(A))
_size(A) = size(A)
_size(A::AbstractArray, d) = length(Base.axes(A, d))
_size(A, d) = size(A, d)
# ---- AxisArrays.jl source (MIT, https://github.com/JuliaArrays/AxisArrays.jl): indexing ----
const Idx = Union{Real,Colon,AbstractArray{Int}}
using Base: ViewIndex, @propagate_inbounds, tail
abstract type Value{T} end
struct TolValue{T} <: Value{T}
val::T
tol::T
end
TolValue(x, tol=Base.rtoldefault(typeof(x))*abs(x)) = TolValue(promote(x,tol)...)
struct ExactValue{T} <: Value{T}
val::T
end
atvalue(x::Number; rtol=Base.rtoldefault(typeof(x)), atol=zero(x)) = TolValue(x, atol+rtol*abs(x))
atvalue(x) = ExactValue(x)
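# Sketch of value-based indexing with atvalue (illustrative array):
#   A = AxisArray([10, 20, 30], Axis{:t}(0.1:0.1:0.3))
#   A[atvalue(0.2)]                  # == 20: index by axis value rather than position
#   A[atvalue(0.2001, atol=1e-3)]    # == 20: inexact match within the given tolerance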
const Values = AbstractArray{<:Value}
# For throwing a BoundsError with a Value index, we need to define the following
# (note that we could inherit them for free, were Value <: Number)
Base.iterate(x::Value, state = false) = state ? nothing : (x, true)
# Values have the indexing trait of their wrapped type
_axistrait_el(::Type{<:Value{T}}) where {T} = _axistrait_el(T)
# How to show Value objects (e.g. in a BoundsError)
Base.show(io::IO, v::TolValue) =
print(io, string("TolValue(", v.val, ", tol=", v.tol, ")"))
Base.show(io::IO, v::ExactValue) = print(io, string("ExactValue(", v.val, ")"))
# Defer IndexStyle to the wrapped array
Base.IndexStyle(::Type{AxisArray{T,N,D,Ax}}) where {T,N,D,Ax} = IndexStyle(D)
# Simple scalar indexing where we just set or return scalars
@propagate_inbounds Base.getindex(A::AxisArray, idxs::Int...) = A.data[idxs...]
@propagate_inbounds Base.setindex!(A::AxisArray, v, idxs::Int...) = (A.data[idxs...] = v)
# Cartesian iteration
Base.eachindex(A::AxisArray) = eachindex(A.data)
# Avoid an ambiguity -- some IntervalSets versions take .. from EllipsisNotation,
# which defines A[..] for any AbstractArray, like this:
Base.getindex(A::AxisArray, ::typeof(..)) = A
"""
reaxis(A::AxisArray, I...)
This internal function determines the new set of axes that are constructed upon
indexing with I.
"""
reaxis(A::AxisArray, I::Idx...) = _reaxis(make_axes_match(axes(A), I), I)
# Linear indexing
reaxis(A::AxisArray{<:Any,1}, I::AbstractArray{Int}) = _new_axes(A.axes[1], I)
reaxis(A::AxisArray, I::AbstractArray{Int}) = default_axes(I)
reaxis(A::AxisArray{<:Any,1}, I::Real) = ()
reaxis(A::AxisArray, I::Real) = ()
reaxis(A::AxisArray{<:Any,1}, I::Colon) = _new_axes(A.axes[1], Base.axes(A, 1))
reaxis(A::AxisArray, I::Colon) = default_axes(Base.OneTo(length(A)))
reaxis(A::AxisArray{<:Any,1}, I::AbstractArray{Bool}) = _new_axes(A.axes[1], findall(I))
reaxis(A::AxisArray, I::AbstractArray{Bool}) = default_axes(findall(I))
# Ensure the number of axes matches the number of indexing dimensions
@inline function make_axes_match(axs, idxs)
nidxs = Base.index_ndims(idxs...)
ntuple(i->(Base.@_inline_meta; _default_axis(i > length(axs) ? Base.OneTo(1) : axs[i], i)), length(nidxs))
end
# Now we can reaxis without worrying about mismatched axes/indices
@inline _reaxis(axs::Tuple{}, idxs::Tuple{}) = ()
# Scalars are dropped
const ScalarIndex = Union{Real, AbstractArray{<:Any, 0}}
@inline _reaxis(axs::Tuple, idxs::Tuple{ScalarIndex, Vararg{Any}}) = _reaxis(tail(axs), tail(idxs))
# Colon passes straight through
@inline _reaxis(axs::Tuple, idxs::Tuple{Colon, Vararg{Any}}) = (axs[1], _reaxis(tail(axs), tail(idxs))...)
# But arrays can add or change dimensions and accompanying axis names
@inline _reaxis(axs::Tuple, idxs::Tuple{AbstractArray, Vararg{Any}}) =
(_new_axes(axs[1], idxs[1])..., _reaxis(tail(axs), tail(idxs))...)
# Vectors simply create new axes with the same name; just subsetted by their value
@inline _new_axes(ax::Axis{name}, idx::AbstractVector) where {name} = (Axis{name}(ax.val[idx]),)
# Arrays create multiple axes with _N appended to the axis name containing their indices
@generated function _new_axes(ax::Axis{name}, idx::AbstractArray{<:Any,N}) where {name,N}
newaxes = Expr(:tuple)
for i=1:N
push!(newaxes.args, :($(Axis{Symbol(name, "_", i)})(Base.axes(idx, $i))))
end
newaxes
end
# And indexing with an AxisArray joins the name and overrides the values
@generated function _new_axes(ax::Axis{name}, idx::AxisArray{<:Any, N}) where {name,N}
newaxes = Expr(:tuple)
idxnames = axisnames(idx)
for i=1:N
push!(newaxes.args, :($(Axis{Symbol(name, "_", idxnames[i])})(idx.axes[$i].val)))
end
newaxes
end
@propagate_inbounds function Base.getindex(A::AxisArray, idxs::Idx...)
AxisArray(A.data[idxs...], reaxis(A, idxs...))
end
# To resolve ambiguities, we need several definitions
using Base: AbstractCartesianIndex
@propagate_inbounds Base.view(A::AxisArray, idxs::Idx...) = AxisArray(view(A.data, idxs...), reaxis(A, idxs...))
# Setindex is so much simpler. Just assign it to the data:
@propagate_inbounds Base.setindex!(A::AxisArray, v, idxs::Idx...) = (A.data[idxs...] = v)
# Logical indexing
@propagate_inbounds function Base.getindex(A::AxisArray, idx::AbstractArray{Bool})
AxisArray(A.data[idx], reaxis(A, idx))
end
@propagate_inbounds Base.setindex!(A::AxisArray, v, idx::AbstractArray{Bool}) = (A.data[idx] = v)
### Fancier indexing capabilities provided only by AxisArrays ###
# To avoid StackOverflowErrors on indexes that we don't know how to convert, we
# give AxisArrays one chance to convert into a known format and then defer to the parent
@propagate_inbounds Base.getindex(A::AxisArray, idxs...) = getindex_converted(A, to_index(A,idxs...)...)
@propagate_inbounds Base.setindex!(A::AxisArray, v, idxs...) = setindex!_converted(A, v, to_index(A,idxs...)...)
# Deal with lots of ambiguities here
@propagate_inbounds Base.view(A::AxisArray, idxs::ViewIndex...) = view(A, to_index(A,idxs...)...)
@propagate_inbounds Base.view(A::AxisArray, idxs::Union{ViewIndex,AbstractCartesianIndex}...) = view(A, to_index(A,Base.IteratorsMD.flatten(idxs)...)...)
@propagate_inbounds Base.view(A::AxisArray, idxs...) = view(A, to_index(A,idxs...)...)
@propagate_inbounds getindex_converted(A, idxs::Idx...) = A[idxs...]
@propagate_inbounds setindex!_converted(A, v, idxs::Idx...) = (A[idxs...] = v)
@propagate_inbounds getindex_converted(A, idxs...) = A.data[idxs...]
@propagate_inbounds setindex!_converted(A, v, idxs...) = (A.data[idxs...] = v)
# First is indexing by named axis. We simply sort the axes and re-dispatch.
# When indexing by named axis the shapes of omitted dimensions are preserved
# TODO: should we handle multidimensional Axis indexes? It could be interpreted
# as adding dimensions in the middle of an AxisArray.
# TODO: should we allow repeated axes? As a union of indices of the duplicates?
@generated function to_index(A::AxisArray{T,N,D,Ax}, I::Axis...) where {T,N,D,Ax}
dims = Int[axisdim(A, ax) for ax in I]
idxs = Expr[:(Colon()) for d = 1:N]
names = axisnames(A)
for i=1:length(dims)
idxs[dims[i]] == :(Colon()) ||
return :(throw(ArgumentError(string("multiple indices provided ",
"on axis ", $(string(names[dims[i]]))))))
idxs[dims[i]] = :(I[$i].val)
end
meta = Expr(:meta, :inline)
return :($meta; to_index(A, $(idxs...)))
end
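# Sketch of indexing by axis name; the integer inside Axis{...}(...) is a positional
# index within that axis (illustrative array):
#   A = AxisArray(reshape(1:6, 2, 3), Axis{:x}(1:2), Axis{:y}(10:10:30))
#   A[Axis{:y}(2)]                # == A[:, 2]; other axes are left whole
#   A[Axis{:y}(2), Axis{:x}(1)]   # == A[1, 2] == 3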
function Base.reshape(A::AxisArray, ::Val{N}) where N
axN, _ = Base.IteratorsMD.split(axes(A), Val(N))
AxisArray(reshape(A.data, Val(N)), Base.front(axN))
end
# Keyword indexing, reconstructs the Axis{}() objects
@propagate_inbounds Base.view(A::AxisArray; kw...) =
view(A, kw_to_axes(parent(A), values(kw))...)
@propagate_inbounds Base.getindex(A::AxisArray; kw...) =
getindex(A, kw_to_axes(parent(A), values(kw))...)
@propagate_inbounds Base.setindex!(A::AxisArray, val; kw...) =
setindex!(A, val, kw_to_axes(parent(A), values(kw))...)
function kw_to_axes(A::AbstractArray, nt::NamedTuple)
length(nt) == 0 && throw(BoundsError(A, ())) # Trivial case A[] lands here
nt_to_axes(nt)
end
### Indexing along values of the axes ###
# Default axes indexing throws an error
"""
axisindexes(ax::Axis, axis_idx) -> array_idx
axisindexes(::Type{<:AxisTrait}, axis_values, axis_idx) -> array_idx
Translate an index into an axis into an index into the underlying array.
Users can add additional indexing behaviours for custom axes or custom indices by adding
methods to this function.
## Examples
Add a method for indexing into an `Axis{name, SortedSet}`:
```julia
AxisArrays.axisindexes(::Type{Categorical}, ax::SortedSet, idx::AbstractVector) = findall(in(idx), collect(ax))
```
Add a method for indexing into a `Categorical` axis with a `SortedSet`:
```julia
AxisArrays.axisindexes(::Type{Categorical}, ax::AbstractVector, idx::SortedSet) = findall(in(idx), ax)
```
"""
axisindexes(ax, idx) = axisindexes(axistrait(ax.val), ax.val, idx)
axisindexes(::Type{Unsupported}, ax, idx) = error("elementwise indexing is not supported for axes of type $(typeof(ax))")
axisindexes(t, ax, idx) = error("cannot index $(typeof(ax)) with $(typeof(idx)); expected $(eltype(ax)) axis value or Integer index")
# Dimensional axes may be indexed directly by their elements if Non-Real and unique
# Maybe extend error message to all <: Numbers if Base allows it?
axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::Real) =
throw(ArgumentError("invalid index: $idx. Use `atvalue` when indexing by value."))
function axisindexes(::Type{Dimensional}, ax::AbstractVector{T}, idx::T) where T
idxs = searchsorted(ax, ClosedInterval(idx,idx))
length(idxs) > 1 && error("more than one datapoint lies on axis value $idx; use an interval to return all values")
if length(idxs) == 1
idxs[1]
else
throw(BoundsError(ax, idx))
end
end
function axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::Axis)
idxs = searchsorted(ax, idx.val)
length(idxs) > 1 && error("more than one datapoint lies on axis value $idx; use an interval to return all values")
if length(idxs) == 1
idxs[1]
else
throw(BoundsError(ax, idx))
end
end
# Dimensional axes may always be indexed by value if in a Value type wrapper.
function axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::TolValue)
idxs = searchsorted(ax, ClosedInterval(idx.val,idx.val))
length(idxs) > 1 && error("more than one datapoint lies on axis value $idx; use an interval to return all values")
if length(idxs) == 1
idxs[1]
else # it's zero
last(idxs) > 0 && abs(ax[last(idxs)] - idx.val) < idx.tol && return last(idxs)
first(idxs) <= length(ax) && abs(ax[first(idxs)] - idx.val) < idx.tol && return first(idxs)
throw(BoundsError(ax, idx))
end
end
function axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::ExactValue)
idxs = searchsorted(ax, ClosedInterval(idx.val,idx.val))
length(idxs) > 1 && error("more than one datapoint lies on axis value $idx; use an interval to return all values")
if length(idxs) == 1
idxs[1]
else # it's zero
throw(BoundsError(ax, idx))
end
end
# For index types that AxisArrays doesn't know about
axisindexes(::Type{Dimensional}, ax::AbstractVector, idx) = idx
# Dimensional axes may be indexed by intervals to select a range
axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::ClosedInterval) = searchsorted(ax, idx)
# Or repeated intervals, which only work if the axis is a range since otherwise
# there will be a non-constant number of indices in each repetition.
#
# There are a number of challenges here:
# * This operation adds a dimension to the result; rows represent the interval
# (or subset) and columns are offsets (or repetition). A RepeatedRangeMatrix
# represents the resulting matrix of indices very nicely.
# * We also want the returned matrix to keep track of its axes; the axis
# subset (ax_sub) is the relative location of the interval with respect to
# each offset, and the repetitions (ax_rep) is the array of offsets.
# * We are interested in the resulting *addition* of the interval against the
# offsets. Either the offsets or the interval may independently be out of
# bounds prior to this addition. Even worse: the interval may have different
# units than the axis (e.g., `(Day(-1)..Day(1)) + dates` for a three-day
# span around dates of interest over a Date axis).
# * It is possible (and likely!) that neither the interval endpoints nor the
# offsets fall exactly upon an axis value. Or even worse: some offsets
# when added to the interval could span more elements than others (the
# fencepost problem). As such, we need to be careful about how and when we
# snap the provided intervals and offsets to exact axis values (and indices).
#
# To avoid the fencepost problems and to define the axes, we convert the
# interval to a UnitRange of relative indices and the array of offsets to an
# array of absolute indices (independently of each other). Exactly how we do so
# must be carefully considered.
#
# Note that this is fundamentally different than indexing by a single interval;
# whereas those intervals are specified in the same units as the elements of the
# axis itself, these intervals are specified in terms of _offsets_. At the same
# time, we want `A[interval] == vec(A[interval + [0]])`. To make these
# computations as similar as possible, we use a phony range of the form
# `step(ax):step(ax):step(ax)` in order to search for the interval.
phony_range(r::AbstractRange) = step(r):step(r):step(r)
phony_range(r::AbstractUnitRange) = step(r):step(r)
phony_range(r::StepRangeLen) = StepRangeLen(r.step, r.step, 1)
function relativewindow(r::AbstractRange, x::ClosedInterval)
pr = phony_range(r)
idxs = Extrapolated.searchsorted(pr, x)
vals = Extrapolated.getindex(pr, idxs)
return (idxs, vals)
end
axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::RepeatedInterval) = error("repeated intervals might select a varying number of elements for non-range axes; use a repeated Range of indices instead")
function axisindexes(::Type{Dimensional}, ax::AbstractRange, idx::RepeatedInterval)
idxs, vals = relativewindow(ax, idx.window)
offsets = [Extrapolated.searchsortednearest(ax, offset) for offset in idx.offsets]
AxisArray(RepeatedRangeMatrix(idxs, offsets), Axis{:sub}(vals), Axis{:rep}(Extrapolated.getindex(ax, offsets)))
end
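# Sketch: an interval of offsets applied at several positions of a range axis
# (illustrative data; the result's row/rep axes come from RepeatedRangeMatrix):
#   A = AxisArray(collect(1:10), Axis{:t}(0.1:0.1:1.0))
#   A[ClosedInterval(-0.1, 0.1) + [0.3, 0.6]]
#   # 3x2 result: columns are the +/- one-sample windows around t = 0.3 and t = 0.6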
# We also have special datatypes to represent intervals about indices
axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::IntervalAtIndex) = searchsorted(ax, idx.window + ax[idx.index])
function axisindexes(::Type{Dimensional}, ax::AbstractRange, idx::IntervalAtIndex)
idxs, vals = relativewindow(ax, idx.window)
AxisArray(idxs .+ idx.index, Axis{:sub}(vals))
end
axisindexes(::Type{Dimensional}, ax::AbstractVector, idx::RepeatedIntervalAtIndexes) = error("repeated intervals might select a varying number of elements for non-range axes; use a repeated Range of indices instead")
function axisindexes(::Type{Dimensional}, ax::AbstractRange,
idx::RepeatedIntervalAtIndexes)
idxs, vals = relativewindow(ax, idx.window)
AxisArray(RepeatedRangeMatrix(idxs, idx.indexes), Axis{:sub}(vals), Axis{:rep}(ax[idx.indexes]))
end
# Categorical axes may be indexed by their elements
function axisindexes(::Type{Categorical}, ax::AbstractVector, idx)
i = findfirst(isequal(idx), ax)
i === nothing && throw(ArgumentError("index $idx not found"))
i
end
function axisindexes(::Type{Categorical}, ax::AbstractVector, idx::Value)
val = idx.val
i = findfirst(isequal(val), ax)
i === nothing && throw(ArgumentError("index $val not found"))
i
end
# Categorical axes may be indexed by a vector of their elements
function axisindexes(::Type{Categorical}, ax::AbstractVector, idx::AbstractVector)
res = findall(in(idx), ax)
length(res) == length(idx) || throw(ArgumentError("index $(setdiff(idx,ax)) not found"))
res
end
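# Sketch of categorical indexing by element (illustrative array):
#   A = AxisArray([10, 20, 30], Axis{:id}([:a, :b, :c]))
#   A[:b]         # == 20
#   A[[:a, :c]]   # == the elements at :a and :c, i.e. [10, 30]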
# Creates *instances* of axis traits for a set of axes.
# TODO: Transition axistrait() to return trait instances in line with common
# practice in Base and other packages.
#
# This function is a utility tool to ensure that `axistrait` is only called
# from outside the generated function below. (If not, we can get world age
# errors.)
_axistraits(ax1, rest...) = (axistrait(ax1)(), _axistraits(rest...)...)
_axistraits() = ()
# This catch-all method attempts to convert any axis-specific non-standard
# indexing types to their integer or integer range equivalents using axisindexes
# It is separate from the `Base.getindex` function to allow reuse between
# set- and get- index.
@inline to_index(A::AxisArray, I...) = _to_index(A, _axistraits(I...), I...)
@generated function _to_index(A::AxisArray{T,N,D,Ax}, axtraits, I...) where {T,N,D,Ax}
ex = Expr(:tuple)
n = 0
axtrait_types = axtraits.parameters
for i=1:length(I)
if axtrait_types[i] <: Categorical && i <= length(Ax.parameters)
if I[i] <: Axis
push!(ex.args, :(axisindexes(A.axes[$i], I[$i].val)))
else
push!(ex.args, :(axisindexes(A.axes[$i], I[$i])))
end
n += 1
continue
end
if I[i] <: Idx
push!(ex.args, :(I[$i]))
n += 1
elseif I[i] <: AbstractArray{Bool}
push!(ex.args, :(findall(I[$i])))
n += 1
elseif I[i] <: Values
push!(ex.args, :(axisindexes.(Ref(A.axes[$i]), I[$i])))
n += 1
elseif I[i] <: CartesianIndex
for j = 1:length(I[i])
push!(ex.args, :(I[$i][$j]))
end
n += length(I[i])
elseif i <= length(Ax.parameters)
if I[i] <: Axis
push!(ex.args, :(axisindexes(A.axes[$i], I[$i].val)))
else
push!(ex.args, :(axisindexes(A.axes[$i], I[$i])))
end
n += 1
else
push!(ex.args, :(error("dimension ", $i, " does not have an axis to index")))
end
end
for _=n+1:N
push!(ex.args, :(Colon()))
end
meta = Expr(:meta, :inline)
return :($meta; $ex)
end
## Extracting the full axis (name + values) from the Axis{:name} type
@inline Base.getindex(A::AxisArray, ::Type{Ax}) where {Ax<:Axis} = getaxis(Ax, axes(A)...)
@inline getaxis(::Type{Ax}, ax::Ax, axs...) where {Ax<:Axis} = ax
@inline getaxis(::Type{Ax}, ax::Axis, axs...) where {Ax<:Axis} = getaxis(Ax, axs...)
@noinline getaxis(::Type{Ax}) where {Ax<:Axis} = throw(ArgumentError("no axis of type $Ax was found"))
# Boundschecking specialization: defer to the data array.
# Note that we could unwrap AxisArrays when they are used as indices into other
# arrays within Base's to_index/to_indices methods, but that requires a bigger
# refactor to merge our to_index method with Base's.
@inline Base.checkindex(::Type{Bool}, inds::AbstractUnitRange, A::AxisArray) = Base.checkindex(Bool, inds, A.data)
# ---- AxisArrays.jl source: interval promotion and arithmetic ----
# Promotion rules for "promiscuous" types like Intervals and SIUnits, which both
# simply wrap any Number, are often ambiguous. That is, which type should "win"
# -- is the promotion between an SIUnit and an ClosedInterval an SIQuantity{ClosedInterval}
# or is it an ClosedInterval{SIQuantity}? For our uses in AxisArrays, though, we can
# sidestep this problem by making Intervals *not* a subtype of Number. Then in
# order for them to plug into the promotion system, we *extend* the promoting
# operator behaviors to Union{Number, ClosedInterval}. This way other types can
# similarly define their own extensions to the promoting operators without fear
# of ambiguity -- there will simply be, e.g.,
#
# f(x::Number, y::Number) = f(promote(x,y)...) # in base
# f(x::Union{Number, ClosedInterval}, y::Union{Number, ClosedInterval}) = f(promote(x,y)...)
# f(x::Union{Number, T}, y::Union{Number, T}) = f(promote(x,y)...)
#
# In this way, these "promiscuous" types will never interact unless explicitly
# made subtypes of Number or otherwise defined with knowledge of each other. The
# downside is that Intervals are not as useful as they could be; they really
# could be considered as <: Number themselves. We do this in general for any
# supported Scalar:
const Scalar = Union{Number, Dates.AbstractTime}
Base.promote_rule(::Type{ClosedInterval{T}}, ::Type{T}) where {T<:Scalar} = ClosedInterval{T}
Base.promote_rule(::Type{ClosedInterval{T}}, ::Type{S}) where {T,S<:Scalar} = ClosedInterval{promote_type(T,S)}
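# Sketch: with the rules above,
#   promote_type(ClosedInterval{Int}, Float64)   # == ClosedInterval{Float64}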
import Base: isless, <=, >=, ==, +, -, *, /, ^, //
# TODO: Is this a total ordering? (antisymmetric, transitive, total)?
isless(a::ClosedInterval, b::ClosedInterval) = isless(a.right, b.left)
# The default definition for <= assumes a strict total order (<=(x,y) = !(y < x))
<=(a::ClosedInterval, b::ClosedInterval) = a.left <= b.left && a.right <= b.right
+(a::ClosedInterval) = a
+(a::ClosedInterval, b::ClosedInterval) = ClosedInterval(a.left + b.left, a.right + b.right)
-(a::ClosedInterval) = ClosedInterval(-a.right, -a.left)
-(a::ClosedInterval, b::ClosedInterval) = a + (-b)
for f in (:(*), :(/), :(//))
# For a general monotonic operator, we compute the operation over all
# combinations of the endpoints and return the widest interval
@eval function $(f)(a::ClosedInterval, b::ClosedInterval)
w = $(f)(a.left, b.left)
x = $(f)(a.left, b.right)
y = $(f)(a.right, b.left)
z = $(f)(a.right, b.right)
ClosedInterval(min(w,x,y,z), max(w,x,y,z))
end
end
# Extend the promoting operators to include Intervals. The comparison operators
# (<, <=, and ==) are a pain since they are non-promoting fallbacks that call
# isless, !(y < x) (which is wrong), and ===. So implementing promotion with
# Union{T, ClosedInterval} causes stack overflows for the base types. This is safer:
for f in (:isless, :(<=), :(>=), :(==), :(+), :(-), :(*), :(/), :(//))
# We don't use promote here, though, because promotions can be lossy... and
# that's particularly bad for comparisons. Just make an interval instead.
@eval $(f)(x::ClosedInterval, y::Scalar) = $(f)(x, y..y)
@eval $(f)(x::Scalar, y::ClosedInterval) = $(f)(x..x, y)
end
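# Sketch of the resulting interval arithmetic:
#   ClosedInterval(1, 2) + ClosedInterval(10, 20)   # == 11..22
#   -ClosedInterval(1, 2)                           # == -2..-1
#   ClosedInterval(1, 2) * ClosedInterval(-1, 3)    # == -2..6 (widest endpoint combination)
#   ClosedInterval(0.5, 1.5) + 1                    # == 1.5..2.5 (scalar treated as 1..1)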
# And, finally, we have an Array-of-Structs to Struct-of-Arrays transform for
# the common case where the interval is constant over many offsets:
struct RepeatedInterval{T,S,A} <: AbstractVector{T}
window::ClosedInterval{S}
offsets::A # A <: AbstractVector
end
RepeatedInterval(window::ClosedInterval{S}, offsets::A) where {S,A<:AbstractVector} =
RepeatedInterval{promote_type(ClosedInterval{S}, eltype(A)), S, A}(window, offsets)
Base.size(r::RepeatedInterval) = size(r.offsets)
Base.IndexStyle(::Type{<:RepeatedInterval}) = IndexLinear()
Base.getindex(r::RepeatedInterval, i::Int) = r.window + r.offsets[i]
+(window::ClosedInterval, offsets::AbstractVector) = RepeatedInterval(window, offsets)
+(offsets::AbstractVector, window::ClosedInterval) = RepeatedInterval(window, offsets)
-(window::ClosedInterval, offsets::AbstractVector) = RepeatedInterval(window, -offsets)
-(offsets::AbstractVector, window::ClosedInterval) = RepeatedInterval(-window, offsets)
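# Sketch (illustrative offsets):
#   r = ClosedInterval(0, 1) + [10, 20, 30]   # a 3-element RepeatedInterval
#   r[2]                                      # == 20..21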
# As a special extension to intervals, we allow specifying Intervals about a
# particular index, which is resolved by an axis upon indexing.
struct IntervalAtIndex{T}
window::ClosedInterval{T}
index::Int
end
atindex(window::ClosedInterval, index::Integer) = IntervalAtIndex(window, index)
# And similarly, an AoS -> SoA transform:
struct RepeatedIntervalAtIndexes{T,A<:AbstractVector{Int}} <: AbstractVector{IntervalAtIndex{T}}
window::ClosedInterval{T}
indexes::A # A <: AbstractVector{Int}
end
atindex(window::ClosedInterval, indexes::AbstractVector) = RepeatedIntervalAtIndexes(window, indexes)
Base.size(r::RepeatedIntervalAtIndexes) = size(r.indexes)
Base.IndexStyle(::Type{<:RepeatedIntervalAtIndexes}) = IndexLinear()
Base.getindex(r::RepeatedIntervalAtIndexes, i::Int) = IntervalAtIndex(r.window, r.indexes[i])
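# Sketch of index-relative windows (illustrative array; resolution happens in axisindexes):
#   A = AxisArray(collect(1:10), Axis{:t}(0.1:0.1:1.0))
#   A[atindex(ClosedInterval(-0.1, 0.1), 5)]        # elements 4:6, centered on index 5
#   A[atindex(ClosedInterval(-0.1, 0.1), [5, 8])]   # the same window around indices 5 and 8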
# ---- AxisArrays.jl source: searchsorted extensions and the Extrapolated module ----
# Additions to Base's searchsorted functionality
"""
searchsortednearest(vec::AbstractVector, x)
Like `searchsortedfirst` or `searchsortedlast`, this returns the index of
the element in the sorted vector `vec` whose value is closest to `x`, rounding
up. If there are multiple elements that are equally close to `x`, this will
return the first index if `x` is less than or equal to those in the vector or
the last index if `x` is greater.
"""
function searchsortednearest(vec::AbstractVector, x)
idx = searchsortedfirst(vec, x) # Returns the first idx | vec[idx] >= x
if idx > 1 && (idx > length(vec) || (vec[idx] - x) > (x - vec[idx-1]))
idx -= 1 # The previous element is closer
end
return idx
end
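# Sketch:
#   searchsortednearest([1, 2, 4], 2.4)   # == 2 (2 is the closest value)
#   searchsortednearest([1, 2, 4], 3)     # == 3 (equidistant: rounds up to the index of 4)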
# Base only specializes searching ranges by Numbers; so optimize for Intervals
function Base.searchsorted(a::AbstractRange, I::ClosedInterval)
searchsortedfirst(a, I.left):searchsortedlast(a, I.right)
end
"""
The internal `Extrapolated` module contains implementations for indexing and
searching into ranges beyond their bounds. The `@inbounds` macro is not
sufficient since it can be turned off by `--check-bounds=yes`.
"""
module Extrapolated
using IntervalSets: ClosedInterval
function searchsortednearest(vec::AbstractRange, x)
idx = searchsortedfirst(vec, x) # Returns the first idx | vec[idx] >= x
if (getindex(vec, idx) - x) > (x - getindex(vec, idx-1))
idx -= 1 # The previous element is closer
end
return idx
end
"""
searchsorted(a::AbstractRange, I::ClosedInterval)
Return the indices of the range that fall within an interval without checking
bounds, possibly extrapolating outside the range if needed.
"""
function searchsorted(a::AbstractRange, I::ClosedInterval)
searchsortedfirst(a, I.left):searchsortedlast(a, I.right)
end
# When running with `--check-bounds=yes` (like on Travis), the bounds-check isn't elided
@inline function getindex(v::AbstractRange{T}, i::Integer) where T
convert(T, first(v) + (i-1)*step(v))
end
@inline function getindex(r::AbstractRange, s::AbstractRange{<:Integer})
f = first(r)
st = oftype(f, f + (first(s)-1)*step(r))
range(st, step=step(r)*step(s), length=length(s))
end
getindex(r::AbstractRange, I::Array) = [getindex(r, i) for i in I]
@inline getindex(r::StepRangeLen, i::Integer) = Base.unsafe_getindex(r, i)
@inline function getindex(r::StepRangeLen, s::AbstractUnitRange)
soffset = 1 + (r.offset - first(s))
soffset = clamp(soffset, 1, length(s))
ioffset = first(s) + (soffset-1)
if ioffset == r.offset
StepRangeLen(r.ref, r.step, length(s), max(1,soffset))
else
StepRangeLen(r.ref + (ioffset-r.offset)*r.step, r.step, length(s), max(1,soffset))
end
end
function searchsortedlast(a::AbstractRange, x)
step(a) == 0 && throw(ArgumentError("ranges with a zero step are unsupported"))
n = round(Integer,(x-first(a))/step(a))+1
isless(x, getindex(a, n)) ? n-1 : n
end
function searchsortedfirst(a::AbstractRange, x)
step(a) == 0 && throw(ArgumentError("ranges with a zero step are unsupported"))
n = round(Integer,(x-first(a))/step(a))+1
isless(getindex(a, n), x) ? n+1 : n
end
function searchsortedlast(a::AbstractRange{<:Integer}, x)
step(a) == 0 && throw(ArgumentError("ranges with a zero step are unsupported"))
fld(floor(Integer,x)-first(a),step(a))+1
end
function searchsortedfirst(a::AbstractRange{<:Integer}, x)
step(a) == 0 && throw(ArgumentError("ranges with a zero step are unsupported"))
-fld(floor(Integer,-x)+first(a),step(a))+1
end
function searchsortedfirst(a::AbstractRange{<:Integer}, x::Unsigned)
step(a) == 0 && throw(ArgumentError("ranges with a zero step are unsupported"))
-fld(first(a)-signed(x),step(a))+1
end
function searchsortedlast(a::AbstractRange{<:Integer}, x::Unsigned)
step(a) == 0 && throw(ArgumentError("ranges with a zero step are unsupported"))
fld(signed(x)-first(a),step(a))+1
end
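# Sketch of the extrapolating behavior of this module (no bounds checks; indices and
# search results may lie outside the range):
#   getindex(1:5, 7)                            # == 7
#   getindex(0.0:0.5:1.0, -1)                   # == -1.0
#   searchsorted(1:10, ClosedInterval(-2, 3))   # == -2:3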
end
# ---- AxisArrays.jl source: SortedVector axis type ----
export SortedVector
"""
A SortedVector is an AbstractVector where the underlying data is
ordered (monotonically increasing).
Indexing that would unsort the data is prohibited. A SortedVector is a
Dimensional axis, and no checking is done to ensure that the data is
sorted. Duplicate values are allowed.
A SortedVector axis can be indexed with an ClosedInterval, with a value, or
with a vector of values. Use of a SortedVector{Tuple} axis allows
indexing similar to the hierarchical index of the Python Pandas
package or the R data.table package.
### Constructors
```julia
SortedVector(x::AbstractVector)
```
### Arguments
* `x::AbstractVector` : the wrapped vector
### Examples
```julia
v = SortedVector(collect([1.; 10.; 10:15.]))
A = AxisArray(reshape(1:16, 8, 2), v, [:a, :b])
A[ClosedInterval(8.,12.), :]
A[1., :]
A[10., :]
## Hierarchical index example with three key levels
data = reshape(1.:40., 20, 2)
v = collect(zip([:a, :b, :c][rand(1:3,20)], [:x,:y][rand(1:2,20)], [:x,:y][rand(1:2,20)]))
idx = sortperm(v)
A = AxisArray(data[idx,:], SortedVector(v[idx]), [:a, :b])
A[:b, :]
A[[:a,:c], :]
A[(:a,:x), :]
A[(:a,:x,:x), :]
A[ClosedInterval(:a,:b), :]
A[ClosedInterval((:a,:x),(:b,:x)), :]
```
"""
struct SortedVector{T} <: AbstractVector{T}
data::AbstractVector{T}
end
Base.getindex(v::SortedVector, idx::Int) = v.data[idx]
Base.getindex(v::SortedVector, idx::UnitRange) = SortedVector(v.data[idx])
Base.getindex(v::SortedVector, idx::StepRange) =
step(idx) > 0 ? SortedVector(v.data[idx]) : error("step must be positive to index a SortedVector")
Base.getindex(v::SortedVector, idx::AbstractVector) =
issorted(idx) ? SortedVector(v.data[idx]) : error("index must be monotonically increasing to index a SortedVector")
Base.length(v::SortedVector) = length(v.data)
Base.size(v::SortedVector) = size(v.data)
Base.size(v::SortedVector, i) = size(v.data, i)
Base.axes(v::SortedVector) = Base.axes(v.data)
axistrait(::Type{<:SortedVector}) = Dimensional
checkaxis(::SortedVector) = nothing
## Add some special indexing for SortedVector{Tuple}'s to achieve something like
## Pandas' hierarchical indexing
axisindexes(ax::Axis{S,SortedVector{T}}, idx) where {T<:Tuple,S} =
searchsorted(ax.val, idx, 1, length(ax.val), Base.ord(_isless,identity,false,Base.Forward))
axisindexes(ax::Axis{S,SortedVector{T}}, idx::AbstractArray) where {T<:Tuple,S} =
vcat([axisindexes(ax, i) for i in idx]...)
## Use a modification of `isless`, so that (:a,) is not less than (:a, :b).
## This allows for more natural indexing.
_isless(x,y) = isless(x,y)
function _isless(t1::Tuple, t2::Tuple)
n1, n2 = length(t1), length(t2)
for i = 1:min(n1, n2)
a, b = t1[i], t2[i]
if !isequal(a, b)
return _isless(a, b)
end
end
return false
end
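# Sketch of the prefix-friendly ordering:
#   _isless((:a,), (:a, :b))   # false: (:a,) is treated as a prefix of (:a, :b), not as less
#   _isless((:a, :x), (:b,))   # true
#   _isless(:a, (:a, :b))      # false: scalars compare as one-element tuples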
# Additionally, we allow comparing scalars against tuples, which enables
# indexing by the first scalar in the tuple
_isless(t1::Tuple, t2) = _isless(t1,(t2,))
_isless(t1, t2::Tuple) = _isless((t1,),t2)
# And then we add special comparisons to Intervals, because by default they
# only define comparisons against Numbers and Dates. We're able to do this on
# our own local function... doing this directly on isless itself would be
# fraught with trouble.
_isless(a::ClosedInterval, b::ClosedInterval) = _isless(a.right, b.left)
_isless(t1::ClosedInterval, t2::Tuple) = _isless(t1, ClosedInterval(t2,t2))
_isless(t1::Tuple, t2::ClosedInterval) = _isless(ClosedInterval(t1,t1), t2)
_isless(a::ClosedInterval, b) = _isless(a, ClosedInterval(b,b))
_isless(a, b::ClosedInterval) = _isless(ClosedInterval(a,a), b)
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 1715 | # Test CategoricalVector with a hierarchical index (indexed using Tuples)
data = reshape(1.:40., 20, 2)
# v = collect(zip([:a, :b, :c][rand(1:3,20)], [:x,:y][rand(1:2,20)], [:x,:y][rand(1:2,20)]))
v = [(:b, :x, :y), (:c, :y, :y), (:b, :x, :y), (:a, :y, :y), (:b, :y, :y),
(:c, :y, :y), (:b, :x, :x), (:c, :x, :y), (:c, :y, :y), (:a, :y, :y),
(:a, :y, :y), (:b, :x, :y), (:c, :x, :y), (:c, :y, :y), (:b, :x, :y),
(:a, :x, :x), (:c, :x, :x), (:c, :y, :y), (:b, :y, :x), (:b, :y, :y)]
idx = sortperm(v)
A = AxisArray(data[idx,:], AxisArrays.CategoricalVector(v[idx]), [:a, :b])
@test A[:b, :] == A[5:12, :]
@test A[[:a,:c], :] == A[[1:4;13:end], :]
@test A[(:a,:y), :] == A[2:4, :]
@test A[(:c,:y,:y), :] == A[16:end, :]
@test AxisArrays.axistrait(AxisArrays.axes(A)[1]) <: AxisArrays.Categorical
v = AxisArrays.CategoricalVector(collect([1; 8; 10:15]))
@test size(v) == (8,)
@test size(v, 1) == 8
@test size(v, 2) == 1
@test AxisArrays.axistrait(AxisArrays.axes(A)[1]) <: AxisArrays.Categorical
A = AxisArray(reshape(1:16, 8, 2), v, [:a, :b])
@test A[Axis{:row}(AxisArrays.CategoricalVector([15]))] == AxisArray(reshape(A.data[8, :], 1, 2), AxisArrays.CategoricalVector([15]), [:a, :b])
@test A[Axis{:row}(AxisArrays.CategoricalVector([15])), 1] == AxisArray([A.data[8, 1]], AxisArrays.CategoricalVector([15]))
@test A[atvalue(15), :] == AxisArray(A.data[8, :], [:a, :b])
@test A[atvalue(15), 1] == 8
@test AxisArrays.axistrait(AxisArrays.axes(A)[1]) <: AxisArrays.Categorical
# TODO: maybe make this work? Would require removing or modifying Base.getindex(A::AxisArray, idxs::Idx...)
# @test A[AxisArrays.CategoricalVector([15]), 1] == AxisArray([A.data[8, 1]], AxisArrays.CategoricalVector([15]))
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 4744 | # cat
A1data, A2data = [1 3; 2 4], [5 7; 6 8]
A1 = AxisArray(A1data, Axis{:Row}([:First, :Second]), Axis{:Col}([:A, :B]))
A2 = AxisArray(A2data, Axis{:Row}([:Third, :Fourth]), Axis{:Col}([:A, :B]))
@test isa(cat(A1, A2, dims=1), AxisArray)
@test cat(A1, A2, dims=1) == AxisArray(vcat(A1data, A2data),
Axis{:Row}([:First, :Second, :Third, :Fourth]), Axis{:Col}([:A, :B]))
A2 = AxisArray(A2data, Axis{:Row}([:First, :Second]), Axis{:Col}([:C, :D]))
@test isa(cat(A1, A2, dims=2), AxisArray)
@test cat(A1, A2, dims=2) == AxisArray(hcat(A1data, A2data),
Axis{:Row}([:First, :Second]), Axis{:Col}([:A, :B, :C, :D]))
A2 = AxisArray(A2data, Axis{:Row}([:First, :Second]), Axis{:Col}([:A, :B]))
@test isa(cat(A1, A2, dims=3), AxisArray)
@test cat(A1, A2, dims=3) == AxisArray(cat(A1data, A2data, dims=3),
Axis{:Row}([:First, :Second]), Axis{:Col}([:A, :B]),
Axis{:page}(1:2))
A1 = AxisArray(A1data, :Row, :Col)
A2 = AxisArray(A2data, :Row, :Col)
@test_throws ArgumentError cat(A1, A2, dims=2)
@test cat(A1, A2, dims=3) == AxisArray(cat(A1data, A2data, dims=3), :Row, :Col)
# merge
Adata, Bdata, Cdata = randn(4,4,2), randn(4,4,2), randn(4,4,2)
A = AxisArray(Adata, Axis{:X}([1,2,3,4]), Axis{:Y}([10.,20,30,40]), Axis{:Z}([:First, :Second]))
B = AxisArray(Bdata, Axis{:X}([3,4,5,6]), Axis{:Y}([30.,40,50,60]), Axis{:Z}([:First, :Second]))
ABdata = zeros(6,6,2)
ABdata[1:4,1:4,:] = Adata
ABdata[3:6,3:6,:] = Bdata
@test merge(A,B) == AxisArray(ABdata, Axis{:X}([1,2,3,4,5,6]), Axis{:Y}([10.,20,30,40,50,60]), Axis{:Z}([:First, :Second]))
@test_throws ErrorException AxisArrays.axismerge(:notouter, Axis{:X}([1,2,3,4]), Axis{:X}([1,2,3,4]))
AC = AxisArray(cat(Adata, Cdata, dims=3), :X, :Y, :Z)
B2 = AxisArray(Bdata, :X, :Y, :Z)
@test merge(AC,B2) == AxisArray(cat(Bdata, Cdata, dims=3), :X, :Y, :Z)
# join
ABdata = zeros(6,6,2,2)
ABdata[1:4,1:4,:,1] = Adata
ABdata[3:6,3:6,:,2] = Bdata
@test join(A,B) == AxisArray(ABdata, Axis{:X}([1,2,3,4,5,6]), Axis{:Y}([10.,20,30,40,50,60]), Axis{:Z}([:First, :Second]))
@test join(A,B, newaxis=Axis{:JoinAxis}([:A, :B])) == AxisArray(ABdata, Axis{:X}([1,2,3,4,5,6]), Axis{:Y}([10.,20,30,40,50,60]), Axis{:Z}([:First, :Second]), Axis{:JoinAxis}([:A, :B]))
@test join(A,B,method=:inner) == AxisArray(ABdata[3:4, 3:4, :, :], Axis{:X}([3,4]), Axis{:Y}([30.,40]), Axis{:Z}([:First, :Second]))
@test join(A,B,method=:left) == AxisArray(ABdata[1:4, 1:4, :, :], A.axes...)
@test join(A,B,method=:right) == AxisArray(ABdata[3:6, 3:6, :, :], B.axes...)
@test join(A,B,method=:outer) == join(A,B)
# collapse
A1 = AxisArray(A1data, Axis{:X}(1:2), Axis{:Y}(1:2))
A2 = AxisArray(reshape(A2data, size(A2data)..., 1), Axis{:X}(1:2), Axis{:Y}(1:2), Axis{:Z}([:foo]))
@test @inferred(collapse(Val(2), A1, A2)) == AxisArray(cat(A1data, A2data, dims=3), Axis{:X}(1:2), Axis{:Y}(1:2), Axis{:collapsed}(AxisArrays.CategoricalVector([(1,), (2, :foo)])))
@test @inferred(collapse(Val(2), A1)) == AxisArray(reshape(A1, 2, 2, 1), Axis{:X}(1:2), Axis{:Y}(1:2), Axis{:collapsed}(AxisArrays.CategoricalVector([(1,)])))
@test @inferred(collapse(Val(2), A1)) == AxisArray(reshape(A1.data, size(A1)..., 1), AxisArrays.axes(A1)..., Axis{:collapsed}(AxisArrays.CategoricalVector([(1,)])))
@test @inferred(collapse(Val(2), (:A1, :A2), A1, A2)) == AxisArray(cat(A1data, A2data, dims=3), Axis{:X}(1:2), Axis{:Y}(1:2), Axis{:collapsed}(AxisArrays.CategoricalVector([(:A1,), (:A2, :foo)])))
@test @inferred(collapse(Val(2), (:foo,), A1)) == AxisArray(reshape(A1, 2, 2, 1), Axis{:X}(1:2), Axis{:Y}(1:2), Axis{:collapsed}(AxisArrays.CategoricalVector([(:foo,)])))
@test @inferred(collapse(Val(2), (:a,), A1)) == AxisArray(reshape(A1.data, size(A1)..., 1), AxisArrays.axes(A1)..., Axis{:collapsed}(AxisArrays.CategoricalVector([(:a,)])))
@test @inferred(collapse(Val(0), A1)) == AxisArray(vec(A1data), Axis{:collapsed}(AxisArrays.CategoricalVector(vec(collect(Iterators.product((1,), axisvalues(A1)...))))))
@test @inferred(collapse(Val(1), A1)) == AxisArray(A1data, Axis{:row}(1:2), Axis{:collapsed}(AxisArrays.CategoricalVector(vec(collect(Iterators.product((1,), axisvalues(A1)[2]))))))
@test @inferred(collapse(Val(1), (1,), A1)) == collapse(Val(1), A1)
@test @inferred(collapse(Val(1), Array{Int, 2}, A1)) == collapse(Val(1), A1)
@test @inferred(collapse(Val(1), Array{Int, 2}, (1,), A1)) == collapse(Val(1), A1)
@test_throws ArgumentError collapse(Val(-1), A1)
@test_throws ArgumentError collapse(Val(10), A1)
A1ᵀ = transpose(A1)
@test_throws ArgumentError collapse(Val(-1), A1, A1ᵀ)
@test_throws ArgumentError collapse(Val(1), A1, A1ᵀ)
@test_throws ArgumentError collapse(Val(10), A1, A1ᵀ)
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 14249 | macro maybe_test_broken(brokenif, args...)
if VERSION < v"1"
return quote
@test_broken $(esc(args...))
end
else
return quote
if $(esc(brokenif))
@test_broken $(esc(args...))
else
@test $(esc(args...))
end
end
end
end
# FIXME: type stability broken. The following should NOT error
A = @inferred(AxisArray(reshape(1:24, 2,3,4), .1:.1:.2, .1:.1:.3, .1:.1:.4))
@test_throws ArgumentError AxisArray(reshape(1:24, 2,3,4), .1:.1:.1, .1:.1:.3, .1:.1:.4)
@test_throws ArgumentError AxisArray(reshape(1:24, 2,3,4), .1:.1:.1, .1:.1:.3)
@test_throws ArgumentError AxisArray(reshape(1:24, 2,3,4), .1:.1:.2, .1:.1:.3, .1:.1:.4, 1:1)
@test parent(A) === reshape(1:24, 2,3,4)
# Test iteration
for (a,b) in zip(A, A.data)
@test a == b
end
# Cartesian indexing
for idx in eachindex(A)
@test A[idx] == A.data[idx]
end
# Conversion and similar
@test Array(A) == A.data
@test reshape(A, length(A)) == A.data[:]
B = similar(A, Float64)
for i in eachindex(A)
B[i] = A[i]
end
@test A == B
for i=1:length(A)
@test float(A[i]) === B[i]
end
C = similar(A, 0)
@test isa(C, Array{Int,1})
@test C == []
D = similar(A)
@test size(A) == size(D)
@test eltype(A) == eltype(D)
# permutedims and transpose
@test axisnames(permutedims(A, (2,1,3))) == (:col, :row, :page)
@test axisnames(permutedims(A, (2,3,1))) == (:col, :page, :row)
@test axisnames(permutedims(A, (3,2,1))) == (:page, :col, :row)
@test axisnames(permutedims(A, (3,1,2))) == (:page, :row, :col)
for perm in ((:col, :row, :page), (:col, :page, :row),
(:page, :col, :row), (:page, :row, :col),
(:row, :page, :col), (:row, :col, :page))
@test axisnames(permutedims(A, perm)) == perm
end
@test axisnames(permutedims(A, (:col,))) == (:col, :row, :page)
@test axisnames(permutedims(A, (:page,))) == (:page, :row, :col)
A2 = @inferred(AxisArray(reshape(1:15, 3, 5)))
A1 = AxisArray(1:5, :t)
for f in (transpose, adjoint)
@test f(A2).data == f(A2.data)
@test axisnames(f(A2)) == (:col, :row)
@test f(A1).data == f(A1.data)
@test axisnames(f(A1)) == (:transpose, :t)
end
# Test modifying a particular axis
E = similar(A, Float64, Axis{:col}(1:2))
@test size(E) == (2,2,4)
@test eltype(E) == Float64
F = similar(A, Axis{:row}())
@test size(F) == size(A)[2:end]
@test eltype(F) == eltype(A)
@test axisvalues(F) == axisvalues(A)[2:end]
@test axisnames(F) == axisnames(A)[2:end]
G = similar(A, Float64)
@test size(G) == size(A)
@test eltype(G) == Float64
@test axisvalues(A) == axisvalues(G)
@test axisnames(A) == axisnames(G)
H = similar(A, 1,1,1)
@test size(H) == (1,1,1)
@test eltype(H) == eltype(A)
@test typeof(H) <: Array
H = similar(A, Float64, 1,1,1)
@test size(H) == (1,1,1)
@test eltype(H) == Float64
@test typeof(H) <: Array
# Size
@test size(A, 1) == size(A, Axis{1}) == size(A, Axis{:row}) == size(A, Axis{:row}())
## Test constructors
# No axis or time args
A = AxisArray(1:3)
@test A.data == 1:3
@test axisnames(A) == (:row,)
@inferred(axisnames(A))
@test axisvalues(A) == (1:3,)
A = AxisArray(reshape(1:16, 2,2,2,2))
@test A.data == reshape(1:16, 2,2,2,2)
@test axisnames(A) == (:row,:col,:page,:dim_4)
@inferred(axisnames(A))
@test axisvalues(A) == (1:2, 1:2, 1:2, 1:2)
# Just axis names
A = AxisArray(1:3, :a)
@test A.data == 1:3
@test axisnames(A) == (:a,)
@inferred(axisnames(A))
@test axisvalues(A) == (1:3,)
A = AxisArray([1 3; 2 4], :a)
@test A.data == [1 3; 2 4]
@test axisnames(A) == (:a, :col)
@inferred(axisnames(A))
@test axisvalues(A) == (1:2, 1:2)
# Just axis values
A = @inferred(AxisArray(1:3, .1:.1:.3))
@test A.data == 1:3
@test axisnames(A) == (:row,)
@inferred(axisnames(A))
@test axisvalues(A) == (.1:.1:.3,)
# FIXME: reintroduce inferred
A = @inferred(AxisArray(reshape(1:16, 2,2,2,2), .5:.5:1))
@test A.data == reshape(1:16, 2,2,2,2)
@test axisnames(A) == (:row,:col,:page,:dim_4)
@inferred(axisnames(A))
@test axisvalues(A) == (.5:.5:1, 1:2, 1:2, 1:2)
A = AxisArray([0]', :x, :y)
@test axisnames(dropdims(A, dims=1)) == (:y,)
@test axisnames(dropdims(A, dims=2)) == (:x,)
@test axisnames(dropdims(A, dims=(1,2))) == axisnames(dropdims(A, dims=(2,1))) == ()
@test axisnames((dropdims(A, dims=Axis{:x}))) == (:y,)
@test axisnames((dropdims(A, dims=Axis{:x,UnitRange{Int}}))) == (:y,)
@test axisnames((dropdims(A, dims=Axis{:y}))) == (:x,)
@test axisnames((dropdims(dropdims(A, dims=Axis{:x}), dims=Axis{:y}))) == ()
@test_broken @inferred(dropdims(A, dims=Axis{:x}))
@test_broken @inferred(dropdims(A, dims=Axis{:x,UnitRange{Int}}))
@test_broken @inferred(dropdims(A, dims=Axis{:y}))
@test_broken @inferred(dropdims(dropdims(A, dims=Axis{:x}), dims=Axis{:y}))
# Names, steps, and offsets
B = AxisArray([1 4; 2 5; 3 6], (:x, :y), (0.2, 100))
@test axisnames(B) == (:x, :y)
@test axisvalues(B) == (0:0.2:0.4, 0:100:100)
B = AxisArray([1 4; 2 5; 3 6], (:x, :y), (0.2, 100), (-3,14))
@test axisnames(B) == (:x, :y)
@test axisvalues(B) == (-3:0.2:-2.6, 14:100:114)
# Keyword constructor
C = AxisArray([1 4; 2 5; 3 6], x=10:10:30, y=[:a, :b])
@test axisnames(C) == (:x, :y)
@test axisvalues(C) == (10:10:30, [:a, :b])
@test @inferred(AxisArray(parent(C), x=1:3, y=1:2)) isa AxisArray
@test AxisArrays.HasAxes(A) == AxisArrays.HasAxes{true}()
@test AxisArrays.HasAxes([1]) == AxisArrays.HasAxes{false}()
@test_throws ArgumentError AxisArray(reshape(1:24, 2,3,4),
Axis{1}(.1:.1:.2),
Axis{2}(1//10:1//10:3//10),
Axis{3}(["a", "b", "c", "d"])) # Axis need to be symbols
@test_throws ArgumentError AxisArray(reshape(1:24, 2,3,4),
Axis{:x}(.1:.1:.2),
Axis{:y}(1//10:1//10:3//10),
Axis{:z}(["a", "b", "c", "d"]),
Axis{:_}(1:1)) # Too many Axes
A = @inferred(AxisArray(reshape(1:24, 2,3,4),
Axis{:x}(.1:.1:.2),
Axis{:y}(1//10:1//10:3//10),
Axis{:z}(["a", "b", "c", "d"])))
# recursive constructor
@test A === @inferred AxisArray(A)
@test axisnames(AxisArray(A, Axis{:yoyo}(1:length(A[Axis{:x}])))) == (:yoyo, :y, :z)
@test AxisArray(A, Axis{:yoyo}(1:length(A[Axis{:x}]))).data === A.data
@test AxisArray(A, (Axis{:yoyo}(1:length(A[Axis{:x}])),)).data === A.data
@test axisnames(AxisArray(A, :something, :in, :the)) == (:something, :in, :the)
@test AxisArray(A, :way, :you, :move).data === A.data
@test axisnames(AxisArray(A, (:c, :a, :b), (2, 3, 4))) == (:c, :a, :b)
@test AxisArray(A, (:c, :a, :b), (2, 3, 4)).data === A.data
@inferred AxisArray(A, Axis{:yoyo}(1:length(A[Axis{:x}])))
@inferred AxisArray(A, (Axis{:yoyo}(1:length(A[Axis{:x}])),))
# Test axisdim
@test axisdim(A, Axis{:x}) == axisdim(A, Axis{:x}()) == 1
@test axisdim(A, Axis{:y}) == axisdim(A, Axis{:y}()) == 2
@test axisdim(A, Axis{:z}) == axisdim(A, Axis{:z}()) == 3
# Test axes
@test @inferred(AxisArrays.axes(A)) == (Axis{:x}(.1:.1:.2), Axis{:y}(1//10:1//10:3//10), Axis{:z}(["a", "b", "c", "d"]))
@test @inferred(AxisArrays.axes(A, Axis{:x})) == @inferred(AxisArrays.axes(A, Axis{:x}())) == Axis{:x}(.1:.1:.2)
@test @inferred(AxisArrays.axes(A, Axis{:y})) == @inferred(AxisArrays.axes(A, Axis{:y}())) == Axis{:y}(1//10:1//10:3//10)
@test @inferred(AxisArrays.axes(A, Axis{:z})) == @inferred(AxisArrays.axes(A, Axis{:z}())) == Axis{:z}(["a", "b", "c", "d"])
@test AxisArrays.axes(A, 2) == Axis{:y}(1//10:1//10:3//10)
Aplain = rand(2,3)
@test @inferred(AxisArrays.axes(Aplain)) === AxisArrays.axes(AxisArray(Aplain))
@test AxisArrays.axes(Aplain, 1) === AxisArrays.axes(AxisArray(Aplain))[1]
@test AxisArrays.axes(Aplain, 2) === AxisArrays.axes(AxisArray(Aplain))[2]
@test Axis{:col}(1) == Axis{:col}(1)
@test Axis{:col}(1) != Axis{:com}(1)
@test Axis{:x}(1:3) == Axis{:x}(Base.OneTo(3))
@test hash(Axis{:col}(1)) == hash(Axis{:col}(1.0))
@test hash(Axis{:row}()) != hash(Axis{:col}())
@test hash(Axis{:x}(1:3)) == hash(Axis{:x}(Base.OneTo(3)))
@test AxisArrays.axistype(Axis{1}(1:2)) == typeof(1:2)
@test AxisArrays.axistype(Axis{1,UInt32}) == UInt32
@test axisnames(Axis{1}, Axis{2}, Axis{3}) == (1,2,3)
@test Axis{:row}(2:7)[4] == 5
@test eltype(Axis{:row}(1.0:1.0:3.0)) == Float64
@test size(Axis{:row}(2:7)) === (6,)
T = A[AxisArrays.Axis{:x}]
@test T[end] == 0.2
@test Base.axes(Axis{:row}(2:7)) === (Base.OneTo(6),)
@test Base.axes(Axis{:row}(-1:1), 1) === Base.OneTo(3)
@test length(Axis{:col}(-1:2)) === 4
@test AxisArrays.axisname(Axis{:foo}(1:2)) == :foo
@test AxisArrays.axisname(Axis{:foo}) == :foo
@test first(Axis{:row}(1:3)) == 1
@test step(Axis{:row}(1:3)) == 1
@test last(Axis{:row}(1:3)) == 3
# Test Timetype axis construction
dt, vals = DateTime(2010, 1, 2, 3, 40), randn(5,2)
A = @inferred(AxisArray(vals, Axis{:Timestamp}(dt-Dates.Hour(2):Dates.Hour(1):dt+Dates.Hour(2)), Axis{:Cols}([:A, :B])))
@test A[:, :A].data == vals[:, 1]
@test A[dt, :].data == vals[3, :]
@test AxisArrays.axistrait(A.axes[1]) == AxisArrays.Dimensional
@test AxisArrays.axistrait(typeof(A.axes[1])) == AxisArrays.Dimensional
@test AxisArrays.axistrait(A.axes[1].val) == AxisArrays.Dimensional
@test AxisArrays.axistrait(typeof(A.axes[1].val)) == AxisArrays.Dimensional
@test AxisArrays.axistrait(A.axes[2]) == AxisArrays.Categorical
@test AxisArrays.axistrait(typeof(A.axes[2])) == AxisArrays.Categorical
@test AxisArrays.axistrait(A.axes[2].val) == AxisArrays.Categorical
@test AxisArrays.axistrait(typeof(A.axes[2].val)) == AxisArrays.Categorical
@test_throws ArgumentError AxisArrays.checkaxis(Axis{:x}(10:-1:1))
@test_throws ArgumentError AxisArrays.checkaxis(10:-1:1)
# Simply run the display method to ensure no stupid errors
show(IOBuffer(),MIME("text/plain"),A)
# With unconventional indices
import OffsetArrays # import rather than using because OffsetArrays has a deprecation for ..
A = AxisArray(OffsetArrays.OffsetArray([5,3,4], -1:1), :x)
@test AxisArrays.axes(A) == (Axis{:x}(-1:1),)
@test A[-1] == 5
A[0] = 12
@test A.data[0] == 12
@test Base.axes(A) == Base.axes(A.data)
@test LinearIndices(A) == LinearIndices(A.data)
A = AxisArray(OffsetArrays.OffsetArray(rand(4,5), -1:2, 5:9), :x, :y)
@test Base.axes(A) == Base.axes(A.data)
@test LinearIndices(A) == LinearIndices(A.data)
@test AxisArrays.matchingdims((A, A))
f1(x) = x < 0
A2 = map(f1, A)
@test isa(A2, AxisArray)
@test A2.axes == A.axes
@test A2.data == map(f1, A.data)
map!(~, A2, A2)
@test isa(A2, AxisArray)
@test A2.axes == A.axes
@test A2.data == map(~, map(f1, A).data)
A2 = map(+, A, A)
@test isa(A2, AxisArray)
@test A2.axes == A.axes
@test A2.data == A.data .+ A.data
map!(*, A2, A, A)
@test isa(A2, AxisArray)
@test A2.axes == A.axes
@test A2.data == A.data .* A.data
# Reductions (issue #55)
A = AxisArray(collect(reshape(1:15,3,5)), :y, :x)
B = @inferred(AxisArray(collect(reshape(1:15,3,5)), Axis{:y}(0.1:0.1:0.3), Axis{:x}(10:10:50)))
arrays = (A, B)
functions = (sum, minimum)
for C in arrays
local C
for op in functions # together, cover both reduced_indices and reduced_indices0
axv = axisvalues(C)
@maybe_test_broken (op === minimum) @inferred(op(C; dims=1)) isa AxisArray
C1 = op(C; dims=1)
@test typeof(C1) == typeof(C)
@test axisnames(C1) == (:y,:x)
@test axisvalues(C1) === (oftype(axv[1], Base.OneTo(1)), axv[2])
@maybe_test_broken (op === minimum) @inferred(op(C, dims=2)) isa AxisArray
C2 = op(C, dims=2)
@test typeof(C2) == typeof(C)
@test axisnames(C2) == (:y,:x)
@test axisvalues(C2) === (axv[1], oftype(axv[2], Base.OneTo(1)))
@maybe_test_broken (op === minimum) @inferred(op(C, dims=(1,2))) isa AxisArray
C12 = op(C, dims=(1,2))
@test typeof(C12) == typeof(C)
@test axisnames(C12) == (:y,:x)
@test axisvalues(C12) === (oftype(axv[1], Base.OneTo(1)), oftype(axv[2], Base.OneTo(1)))
if op == sum
@test C1 == [6 15 24 33 42]
@test C2 == reshape([35,40,45], 3, 1)
@test C12 == reshape([120], 1, 1)
else
@test C1 == [1 4 7 10 13]
@test C2 == reshape([1,2,3], 3, 1)
@test C12 == reshape([1], 1, 1)
end
# TODO: add @inferred
@test (op(C, dims=Axis{:y})) == C1
@test (op(C, dims=Axis{:x})) == C2
@test (op(C, dims=(Axis{:y},Axis{:x}))) == C12
@test (op(C, dims=Axis{:y}())) == C1
@test (op(C, dims=Axis{:x}())) == C2
@test (op(C, dims=(Axis{:y}(),Axis{:x}()))) == C12
end
end
function typeof_noaxis(::AxisArray{T,N,D}) where {T,N,D}
AxisArray{T,N,D}
end
# uninferrable
C = AxisArray(collect(reshape(1:15,3,5)), Axis{:y}([:a,:b,:c]), Axis{:x}(["a","b","c","d","e"]))
for op in functions # together, cover both reduced_indices and reduced_indices0
axv = axisvalues(C)
C1 = op(C, dims=1)
@test typeof_noaxis(C1) == typeof_noaxis(C)
@test axisnames(C1) == (:y,:x)
@test axisvalues(C1) === (Base.OneTo(1), axv[2])
C2 = op(C, dims=2)
@test typeof_noaxis(C2) == typeof_noaxis(C)
@test axisnames(C2) == (:y,:x)
@test axisvalues(C2) === (axv[1], Base.OneTo(1))
C12 = op(C, dims=(1,2))
@test typeof_noaxis(C12) == typeof_noaxis(C)
@test axisnames(C12) == (:y,:x)
@test axisvalues(C12) === (Base.OneTo(1), Base.OneTo(1))
if op == sum
@test C1 == [6 15 24 33 42]
@test C2 == reshape([35,40,45], 3, 1)
@test C12 == reshape([120], 1, 1)
else
@test C1 == [1 4 7 10 13]
@test C2 == reshape([1,2,3], 3, 1)
@test C12 == reshape([1], 1, 1)
end
# TODO: These should be @inferred, but are currently broken
@test (op(C, dims=Axis{:y})) == C1
@test (op(C, dims=Axis{:x})) == C2
# Unfortunately the type of (Axis{:y},Axis{:x}) is Tuple{UnionAll,UnionAll} so methods will not specialize
@test (op(C, dims=(Axis{:y},Axis{:x}))) == C12
@test (op(C, dims=Axis{:y}())) == C1
@test (op(C, dims=Axis{:x}())) == C2
@test (op(C, dims=(Axis{:y}(),Axis{:x}()))) == C12
end
C = AxisArray(collect(reshape(1:15,3,5)), Axis{:y}([:a,:b,:c]), Axis{:x}(["a","b","c","d","e"]))
@test occursin(r"axes:\n\s+:y,", summary(C))
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 15317 | A = AxisArray(reshape(1:24, 2,3,4), .1:.1:.2, .1:.1:.3, .1:.1:.4)
D = similar(A)
D[1,1,1,1,1] = 10
@test @inferred(D[1,1,1,1,1]) == @inferred(D[1]) == D.data[1] == 10
@test @inferred(D[1,1,1,:]) == @inferred(D[1,1,1,1:1]) == @inferred(D[1,1,1,[1]]) == AxisArray([10], Axis{:dim_4}(Base.OneTo(1)))
# Test slices
@test A == A.data
@test A[:,:,:] == A[Axis{:row}(:)] == A[Axis{:col}(:)] == A[Axis{:page}(:)] == A.data[:,:,:]
# Test UnitRange slices
@test @inferred(A[1:2,:,:]) == A.data[1:2,:,:] == @inferred(A[Axis{:row}(1:2)]) == @inferred(A[Axis{1}(1:2)]) == @inferred(A[Axis{:row}(ClosedInterval(-Inf,Inf))]) == @inferred(A[[true,true],:,:])
@test @inferred(view(A,1:2,:,:)) == A.data[1:2,:,:] == @inferred(view(A,Axis{:row}(1:2))) == @inferred(view(A,Axis{1}(1:2))) == @inferred(view(A,Axis{:row}(ClosedInterval(-Inf,Inf)))) == @inferred(view(A,[true,true],:,:))
@test @inferred(A[:,1:2,:]) == A.data[:,1:2,:] == @inferred(A[Axis{:col}(1:2)]) == @inferred(A[Axis{2}(1:2)]) == @inferred(A[Axis{:col}(ClosedInterval(0.0, .25))]) == @inferred(A[:,[true,true,false],:])
@test @view(A[:,1:2,:]) == A.data[:,1:2,:] == @view(A[Axis{:col}(1:2)]) == @view(A[Axis{2}(1:2)]) == @view(A[Axis{:col}(ClosedInterval(0.0, .25))]) == @view(A[:,[true,true,false],:])
@test A[:,:,1:2] == A.data[:,:,1:2] == A[Axis{:page}(1:2)] == A[Axis{3}(1:2)] == A[Axis{:page}(ClosedInterval(-1., .22))] == A[:,:,[true,true,false,false]]
@test @view(A[:,:,1:2]) == @view(A.data[:,:,1:2]) == @view(A[Axis{:page}(1:2)]) == @view(A[Axis{3}(1:2)]) == @view(A[Axis{:page}(ClosedInterval(-1., .22))]) == @view(A[:,:,[true,true,false,false]])
# Test scalar slices
@test A[2,:,:] == A.data[2,:,:] == A[Axis{:row}(2)]
@test A[:,2,:] == A.data[:,2,:] == A[Axis{:col}(2)]
@test A[:,:,2] == A.data[:,:,2] == A[Axis{:page}(2)]
# Test fallback methods
@test A[[1 2; 3 4]] == @view(A[[1 2; 3 4]]) == A.data[[1 2; 3 4]]
VERSION >= v"1.0.0-rc" && @test_throws BoundsError A[]
# Test axis restrictions
@test A[:,:,:].axes == A.axes
@test A[Axis{:row}(1:2)].axes[1].val == A.axes[1].val[1:2]
@test A[Axis{:row}(1:2)].axes[2:3] == A.axes[2:3]
@test A[Axis{:col}(1:2)].axes[2].val == A.axes[2].val[1:2]
@test A[Axis{:col}(1:2)].axes[[1,3]] == A.axes[[1,3]]
@test A[Axis{:page}(1:2)].axes[3].val == A.axes[3].val[1:2]
@test A[Axis{:page}(1:2)].axes[1:2] == A.axes[1:2]
# Linear indexing across multiple dimensions drops tracking of those dims
@test A[:].axes[1].val == 1:length(A)
B2 = reshape(A, Val(2))
B = B2[1:2,:]
@test B.axes[1].val == A.axes[1].val[1:2]
@test B.axes[2].val == 1:Base.trailingsize(A,2)
# Logical indexing
all_inds = collect(1:length(A))
odd_inds = collect(1:2:length(A))
@test @inferred(A[trues(size(A))]) == A[:] == A[all_inds]
@test AxisArrays.axes(A[trues(size(A))]) == AxisArrays.axes(A[all_inds])
@test @inferred(A[isodd.(A)]) == A[1:2:length(A)] == A[odd_inds]
@test AxisArrays.axes(A[isodd.(A)]) == AxisArrays.axes(A[odd_inds])
@test @inferred(A[vec(trues(size(A)))]) == A[:] == A[all_inds]
@test AxisArrays.axes(A[vec(trues(size(A)))]) == AxisArrays.axes(A[all_inds])
@test @inferred(A[vec(isodd.(A))]) == A[1:2:length(A)] == A[odd_inds]
@test AxisArrays.axes(A[vec(isodd.(A))]) == AxisArrays.axes(A[odd_inds])
B = AxisArray(reshape(1:15, 5,3), .1:.1:0.5, [:a, :b, :c])
# Test indexing by Intervals
@test B[ClosedInterval(0.0, 0.5), :] == B[ClosedInterval(0.0, 0.5)] == B[:,:]
@test B[ClosedInterval(0.0, 0.3), :] == B[ClosedInterval(0.0, 0.3)] == B[1:3,:]
@test B[ClosedInterval(0.15, 0.3), :] == B[ClosedInterval(0.15, 0.3)] == B[2:3,:]
@test B[ClosedInterval(0.2, 0.5), :] == B[ClosedInterval(0.2, 0.5)] == B[2:end,:]
@test B[ClosedInterval(0.2, 0.6), :] == B[ClosedInterval(0.2, 0.6)] == B[2:end,:]
@test @view(B[ClosedInterval(0.0, 0.5), :]) == @view(B[ClosedInterval(0.0, 0.5)]) == B[:,:]
@test @view(B[ClosedInterval(0.0, 0.3), :]) == @view(B[ClosedInterval(0.0, 0.3)]) == B[1:3,:]
@test @view(B[ClosedInterval(0.15, 0.3), :]) == @view(B[ClosedInterval(0.15, 0.3)]) == B[2:3,:]
@test @view(B[ClosedInterval(0.2, 0.5), :]) == @view(B[ClosedInterval(0.2, 0.5)]) == B[2:end,:]
@test @view(B[ClosedInterval(0.2, 0.6), :]) == @view(B[ClosedInterval(0.2, 0.6)]) == B[2:end,:]
# Test Categorical indexing
@test @inferred(B[:, :a]) == @view(B[:, :a]) == B[:,1]
@test @inferred(B[:, :c]) == @view(B[:, :c]) == B[:,3]
@test @inferred(B[:, [:a]]) == @view(B[:, [:a]]) == B[:,[1]]
@test @inferred(B[:, [:c]]) == @view(B[:, [:c]]) == B[:,[3]]
@test @inferred(B[:, [:a,:c]]) == @view(B[:, [:a,:c]]) == B[:,[1,3]]
@test B[Axis{:row}(ClosedInterval(0.15, 0.3))] == @view(B[Axis{:row}(ClosedInterval(0.15, 0.3))]) == B[2:3,:]
# Test indexing by Intervals that aren't of the form step:step:last
B = AxisArray(reshape(1:15, 5,3), 1.1:0.1:1.5, [:a, :b, :c])
@test B[ClosedInterval(1.0, 1.5), :] == B[ClosedInterval(1.0, 1.5)] == B[:,:]
@test B[ClosedInterval(1.0, 1.3), :] == B[ClosedInterval(1.0, 1.3)] == B[1:3,:]
@test B[ClosedInterval(1.15, 1.3), :] == B[ClosedInterval(1.15, 1.3)] == B[2:3,:]
@test B[ClosedInterval(1.2, 1.5), :] == B[ClosedInterval(1.2, 1.5)] == B[2:end,:]
@test B[ClosedInterval(1.2, 1.6), :] == B[ClosedInterval(1.2, 1.6)] == B[2:end,:]
@test @view(B[ClosedInterval(1.0, 1.5), :]) == @view(B[ClosedInterval(1.0, 1.5)]) == B[:,:]
@test @view(B[ClosedInterval(1.0, 1.3), :]) == @view(B[ClosedInterval(1.0, 1.3)]) == B[1:3,:]
@test @view(B[ClosedInterval(1.15, 1.3), :]) == @view(B[ClosedInterval(1.15, 1.3)]) == B[2:3,:]
@test @view(B[ClosedInterval(1.2, 1.5), :]) == @view(B[ClosedInterval(1.2, 1.5)]) == B[2:end,:]
@test @view(B[ClosedInterval(1.2, 1.6), :]) == @view(B[ClosedInterval(1.2, 1.6)]) == B[2:end,:]
A = AxisArray(reshape(1:256, 4,4,4,4), Axis{:d1}(.1:.1:.4), Axis{:d2}(1//10:1//10:4//10), Axis{:d3}(["1","2","3","4"]), Axis{:d4}([:a, :b, :c, :d]))
ax1 = AxisArrays.axes(A)[1]
@test A[Axis{:d1}(2)] == A[ax1(2)]
@test A.data[1:2,:,:,:] == A[Axis{:d1}(ClosedInterval(.1,.2))] == A[ClosedInterval(.1,.2),:,:,:] == A[ClosedInterval(.1,.2),:,:,:,1] == A[ClosedInterval(.1,.2)]
@test A.data[:,1:2,:,:] == A[Axis{:d2}(ClosedInterval(1//10,2//10))] == A[:,ClosedInterval(1//10,2//10),:,:] == A[:,ClosedInterval(1//10,2//10),:,:,1] == A[:,ClosedInterval(1//10,2//10)]
@test A.data[:,:,1:2,:] == A[Axis{:d3}(["1","2"])] == A[:,:,["1","2"],:] == A[:,:,["1","2"],:,1] == A[:,:,["1","2"]]
@test A.data[:,:,:,1:2] == A[Axis{:d4}([:a,:b])] == A[:,:,:,[:a,:b]] == A[:,:,:,[:a,:b],1]
A = AxisArray(reshape(1:32, 2, 2, 2, 2, 2), .1:.1:.2, .1:.1:.2, .1:.1:.2, [:a, :b], [:c, :d])
@test A[ClosedInterval(.15, .25), ClosedInterval(.05, .15), ClosedInterval(.15, .25), :a] == A.data[2:2, 1:1, 2:2, 1, :]
@test A[Axis{:dim_5}(2)] == A.data[:, :, :, :, 2]
# Test vectors
v = AxisArray(collect(.1:.1:10.0), .1:.1:10.0)
@test v[Colon()] == v
@test v[:] == v.data[:] == v[Axis{:row}(:)]
@test v[3:8] == v.data[3:8] == v[ClosedInterval(.25,.85)] == v[Axis{:row}(3:8)] == v[Axis{:row}(ClosedInterval(.22,.88))]
# Test repeated intervals, for different range types
# First, since integers mean "location" rather than value, we have to
# create a number type from which we build a StepRange but which is
# not an Int.
module IL # put in a module so this file can be re-run
struct IntLike <: Number
val::Int
end
IntLike(x::IntLike) = x
Base.one(x::IntLike) = IntLike(0)
Base.zero(x::IntLike) = IntLike(0)
Base.isless(x::IntLike, y::IntLike) = isless(x.val, y.val)
Base.:+(x::IntLike, y::IntLike) = IntLike(x.val+y.val)
Base.:-(x::IntLike, y::IntLike) = IntLike(x.val-y.val)
Base.:/(x::IntLike, y::IntLike) = x.val / y.val
Base.rem(x::IntLike, y::IntLike) = IntLike(rem(x.val, y.val))
Base.div(x::IntLike, y::IntLike) = div(x.val, y.val)
Base.:*(x::IntLike, y::Int) = IntLike(x.val * y)
Base.:*(x::Int, y::IntLike) = y*x
Base.:/(x::IntLike, y::Int) = IntLike(x.val / y)
Base.promote_rule(::Type{IntLike}, ::Type{Int}) = Int
Base.convert(::Type{Int}, x::IntLike) = x.val
using AxisArrays
AxisArrays.axistrait(::AbstractVector{IntLike}) = AxisArrays.Dimensional
end
for (r, Irel) in ((0.1:0.1:10.0, -0.5..0.5), # FloatRange
(22.1:0.1:32.0, -0.5..0.5),
(range(0.1, stop=10.0, length=100), -0.51..0.51), # LinSpace
(IL.IntLike(1):IL.IntLike(1):IL.IntLike(100),
IL.IntLike(-5)..IL.IntLike(5))) # StepRange
local A, B
Iabs = r[20]..r[30]
A = AxisArray([1:100 -1:-1:-100], r, [:c1, :c2])
@test A[Iabs, :] == A[atindex(Irel, 25), :] == [20:30 -20:-1:-30]
@test A[Iabs, :] == A[r[25]+Irel, :] == [20:30 -20:-1:-30]
@test A[Iabs, [:c1,:c2]] == A[atindex(Irel, 25), [:c1, :c2]] == [20:30 -20:-1:-30]
@test A[Iabs, :c1] == A[atindex(Irel, 25), :c1] == collect(20:30)
@test A[atindex(Irel, 25), :c1] == collect(20:30)
@test A[atindex(Irel, [25, 35]), :c1] == [20:30 30:40]
@test A[r[[25, 35]] + Irel, :c1] == [20:30 30:40]
@test_throws BoundsError A[atindex(Irel, 5), :c1]
@test_throws BoundsError A[atindex(Irel, [5, 15, 25]), :]
B = A[r[[25, 35]] + Irel, :c1]
@test B[:,:] == B[Irel, :] == [20:30 30:40]
end
# Indexing with CartesianIndex
A = AxisArray(reshape(1:15, 3, 5), :x, :y)
@test A[2,2,CartesianIndex(())] == 5
@test A[2,CartesianIndex(()),2] == 5
@test A[CartesianIndex(()),2,2] == 5
A3 = AxisArray(reshape(1:24, 4, 3, 2), :x, :y, :z)
@test A3[2,CartesianIndex(2,2)] == 18
@test A3[CartesianIndex(2,2),2] == 18
@test A3[CartesianIndex(2,2,2)] == 18
# Extracting the full axis
axx = @inferred(A[Axis{:x}])
@test isa(axx, Axis{:x})
@test axx.val == 1:3
axy = @inferred(A[Axis{:y}])
@test isa(axy, Axis{:y})
@test axy.val == 1:5
@test_throws ArgumentError A[Axis{:z}]
# indexing by value (implicitly) in a dimensional axis
some_dates = DateTime(2016, 1, 2, 0):Hour(1):DateTime(2016, 1, 2, 2)
A1 = AxisArray(reshape(1:6, 2, 3), Axis{:x}(1:2), Axis{:y}(some_dates))
A2 = AxisArray(reshape(1:6, 2, 3), Axis{:x}(1:2), Axis{:y}(collect(some_dates)))
for A in (A1, A2)
local A
@test A[:, DateTime(2016, 1, 2, 1)] == [3; 4]
@test A[:, DateTime(2016, 1, 2, 1) .. DateTime(2016, 1, 2, 2)] == [3 5; 4 6]
@test_throws BoundsError A[:, DateTime(2016, 1, 2, 3)]
@test_throws BoundsError A[:, DateTime(2016, 1, 1, 23)]
try
A[:, DateTime(2016, 1, 2, 3)]
@test "unreachable" === false
catch err
@test err == BoundsError(A.axes[2].val, DateTime(2016, 1, 2, 3))
end
end
# Test for the expected exception type given repeated axes
A = AxisArray(rand(2,2), :x, :y)
@test_throws ArgumentError A[Axis{:x}(1), Axis{:x}(1)]
@test_throws ArgumentError A[Axis{:y}(1), Axis{:y}(1)]
# Reductions (issues #66, #62)
@test maximum(A3; dims=1) == reshape([4 16; 8 20; 12 24], 1, 3, 2)
@test maximum(A3; dims=2) == reshape([9 21; 10 22; 11 23; 12 24], 4, 1, 2)
@test maximum(A3; dims=3) == reshape(A3[:,:,2], 4, 3, 1)
acc = zeros(Int, 4, 1, 2)
Base.mapreducedim!(x->x>5, +, acc, A3)
@test acc == reshape([1 3; 2 3; 2 3; 2 3], 4, 1, 2)
# Value axistraits
@testset for typ in (IL.IntLike, Complex{Float32}, DateTime, String, Symbol, Int)
@test AxisArrays.axistrait(Axis{:foo, Vector{AxisArrays.ExactValue{typ}}}) ===
AxisArrays.axistrait(Axis{:foo, Vector{AxisArrays.TolValue{typ}}}) ===
AxisArrays.axistrait(Axis{:bar, Vector{typ}})
end
# Indexing by value using `atvalue`
A = AxisArray([1 2; 3 4], Axis{:x}([1.0,4.0]), Axis{:y}([2.0,6.1]))
@test @inferred(A[atvalue(1.0)]) == @inferred(A[atvalue(1.0), :]) == [1,2]
# `atvalue` doesn't require same type:
@test @inferred(A[atvalue(1)]) == @inferred(A[atvalue(1), :]) ==[1,2]
@test A[atvalue(4.0)] == A[atvalue(4.0),:] == [3,4]
@test A[atvalue(4)] == A[atvalue(4),:] == [3,4]
@test_throws BoundsError A[atvalue(5.0)]
@test @inferred(A[atvalue(1.0), atvalue(2.0)]) == 1
@test @inferred(A[:, atvalue(2.0)]) == [1,3]
@test @inferred(A[Axis{:x}(atvalue(4.0))]) == [3,4]
@test @inferred(A[Axis{:y}(atvalue(6.1))]) == [2,4]
@test @inferred(A[Axis{:x}(atvalue(4.00000001))]) == [3,4]
@test @inferred(A[Axis{:x}(atvalue(2.0, atol=5))]) == [1,2]
@test_throws BoundsError A[Axis{:x}(atvalue(4.00000001, rtol=0))]
# Showing Values
@test sprint(show, AxisArrays.ExactValue(1)) == "ExactValue(1)"
@test sprint(show, AxisArrays.TolValue(1., 0.1)) == "TolValue(1.0, tol=0.1)"
# Indexing with ExactValue on Dimensional axes
A = AxisArray([2.0,4.0,6.1], Axis{:x}([-10,1,3]))
@test @inferred(A[AxisArrays.ExactValue(1)]) == @inferred(A[atvalue(1)]) == 4.0
@test_throws BoundsError A[AxisArrays.ExactValue(2)]
# Indexing by array of values
A = AxisArray([1 2 3 4; 5 6 7 8; 9 10 11 12], -1:1, [5.1, 5.4, 5.7, 5.8])
@test @inferred(A[atvalue(-1), atvalue.([5.1, 5.7])]) == [1, 3]
@test_throws BoundsError A[atvalue.([1,2])]
# Indexing by value into an OffsetArray
A = AxisArray(OffsetArrays.OffsetArray([1 2; 3 4], 0:1, 1:2),
Axis{:x}([1.0,4.0]), Axis{:y}([2.0,6.1]))
@test_broken @inferred(A[atvalue(4.0)]) == [3,4]
@test @inferred(A[:, atvalue(2.0)]) == OffsetArrays.OffsetArray([1,3], 0:1)
@test_throws BoundsError A[atvalue(5.0)]
# Indexing by value directly is forbidden for indexes that are Real
@test_throws ArgumentError A[4.0]
@test_throws ArgumentError A[BigFloat(1.0)]
@test_throws ArgumentError A[1.0f0]
@test_throws ArgumentError A[:,6.1]
# Indexing with `atvalue` on Categorical axes
A = AxisArray([1 2; 3 4], Axis{:x}([:a, :b]), Axis{:y}(["c", "d"]))
@test @inferred(A[atvalue(:a)]) == @inferred(A[atvalue(:a), :]) == [1,2]
@test @inferred(A[atvalue(:b)]) == @inferred(A[atvalue(:b), :]) == [3,4]
@test_throws ArgumentError A[atvalue(:c)]
@test @inferred(A[atvalue(:a), atvalue("c")]) == 1
@test @inferred(A[:, atvalue("c")]) == [1,3]
@test @inferred(A[Axis{:x}(atvalue(:b))]) == [3,4]
@test @inferred(A[Axis{:y}(atvalue("d"))]) == [2,4]
# Index by mystery types categorically
struct Foo
x
end
A = AxisArray(1:10, Axis{:x}(map(Foo, 1:10)))
@test A[map(Foo, 3:6)] == collect(3:6)
@test_throws ArgumentError A[map(Foo, 3:11)]
@test A[Foo(4)] == 4
@test_throws ArgumentError A[Foo(0)]
# Test using dates
using Dates: Day, Month
A = AxisArray(1:365, Date(2017,1,1):Day(1):Date(2017,12,31))
@test A[Date(2017,2,1) .. Date(2017,2,28)] == collect(31 .+ (1:28)) # February
@test A[(-Day(13)..Day(14)) + Date(2017,2,14)] == collect(31 .+ (1:28))
@test A[(-Day(14)..Day(14)) + DateTime(2017,2,14,12)] == collect(31 .+ (1:28))
@test A[(Day(0)..Day(6)) + (Date(2017,1,1):Month(1):Date(2017,4,12))] == [1:7 32:38 60:66 91:97]
# Test using index types that AxisArrays doesn't understand
# This example is inspired from Interpolations.jl and would implement linear interpolation
struct WeightedIndex{T}
idx::Int
weights::Tuple{T,T}
end
Base.to_indices(A, I::Tuple{Vararg{Union{Int,WeightedIndex}}}) = I
@inline function Base._getindex(::IndexStyle, A::AbstractVector, I::WeightedIndex)
A[I.idx]*I.weights[1] + A[I.idx+1]*I.weights[2]
end
idx = WeightedIndex(2, (0.2, 0.8))
a = [2, 3, 7]
@test a[idx] ≈ 6.2
aa = AxisArray(a, :x)
@test aa[idx] ≈ 6.2
# Keyword indexing
A = AxisArray([1 2; 3 4], Axis{:x}(10:10:20), Axis{:y}(["c", "d"]))
@test @inferred(A[x=1, y=1]) == 1
@test @inferred(A[x=1]) == [1, 2]
@test axisnames(A[x=1]) == (:y,)
@test @inferred(view(A, x=1)) == [1,2]
@test parent(view(A, x=1)) isa SubArray
@test @inferred(A[x=atvalue(20), y=atvalue("d")]) == 4
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 2726 | # Specific intervals tests
import IntervalSets
# Promotion behaviors -- we only allow concrete endpoints of the same type
@test 1.0 .. 2 === 1.0 .. 2.0
@test 1//2 .. 3.5 === 0.5 .. 3.5
# IntervalSets v0.7: https://github.com/JuliaMath/IntervalSets.jl/pull/93
thrown_err = isdefined(IntervalSets, :duration) ? ArgumentError : ErrorException
@test_throws thrown_err :a .. "b"
@test_throws thrown_err 1 .. (2,3)
v = [1 .. 2, 3.0 .. 4.0]
@test v[1] === 1.0 .. 2.0
@test v[2] === 3.0 .. 4.0
# Test simple arithmetic, with promotion behaviors
@test (1.0 .. 2.0) + 1 === (2.0 .. 3.0)
@test (1 .. 2) + 1.0 === (2.0 .. 3.0)
@test (1 .. 2) + (1.0 .. 2.0) === (2.0 .. 4.0)
@test (1 .. 2) - (1 .. 2) === (-1 .. 1)
@test +(1 .. 2) === (1 .. 2)
@test -(1 .. 2) === (-2 .. -1)
@test (1..2)*3 === 3..6
@test (-1..1)*3 === -3..3
@test (2..4)/2 === 1.0 .. 2.0
@test 1/(2..4) === 1/4 .. 1/2
@test 3.2 in 3..4
@test 4 in 2.0 .. 6.0
@test 4 in 4.0 .. 4.0
@test 4 in 4.0 .. 5
@test (1..2) ⊆ (0.5 .. 2.5)
@test !((1..2) ⊆ (1.5 .. 2.5))
@test maximum(1..2) === 2
@test minimum(1..2) === 1
# Comparisons are "for-all" like, with <= and >= allowing overlap
@test 0 <= 1 .. 2
@test !(0 >= 1 .. 2)
@test 1 <= 1 .. 2
@test !(1 >= 1 .. 2)
@test !(2 <= 1 .. 2)
@test 2 >= 1 .. 2
@test !(3 <= 1 .. 2)
@test 3 >= 1 .. 2
@test 0 < 1 .. 2
@test !(0 > 1 .. 2)
@test !(1 < 1 .. 2)
@test !(1 > 1 .. 2)
@test !(2 < 1 .. 2)
@test !(2 > 1 .. 2)
@test !(3 < 1 .. 2)
@test 3 > 1 .. 2
# Test dictionary lookup by numeric value
d = Dict(1..2 => 1, 2.0..3.0 => 2)
@test d[1..2] === 1
@test d[1.0..2.0] === 1
@test d[2..3] === 2
@test d[2.0..3.0] === 2
d[0x1 .. 0x2] = 3
@test d[1..2] === 3
@test length(d) == 2
# Test repeated intervals:
@test (1..2) + [1,2,3] == [(1..2)+i for i in [1,2,3]]
@test (1..2) + (1:3) == [(1..2)+i for i in 1:3]
@test (1..2) - [1,2,3] == [(1..2)-i for i in [1,2,3]]
@test (1..2) - (1:3) == [(1..2)-i for i in 1:3]
@test [1,2,3] + (1..2)== [i+(1..2) for i in [1,2,3]]
@test (1:3) + (1..2)== [i+(1..2) for i in 1:3]
@test [1,2,3] - (1..2)== [i-(1..2) for i in [1,2,3]]
@test (1:3) - (1..2)== [i-(1..2) for i in 1:3]
# And intervals at indices
@test atindex(1..2, [3,4,5]) == [atindex(1..2, i) for i in [3,4,5]]
@test atindex(1..2, 3:5) == [atindex(1..2, i) for i in 3:5]
# Ensure comparisons are exact (and not lossy)
@assert 0.2 > 2//10 # 0.2 == 2.0000000000000001
@test !(0.1 .. 0.2 <= 2//10)
# Conversion and construction:
@test 1 .. 2 === ClosedInterval(1, 2) === ClosedInterval{Int}(1.0, 2.0) === ClosedInterval{Int}(1.0 .. 2.0)
@test 1.0 .. 2.0 === ClosedInterval(1.0, 2) === ClosedInterval{Float64}(1, 2) === ClosedInterval{Float64}(1 .. 2)
@test 1 .. 1 === ClosedInterval(1, 1) === ClosedInterval{Int}(1.0, 1.0)
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 1709 | # Intended to ensure the README stays working (this is a copy)
using AxisArrays, Unitful
import Unitful: s, ms, µs
fs = 40000
y = randn(60*fs+1)*3
for spk = (sin.(0.8:0.2:8.6) .* [0:0.01:.1; .15:.1:.95; 1:-.05:.05] .* 50,
sin.(0.8:0.4:8.6) .* [0:0.02:.1; .15:.1:1; 1:-.2:.1] .* 50)
i = rand(round(Int,.001fs):1fs)
while i+length(spk)-1 < length(y)
y[i:i+length(spk)-1] += spk
i += rand(round(Int,.001fs):1fs)
end
end
A = AxisArray([y 2y], Axis{:time}(0s:1s/fs:60s), Axis{:chan}([:c1, :c2]))
A = AxisArray(hcat(y, 2 .* y); time = (0s:1s/fs:60s), chan = ([:c1, :c2]))
A[Axis{:time}(4)]
A[time=4]
A[Axis{:chan}(:c2), Axis{:time}(1:5)]
A[chan = :c2, time = 1:5]
ax = A[40µs .. 220µs, :c1]
AxisArrays.axes(ax, 1)
A[atindex(-90µs .. 90µs, 5), :c2]
idxs = findall(diff(A[:,:c1] .< -15) .> 0)
spks = A[atindex(-200µs .. 800µs, idxs), :c1]
A[atvalue(2.5e-5s), :c1]
A[2.5e-5s..2.5e-5s, :c1]
A[atvalue(25.0µs)]
# # A possible "dynamic verification" strategy
# const readmefile = joinpath(dirname(dirname(@__FILE__)), "README.md")
# function extract_julialines(iowr, filein)
# open(filein) do iord
# while !eof(iord)
# line = readline(iord)
# if startswith(line, "julia>")
# print(iowr, line[8:end])
# while !eof(iord)
# line = readline(iord)
# if !startswith(line, " ")
# break
# end
# print(iowr, line[8:end])
# end
# end
# end
# end
# end
# tmpfile, iowr = mktemp()
# extract_julialines(iowr, readmefile)
# close(iowr)
# include(tmpfile)
# rm(tmpfile)
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 756 | using AxisArrays
using Dates
using Test
using Random
import IterTools
@testset "AxisArrays" begin
VERSION >= v"1.1" && @test isempty(detect_ambiguities(AxisArrays, Base, Core))
@testset "Core" begin
include("core.jl")
end
@testset "Intervals" begin
include("intervals.jl")
end
@testset "Indexing" begin
include("indexing.jl")
end
@testset "SortedVector" begin
include("sortedvector.jl")
end
@testset "CategoricalVector" begin
include("categoricalvector.jl")
end
@testset "Search" begin
include("search.jl")
end
@testset "Combine" begin
include("combine.jl")
end
@testset "README" begin
include("readme.jl")
end
end
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 1879 | # Internal searching methods
import AxisArrays: searchsortednearest
@test searchsortednearest(1:5, 2.5) === 3
@test searchsortednearest(1:5, prevfloat(2.5)) === 2
@test searchsortednearest([1,1,2,2,3,3], 1.5) === 3
@test searchsortednearest([1,1,2,2,3,3], 2.0) === 3
@test searchsortednearest([1,1,2,2,3,3], 2.4) === 4
@test searchsortednearest([1,1,2,2,3,3], 3.0) === 5
@test searchsortednearest([1,1,2,2,3,3], Inf) === 6
@test searchsortednearest([1,1,2,2,3,3], -Inf) === 1
# Extrapolated searching for ranges
import AxisArrays: Extrapolated
@test Extrapolated.searchsorted(1:10, -1 .. 1) === -1:1
@test Extrapolated.searchsorted(1:10, 12 .. 15) === 12:15
@test Extrapolated.searchsorted(0:2:10, -3 .. -1) === 0:0
@test Extrapolated.searchsorted(0:2:10, -5 .. 3) === -1:2
@test Extrapolated.searchsorted(1:2, 4.5 .. 4.5) === 5:4
@test Extrapolated.searchsorted(1:2, 3.5 .. 3.5) === 4:3
@test Extrapolated.searchsorted(1:2, 2.5 .. 2.5) === 3:2 === searchsorted(1:2, 2.5 .. 2.5)
@test Extrapolated.searchsorted(1:2, 1.5 .. 1.5) === 2:1 === searchsorted(1:2, 1.5 .. 1.5)
@test Extrapolated.searchsorted(1:2, 0.5 .. 0.5) === 1:0 === searchsorted(1:2, 0.5 .. 0.5)
@test Extrapolated.searchsorted(1:2, -0.5 .. -0.5) === 0:-1
@test Extrapolated.searchsorted(1:2, -1.5 .. -1.5) === -1:-2
@test Extrapolated.searchsorted(2:2:4, 0x6 .. 0x6) === 3:3
@test Extrapolated.searchsorted(2:2:4, 0x5 .. 0x5) === searchsorted(2:2:4, 0x5 .. 0x5) === 3:2
@test Extrapolated.searchsorted(2:2:4, 0x4 .. 0x4) === searchsorted(2:2:4, 0x4 .. 0x4) === 2:2
@test Extrapolated.searchsorted(2:2:4, 0x3 .. 0x3) === searchsorted(2:2:4, 0x3 .. 0x3) === 2:1
@test Extrapolated.searchsorted(2:2:4, 0x2 .. 0x2) === searchsorted(2:2:4, 0x2 .. 0x2) === 1:1
@test Extrapolated.searchsorted(2:2:4, 0x1 .. 0x1) === searchsorted(2:2:4, 0x1 .. 0x1) === 1:0
@test Extrapolated.searchsorted(2:2:4, 0x0 .. 0x0) === 0:0
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | code | 1257 |
# Test SortedVector
v = SortedVector(collect([1.; 10.; 10:15.]))
A = AxisArray(reshape(1:16, 8, 2), v, [:a, :b])
@test A[ClosedInterval(8.,12.), :] == A[2:5, :]
@test A[1. .. 1., :] == A[1:1, :]
@test A[10. .. 10., :] == A[2:3, :]
@test size(v) == (8,)
@test size(v, 1) == 8
@test size(v, 2) == 1
# test StepRange indexing
@test v[1:2:8] == [1.0; 10.0; 12.0; 14.0]
# Test SortedVector with a hierarchical index (indexed using Tuples)
data = reshape(1.:40., 20, 2)
# v = collect(zip([:a, :b, :c][rand(1:3,20)], [:x,:y][rand(1:2,20)], [:x,:y][rand(1:2,20)]))
v = [(:b, :x, :y), (:c, :y, :y), (:b, :x, :y), (:a, :y, :y), (:b, :y, :y),
(:c, :y, :y), (:b, :x, :x), (:c, :x, :y), (:c, :y, :y), (:a, :y, :y),
(:a, :y, :y), (:b, :x, :y), (:c, :x, :y), (:c, :y, :y), (:b, :x, :y),
(:a, :x, :x), (:c, :x, :x), (:c, :y, :y), (:b, :y, :x), (:b, :y, :y)]
idx = sortperm(v)
A = AxisArray(data[idx,:], SortedVector(v[idx]), [:a, :b])
@test A[:b, :] == A[5:12, :]
@test A[[:a,:c], :] == A[[1:4;13:end], :]
@test A[(:a,:y), :] == A[2:4, :]
@test A[(:c,:y,:y), :] == A[16:end, :]
@test A[ClosedInterval(:a,:b), :] == A[1:12, :]
@test A[ClosedInterval((:a,:x),(:b,:x)), :] == A[1:9, :]
@test A[[ClosedInterval((:a,:x),(:b,:x)),:c], :] == A[[1:9;13:end], :]
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | docs | 11955 | # AxisArrays.jl
[![][action-img]][action-url]
[![][pkgeval-img]][pkgeval-url]
[![][codecov-img]][codecov-url]
This package for the Julia language provides an array type (the `AxisArray`) that knows about its dimension names and axis values.
This allows for indexing by name without incurring any runtime overhead.
This permits one to implement algorithms that are oblivious to the storage order of the underlying arrays.
AxisArrays can also be indexed by the values along their axes, allowing column names or interval selections.
In contrast to similar approaches in [Images.jl](https://github.com/timholy/Images.jl) and [NamedArrays.jl](https://github.com/davidavdav/NamedArrays), this allows for type-stable selection of dimensions and compile-time axis lookup. It is also better suited for regularly sampled axes, like samples over time.
Collaboration is welcome! This is still a work-in-progress. See [the roadmap](https://github.com/JuliaArrays/AxisArrays.jl/issues/7) for the project's current direction.
### Note about `Axis{}` and keywords
An `AxisArray` stores an object of type `Axis{:name}` for each dimension,
containing both the name (a `Symbol`) and the "axis values" (an `AbstractVector`).
These types are what made compile-time lookup possible.
Instead of providing them explicitly, it is now possible to use keyword arguments
for both construction and indexing:
```julia
V = AxisArray(rand(10); row='a':'j') # AxisArray(rand(10), Axis{:row}('a':'j'))
V[row='c'] == V[Axis{:row}('c')] == V[row=3] == V[3]
```
### Note about `axes()` and `indices()`
The function `AxisArrays.axes` returns the tuple of such `Axis` objects.
Since Julia version 0.7, `Base.axes(V) == (1:10,)` gives instead the range of possible
ordinary integer indices. (This was called `Base.indices`.) Since both names are exported,
this collision results in a warning if you try to use `axes` without qualification:
```julia
julia> axes([1,2])
WARNING: both AxisArrays and Base export "axes"; uses of it in module Main must be qualified
ERROR: UndefVarError: axes not defined
```
Packages that are upgrading to support Julia 0.7+ should:
* Replace all uses of the `axes` function with the fully-qualified `AxisArrays.axes`
* Replace all uses of the deprecated `indices` function with the un-qualified `axes`
* Immediately after `using AxisArrays`, add `const axes = Base.axes`
In the future, AxisArrays will be looking for a new name for its functionality.
This will allow you to use the idiomatic `Base` name and offers an easy upgrade
path to whatever the new name will be.
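For instance, a package or script applying those steps might look like the following minimal sketch (the array, axis names, and sizes here are only illustrative):
```julia
using AxisArrays
const axes = Base.axes            # resolve the export clash in favor of Base

A = AxisArray(rand(3, 2); row='a':'c', col=[:x, :y])
axes(A)                           # ordinary integer indices, same as Base.axes(A)
AxisArrays.axes(A)                # the tuple of Axis{:row}/Axis{:col} objects
```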
## Example of currently-implemented behavior:
```julia
julia> using Pkg; pkg"add AxisArrays Unitful"
julia> using AxisArrays, Unitful, Random
julia> fs = 40000; # Generate a 40kHz noisy signal, with spike-like stuff added for testing
julia> import Unitful: s, ms, µs
julia> rng = Random.MersenneTwister(123); # Seed a random number generator for repeatable examples
julia> y = randn(rng, 60*fs+1)*3;
julia> for spk = (sin.(0.8:0.2:8.6) .* [0:0.01:.1; .15:.1:.95; 1:-.05:.05] .* 50,
sin.(0.8:0.4:8.6) .* [0:0.02:.1; .15:.1:1; 1:-.2:.1] .* 50)
i = rand(rng, round(Int,.001fs):1fs)
while i+length(spk)-1 < length(y)
y[i:i+length(spk)-1] += spk
i += rand(rng, round(Int,.001fs):1fs)
end
end
julia> A = AxisArray(hcat(y, 2 .* y); time = (0s:1s/fs:60s), chan = ([:c1, :c2]))
2-dimensional AxisArray{Float64,2,...} with axes:
:time, 0.0 s:2.5e-5 s:60.0 s
:chan, Symbol[:c1, :c2]
And data, a 2400001×2 Array{Float64,2}:
3.5708 7.14161
6.14454 12.2891
3.42795 6.85591
1.37825 2.75649
-1.19004 -2.38007
-1.99414 -3.98828
2.9429 5.88581
-0.226449 -0.452898
0.821446 1.64289
-0.582687 -1.16537
⋮
-3.50593 -7.01187
2.26783 4.53565
-0.16902 -0.33804
-3.84852 -7.69703
0.226457 0.452914
0.560809 1.12162
4.67663 9.35326
-2.41005 -4.8201
-3.71612 -7.43224
```
AxisArrays behave like regular arrays, but they additionally use the axis
information to enable all sorts of fancy behaviors. For example, we can specify
indices in *any* order, just so long as we annotate them with the axis name:
```julia
julia> A[time=4] # or A[Axis{:time}(4)]
1-dimensional AxisArray{Float64,1,...} with axes:
:chan, Symbol[:c1, :c2]
And data, a 2-element Array{Float64,1}:
1.37825
2.75649
julia> A[chan = :c2, time = 1:5] # or A[Axis{:chan}(:c2), Axis{:time}(1:5)]
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 0.0 s:2.5e-5 s:0.0001 s
And data, a 5-element Array{Float64,1}:
7.14161
12.2891
6.85591
2.75649
-2.38007
```
We can also index by the *values* of each axis using an `Interval` type that
selects all values between two endpoints `a .. b` or the axis values directly.
Notice that the returned AxisArray still has axis information itself... and it
still has the correct time information for those datapoints!
```julia
julia> A[40µs .. 220µs, :c1]
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 5.0e-5 s:2.5e-5 s:0.0002 s
And data, a 7-element Array{Float64,1}:
3.42795
1.37825
-1.19004
-1.99414
2.9429
-0.226449
0.821446
julia> AxisArrays.axes(ans, 1)
AxisArrays.Axis{:time,StepRangeLen{Quantity{Float64, Dimensions:{𝐓}, Units:{s}},Base.TwicePrecision{Quantity{Float64, Dimensions:{𝐓}, Units:{s}}},Base.TwicePrecision{Quantity{Float64, Dimensions:{𝐓}, Units:{s}}}}}(5.0e-5 s:2.5e-5 s:0.0002 s)
```
You can also index by a single value using `atvalue(t)`.
This function is not needed for categorical axes like `:chan` here,
as `:c1` is a `Symbol` which can't be confused with an integer index.
Using `atvalue()` will drop a dimension (like using a single integer).
Indexing with an `Interval(lo, hi)` type retains dimensions, even
when the ends of the interval are equal (like using a range `1:1`):
```julia
julia> A[atvalue(2.5e-5s), :c1]
6.14453912336772
julia> A[2.5e-5s..2.5e-5s, :c1]
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 2.5e-5 s:2.5e-5 s:2.5e-5 s
And data, a 1-element Array{Float64,1}:
6.14454
```
You can even index by multiple values by broadcasting `atvalue` over an array:
```julia
julia> A[atvalue.([2.5e-5s, 75.0µs])]
2-dimensional AxisArray{Float64,2,...} with axes:
:time, Quantity{Float64, Dimensions:{𝐓}, Units:{s}}[2.5e-5 s, 7.5e-5 s]
:chan, Symbol[:c1, :c2]
And data, a 2×2 Array{Float64,2}:
6.14454 12.2891
1.37825 2.75649
```
Sometimes, though, what we're really interested in is a window of time about a
specific index. One of the operations above (looking for values in the window from 40µs
to 220µs) might be more clearly expressed as a symmetrical window about a
specific index where we know something interesting happened. To represent this,
we use the `atindex` function:
```julia
julia> A[atindex(-90µs .. 90µs, 5), :c2]
1-dimensional AxisArray{Float64,1,...} with axes:
:time_sub, -7.5e-5 s:2.5e-5 s:7.500000000000002e-5 s
And data, a 7-element Array{Float64,1}:
6.85591
2.75649
-2.38007
-3.98828
5.88581
-0.452898
1.64289
```
Note that the returned AxisArray has its time axis shifted to represent the
interval about the given index! This simple concept can be extended to some
very powerful behaviors. For example, let's threshold our data and find windows
about those threshold crossings.
```julia
julia> idxs = findall(diff(A[:,:c1] .< -15) .> 0);
julia> spks = A[atindex(-200µs .. 800µs, idxs), :c1]
2-dimensional AxisArray{Float64,2,...} with axes:
:time_sub, -0.0002 s:2.5e-5 s:0.0008 s
:time_rep, Quantity{Float64, Dimensions:{𝐓}, Units:{s}}[0.162 s, 0.20045 s, 0.28495 s, 0.530325 s, 0.821725 s, 1.0453 s, 1.11967 s, 1.1523 s, 1.22085 s, 1.6253 s … 57.0094 s, 57.5818 s, 57.8716 s, 57.8806 s, 58.4353 s, 58.7041 s, 59.1015 s, 59.1783 s, 59.425 s, 59.5657 s]
And data, a 41×247 Array{Float64,2}:
0.672063 7.25649 0.633375 … 1.54583 5.81194 -4.706
-1.65182 2.57487 0.477408 3.09505 3.52478 4.13037
4.46035 2.11313 4.78372 1.23385 7.2525 3.57485
5.25651 -2.19785 3.05933 0.965021 6.78414 5.94854
7.8537 0.345008 0.960533 0.812989 0.336715 0.303909
0.466816 0.643649 -3.67087 … 3.92978 -3.1242 0.789722
-6.0445 -13.2441 -4.60716 0.265144 -4.50987 -8.84897
-9.21703 -13.2254 -14.4409 -8.6664 -13.3457 -11.6213
-16.1809 -22.7037 -25.023 -15.9376 -28.0817 -16.996
-23.2671 -31.2021 -25.3787 -24.4914 -32.2599 -26.1118
⋮ ⋱ ⋮
-0.301629 0.0683982 -4.36574 1.92362 -5.12333 -3.4431
4.7182 1.18615 4.40717 -4.51757 -8.64314 0.0800021
-2.43775 -0.151882 -1.40817 -3.38555 -2.23418 0.728549
3.2482 -0.60967 0.471288 … 2.53395 0.468817 -3.65905
-4.26967 2.24747 -3.13758 1.74967 4.5052 -0.145357
-0.752487 1.69446 -1.20491 1.71429 1.81936 0.290158
4.64348 -3.94187 -1.59213 7.15428 -0.539748 4.82309
1.09652 -2.66999 0.521931 -3.80528 1.70421 3.40583
-0.94341 2.60785 -3.34291 … 1.10584 4.31118 3.6404
```
By indexing with a repeated interval, we have *added* a dimension to the
output! The returned AxisArray's columns specify each repetition of the
interval, and each datapoint in the column represents a timepoint within that
interval, adjusted by the time of the threshold crossing. The best part here
is that the returned matrix knows precisely where its data came from, and has
labeled its dimensions appropriately. Not only is there the proper time
base for each waveform, but we also have recorded the event times as the axis
across the columns.
## Indexing
Two main types of Axes supported by default include:
* Categorical axis -- These are vectors of labels, normally symbols or
strings. Elements or slices can be selected by elements or vectors
of elements.
* Dimensional axis -- These are sorted vectors or iterators that can
be selected by `Intervals`. These are commonly used for sequences of
times or date-times. For regular sample rates, ranges can be used.
Here is an example with a Dimensional axis representing a time
sequence along rows and a Categorical axis of symbols for column
headers.
```julia
B = AxisArray(reshape(1:15, 5, 3), .1:.1:0.5, [:a, :b, :c])
B[row = (0.2..0.4)] # restrict the AxisArray along the time axis
B[0.0..0.3, [:a, :c]] # select an interval and two of the columns
```
User-defined axis types can be added along with custom indexing
behaviors.
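For instance, the test suite in this repository registers a custom scalar type as a dimensional (sorted) axis by defining `AxisArrays.axistrait`. A minimal sketch of that pattern follows; the `Tick` type is hypothetical, and a fully working dimensional axis also needs the ordering and arithmetic methods shown in the `IntLike` test example:
```julia
struct Tick <: Number
    val::Int
end
Base.isless(a::Tick, b::Tick) = isless(a.val, b.val)

# Declare that vectors of Tick should be treated as a dimensional (sorted) axis:
AxisArrays.axistrait(::AbstractVector{Tick}) = AxisArrays.Dimensional
```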
### Example: compute the intensity-weighted mean along the z axis
```julia
B = AxisArray(randn(100,100,100), :x, :y, :z)
Itotal = sumz = 0.0
for iter in CartesianIndices(Base.axes(B)) # traverses in storage order for cache efficiency
global Itotal, sumz
I = B[iter] # intensity in a single voxel
Itotal += I
sumz += I * iter[axisdim(B, Axis{:z})] # axisdim "looks up" the z dimension
end
meanz = sumz/Itotal
```
The intention is that all of these operations are just as efficient as they would be if you used traditional position-based indexing with all the inherent assumptions about the storage order of `B`.
<!-- badges -->
[pkgeval-img]: https://juliaci.github.io/NanosoldierReports/pkgeval_badges/A/AxisArrays.svg
[pkgeval-url]: https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html
[action-img]: https://github.com/JuliaArrays/AxisArrays.jl/workflows/Unit%20test/badge.svg
[action-url]: https://github.com/JuliaArrays/AxisArrays.jl/actions
[codecov-img]: https://codecov.io/github/JuliaArrays/AxisArrays.jl/coverage.svg?branch=master
[codecov-url]: https://codecov.io/gh/JuliaArrays/AxisArrays.jl
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | docs | 10733 | ```@meta
DocTestSetup = quote
using AxisArrays, Unitful, Random
import Unitful: s, ms, µs
rng = MersenneTwister(123)
fs = 40000
y = randn(rng, 60*fs+1)*3
for spk = (sin.(0.8:0.2:8.6) .* [0:0.01:.1; .15:.1:.95; 1:-.05:.05] .* 50,
sin.(0.8:0.4:8.6) .* [0:0.02:.1; .15:.1:1; 1:-.2:.1] .* 50)
i = rand(rng, round(Int,.001fs):1fs)
while i+length(spk)-1 < length(y)
y[i:i+length(spk)-1] += spk
i += rand(rng, round(Int,.001fs):1fs)
end
end
A = AxisArray([y 2y], Axis{:time}(0s:1s/fs:60s), Axis{:chan}([:c1, :c2]))
end
```
# AxisArrays
[](https://travis-ci.org/JuliaArrays/AxisArrays.jl) [](https://coveralls.io/github/JuliaArrays/AxisArrays.jl?branch=master)
This package for the Julia language provides an array type (the `AxisArray`) that knows about its dimension names and axis values.
This allows for indexing with the axis name without incurring any runtime overhead.
AxisArrays can also be indexed by the values of their axes, allowing column names or interval selections.
This permits one to implement algorithms that are oblivious to the storage order of the underlying arrays.
In contrast to similar approaches in [Images.jl](https://github.com/timholy/Images.jl) and [NamedArrays.jl](https://github.com/davidavdav/NamedArrays), this allows for type-stable selection of dimensions and compile-time axis lookup. It is also better suited for regularly sampled axes, like samples over time.
Collaboration is welcome! This is still a work-in-progress. See [the roadmap](https://github.com/JuliaArrays/AxisArrays.jl/issues/7) for the project's current direction.
## Example of currently-implemented behavior:
```julia
julia> Pkg.add("AxisArrays")
julia> using AxisArrays, Unitful, Random
julia> import Unitful: s, ms, µs
julia> rng = MersenneTwister(123) # Seed a random number generator for repeatable examples
julia> fs = 40000 # Generate a 40kHz noisy signal, with spike-like stuff added for testing
julia> y = randn(rng, 60*fs+1)*3
julia> for spk = (sin.(0.8:0.2:8.6) .* [0:0.01:.1; .15:.1:.95; 1:-.05:.05] .* 50,
sin.(0.8:0.4:8.6) .* [0:0.02:.1; .15:.1:1; 1:-.2:.1] .* 50)
i = rand(rng, round(Int,.001fs):1fs)
while i+length(spk)-1 < length(y)
y[i:i+length(spk)-1] += spk
i += rand(rng, round(Int,.001fs):1fs)
end
end
```
```jldoctest
julia> A = AxisArray([y 2y], Axis{:time}(0s:1s/fs:60s), Axis{:chan}([:c1, :c2]))
2-dimensional AxisArray{Float64,2,...} with axes:
:time, 0.0 s:2.5e-5 s:60.0 s
:chan, Symbol[:c1, :c2]
And data, a 2400001×2 Array{Float64,2}:
3.5708 7.14161
6.14454 12.2891
3.42795 6.85591
1.37825 2.75649
-1.19004 -2.38007
-1.99414 -3.98828
2.9429 5.88581
-0.226449 -0.452898
0.821446 1.64289
-0.582687 -1.16537
⋮
-3.50593 -7.01187
2.26783 4.53565
-0.16902 -0.33804
-3.84852 -7.69703
0.226457 0.452914
0.560809 1.12162
4.67663 9.35326
-2.41005 -4.8201
-3.71612 -7.43224
```
AxisArrays behave like regular arrays, but they additionally use the axis
information to enable all sorts of fancy behaviors. For example, we can specify
indices in *any* order, just so long as we annotate them with the axis name:
```jldoctest
julia> A[Axis{:time}(4)]
1-dimensional AxisArray{Float64,1,...} with axes:
:chan, Symbol[:c1, :c2]
And data, a 2-element Array{Float64,1}:
1.378246861221241
2.756493722442482
julia> A[Axis{:chan}(:c2), Axis{:time}(1:5)]
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 0.0 s:2.5e-5 s:0.0001 s
And data, a 5-element Array{Float64,1}:
7.141607285917661
12.28907824673544
6.855905417203194
2.756493722442482
-2.380074475771338
```
We can also index by the *values* of each axis using an `Interval` type that
selects all values between two endpoints `a .. b` or the axis values directly.
Notice that the returned AxisArray still has axis information itself... and it
still has the correct time information for those datapoints!
```jldoctest
julia> A[40µs .. 220µs, :c1]
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 5.0e-5 s:2.5e-5 s:0.0002 s
And data, a 7-element Array{Float64,1}:
3.427952708601597
1.378246861221241
-1.190037237885669
-1.994137635575063
2.9429034802756004
-0.22644919919326786
0.8214461136364685
julia> AxisArrays.axes(ans, 1)
Axis{:time,StepRangeLen{Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}},Base.TwicePrecision{Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}}},Base.TwicePrecision{Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}}}}}(5.0e-5 s:2.5e-5 s:0.0002 s)
```
You can also index by a single value on an axis using `atvalue`. This will drop
a dimension. Indexing with an `Interval` type retains dimensions, even
when the ends of the interval are equal:
```jldoctest
julia> A[atvalue(2.5e-5s), :c1]
6.14453912336772
julia> A[2.5e-5s..2.5e-5s, :c1]
1-dimensional AxisArray{Float64,1,...} with axes:
:time, 2.5e-5 s:2.5e-5 s:2.5e-5 s
And data, a 1-element Array{Float64,1}:
6.14453912336772
```
You can even index by multiple values by broadcasting `atvalue` over an array:
```jldoctest
julia> A[atvalue.([2.5e-5s, 75.0µs])]
2-dimensional AxisArray{Float64,2,...} with axes:
:time, Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}}[2.5e-5 s, 7.5e-5 s]
:chan, Symbol[:c1, :c2]
And data, a 2×2 Array{Float64,2}:
6.14454 12.2891
1.37825 2.75649
```
Sometimes, though, what we're really interested in is a window of time about a
specific index. One of the operations above (looking for values in the window from 40µs
to 220µs) might be more clearly expressed as a symmetrical window about a
specific index where we know something interesting happened. To represent this,
we use the `atindex` function:
```jldoctest
julia> A[atindex(-90µs .. 90µs, 5), :c2]
1-dimensional AxisArray{Float64,1,...} with axes:
:time_sub, -7.5e-5 s:2.5e-5 s:7.5e-5 s
And data, a 7-element Array{Float64,1}:
12.28907824673544
6.855905417203194
2.756493722442482
-2.380074475771338
-3.988275271150126
5.885806960551201
-0.4528983983865357
```
Note that the returned AxisArray has its time axis shifted to represent the
interval about the given index! This simple concept can be extended to some
very powerful behaviors. For example, let's threshold our data and find windows
about those threshold crossings.
```jldoctest
julia> idxs = findall(diff(A[:,:c1] .< -15) .> 0);
julia> spks = A[atindex(-200µs .. 800µs, idxs), :c1]
2-dimensional AxisArray{Float64,2,...} with axes:
:time_sub, -0.0002 s:2.5e-5 s:0.0008 s
:time_rep, Unitful.Quantity{Float64,𝐓,Unitful.FreeUnits{(s,),𝐓,nothing}}[0.161275 s, 0.489925 s, 0.957175 s, 1.1457 s, 1.40185 s, 1.84193 s, 2.07365 s, 2.32947 s, 2.7763 s, 2.79275 s … 57.6724 s, 57.7152 s, 57.749 s, 58.1109 s, 58.3783 s, 58.4178 s, 58.921 s, 59.1693 s, 59.6546 s, 59.7824 s]
And data, a 41×273 Array{Float64,2}:
-2.47171 -1.72242 4.54491 … 2.74969 3.1869 -2.00435
6.78576 3.65903 5.14183 -0.98535 3.96603 -5.74065
1.56584 1.88131 0.470257 2.66664 5.27674 0.0610194
4.78242 3.20142 3.28502 5.20484 -3.66085 1.16247
3.23148 -1.24878 -0.0252124 5.46585 4.88651 3.64283
6.5714 0.572557 3.038 … -0.974689 2.61297 7.3496
4.46643 -0.444754 -4.52857 0.304449 -1.54659 -2.53197
-9.57806 -1.29114 -2.23969 -9.10793 -6.35711 -5.06038
-12.2567 -5.06283 -8.53581 -11.9826 -14.868 -14.0543
-24.5458 -19.9823 -20.0798 -20.3065 -18.5437 -25.3609
⋮ ⋱ ⋮
2.14059 -0.365031 1.36771 -4.23763 5.9211 -3.84708
3.58157 2.87076 0.835568 -2.27752 1.18686 2.3412
6.7953 -1.32384 -3.0897 0.464151 -1.12327 -2.14844
1.19649 2.44709 -5.16029 … -0.965397 2.37465 -2.36185
-1.57253 0.526027 0.831144 0.6505 3.61602 1.93462
0.739684 -1.74925 -6.18072 -7.36229 -0.187708 1.97774
0.645211 1.04006 -1.33676 4.30262 -4.46544 -0.278097
1.32901 -1.74821 1.94781 0.780325 3.22951 -0.436806
0.387814 0.128453 -0.00287742 … -1.51196 -2.10081 -2.26663
```
By indexing with a repeated interval, we have *added* a dimension to the
output! The returned AxisArray's columns specify each repetition of the
interval, and each datapoint in the column represents a timepoint within that
interval, adjusted by the time of the threshold crossing. The best part here
is that the returned matrix knows precisely where its data came from, and has
labeled its dimensions appropriately. Not only is there the proper time
base for each waveform, but we also have recorded the event times as the axis
across the columns.
## Indexing
### Indexing axes
Two main types of Axes supported by default include:
* Categorical axis -- These are vectors of labels, normally symbols or
strings. Elements or slices can be selected by elements or vectors
of elements.
* Dimensional axis -- These are sorted vectors or iterators that can
be selected by `Intervals`. These are commonly used for sequences of
times or date-times. For regular sample rates, ranges can be used.
Here is an example with a Dimensional axis representing a time
sequence along rows and a Categorical axis of symbols for column
headers.
```julia
B = AxisArray(reshape(1:15, 5, 3), .1:.1:0.5, [:a, :b, :c])
B[Axis{:row}(Interval(.2,.4))] # restrict the AxisArray along the time axis
B[Interval(0.,.3), [:a, :c]] # select an interval and two of the columns
```
User-defined axis types can be added along with custom indexing
behaviors.
### Example: compute the intensity-weighted mean along the z axis
```julia
B = AxisArray(randn(100,100,100), :x, :y, :z)
Itotal = sumz = 0.0
for iter in CartesianIndices(Base.axes(B)) # traverses in storage order for cache efficiency
global Itotal, sumz
I = B[iter] # intensity in a single voxel
Itotal += I
sumz += I * iter[axisdim(B, Axis{:z})] # axisdim "looks up" the z dimension
end
meanz = sumz/Itotal
```
The intention is that all of these operations are just as efficient as they would be if you used traditional position-based indexing with all the inherent assumptions about the storage order of `B`.
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.4.7 | 16351be62963a67ac4083f748fdb3cca58bfd52f | docs | 40 | ```@autodocs
Modules = [AxisArrays]
```
| AxisArrays | https://github.com/JuliaArrays/AxisArrays.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 263 | using Documenter, QueryOperators
makedocs(
modules = [QueryOperators],
sitename = "QueryOperators.jl",
analytics="UA-132838790-1",
pages = [
"Introduction" => "index.md"
]
)
deploydocs(
repo = "github.com/queryverse/QueryOperators.jl.git"
)
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 7472 | module NamedTupleUtilities
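# Note: every helper below is a @generated function. The output field names are computed from
# the NamedTuple's type parameters, so selecting/removing/renaming fields is resolved at
# compile time and the result is type-stable, with no runtime search over names.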
"""
select(a::NamedTuple, v::Val{n})
Select a field `n` from `a` if it is in `a`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.select((a=1,b=2,c=3),Val(:a))
(a = 1,)
```
"""
@generated function select(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if i == bn)...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
remove(a::NamedTuple, v::Val{n})
Remove the field `n` from `a` if it is present.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.remove((a=1,b=2,c=3),Val(:c))
(a = 1, b = 2)
```
"""
@generated function remove(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if i != bn)...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
range(a::NamedTuple, b::Val{n}, c::Val{n})
Return a NamedTuple which retains the fields from `b` to `c` in `a`.
If `b` is not in `a`, then it will return the empty NamedTuple.
If `c` is not in `a`, then it will return everything starting with `b`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.range((a=1,b=2,c=3),Val(:a),Val(:b))
(a = 1, b = 2)
```
"""
@generated function range(a::NamedTuple{an}, ::Val{bn}, ::Val{cn}) where {an, bn, cn}
rangeStarted = false
names = Symbol[]
for n in an
if n == bn
rangeStarted = true
end
if rangeStarted
push!(names, n)
end
if n == cn
rangeStarted = false
break
end
end
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :( NamedTuple{$(names...,),$types}(($(vals...),)) )
end
"""
rename(a::NamedTuple, b::Val{n}, c::Val{n})
Return a NamedTuple derived from `a` in which the field `b` is renamed to `c`.
If `b` is not in `a`, then it will return the original NamedTuple.
If `c` is in `a`, then `ERROR: duplicate field name in NamedTuple: "c" is not unique` will occur.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:a),Val(:d))
(d = 1, b = 2, c = 3)
julia> QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:m),Val(:d))
(a = 1, b = 2, c = 3)
julia> QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:a),Val(:c))
ERROR: duplicate field name in NamedTuple: "c" is not unique
```
"""
@generated function rename(a::NamedTuple{an}, ::Val{bn}, ::Val{cn}) where {an, bn, cn}
names = Symbol[]
typesArray = DataType[]
vals = Expr[]
for n in an
if n == bn
push!(names, cn)
else
push!(names, n)
end
push!(typesArray, fieldtype(a, n))
push!(vals, :(getfield(a, $(QuoteNode(n)))))
end
types = Tuple{typesArray...}
return :(NamedTuple{$(names...,),$types}(($(vals...),)))
end
"""
startswith(a::NamedTuple, b::Val{n})
Return a NamedTuple which retains the fields with names starting with `b` in `a`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.startswith((abc=1,bcd=2,cde=3),Val(:a))
(abc = 1,)
```
"""
@generated function startswith(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if Base.startswith(String(i), String(bn)))...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
not_startswith(a::NamedTuple, b::Val{n})
Return a NamedTuple which retains the fields with names that do not start with `b` in `a`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.not_startswith((abc=1,bcd=2,cde=3),Val(:a))
(bcd = 2, cde = 3)
```
"""
@generated function not_startswith(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if !Base.startswith(String(i), String(bn)))...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
endswith(a::NamedTuple, b::Val{n})
Return a NamedTuple which retains the fields with names ending with `b` in `a`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.endswith((abc=1,bcd=2,cde=3),Val(:d))
(bcd = 2,)
```
"""
@generated function endswith(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if Base.endswith(String(i), String(bn)))...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
not_endswith(a::NamedTuple, b::Val{n})
Return a NamedTuple which retains the fields with names that do not end with `b` in `a`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.not_endswith((abc=1,bcd=2,cde=3),Val(:d))
(abc = 1, cde = 3)
```
"""
@generated function not_endswith(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if !Base.endswith(String(i), String(bn)))...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
occursin(a::NamedTuple, b::Val{n})
Return a NamedTuple which retains the fields with names containing `b` as a substring.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.occursin((abc=1,bcd=2,cde=3),Val(:d))
(bcd = 2, cde = 3)
```
"""
@generated function occursin(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if Base.occursin(String(bn), String(i)))...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
not_occursin(a::NamedTuple, b::Val{n})
Return a NamedTuple which retains the fields whose names do not contain `b` as a substring.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.not_occursin((abc=1,bcd=2,cde=3),Val(:d))
(abc = 1,)
```
"""
@generated function not_occursin(a::NamedTuple{an}, ::Val{bn}) where {an, bn}
names = ((i for i in an if !Base.occursin(String(bn), String(i)))...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
"""
oftype(a::NamedTuple, b::DataType)
Returns a NamedTuple which retains the fields whose element type is a subtype of `b`.
```jldoctest
julia> QueryOperators.NamedTupleUtilities.oftype((a = [4,5,6], b = [3.,2.,1.], c = ["He","llo","World!"]), Val(Int64))
(a = [4, 5, 6],)
julia> QueryOperators.NamedTupleUtilities.oftype((a = [4,5,6], b = [3.,2.,1.], c = ["He","llo","World!"]), Val(Number))
(a = [4, 5, 6], b = [3.0, 2.0, 1.0])
julia> QueryOperators.NamedTupleUtilities.oftype((a = [4,5,6], b = [3.,2.,1.], c = ["He","llo","World!"]), Val(Float32))
NamedTuple()
```
"""
@generated function oftype(a::NamedTuple{an}, ::Val{b}) where {an, b}
names = ((i for i in an if fieldtype(a, i) <: b)...,)
types = Tuple{(fieldtype(a, n) for n in names)...}
vals = Expr[:(getfield(a, $(QuoteNode(n)))) for n in names]
return :(NamedTuple{$names,$types}(($(vals...),)))
end
end | QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 873 | module QueryOperators
using DataStructures
using IteratorInterfaceExtensions
using TableShowUtils
import DataValues
export Grouping, key
include("operators.jl")
include("NamedTupleUtilities.jl")
include("enumerable/enumerable.jl")
include("enumerable/enumerable_groupby.jl")
include("enumerable/enumerable_join.jl")
include("enumerable/enumerable_groupjoin.jl")
include("enumerable/enumerable_orderby.jl")
include("enumerable/enumerable_map.jl")
include("enumerable/enumerable_gather.jl")
include("enumerable/enumerable_filter.jl")
include("enumerable/enumerable_mapmany.jl")
include("enumerable/enumerable_defaultifempty.jl")
include("enumerable/enumerable_count.jl")
include("enumerable/enumerable_take.jl")
include("enumerable/enumerable_drop.jl")
include("enumerable/enumerable_unique.jl")
include("enumerable/show.jl")
include("source_iterable.jl")
end # module
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 3354 | function count end
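# Each operator below follows the same pattern: an empty generic function (e.g. `function count end`)
# that the concrete implementations add methods to, plus a macro that captures the user-supplied
# expression with `Expr(:quote, ...)` so both the anonymous function and its original source
# expression are passed along to the implementation.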
macro count(source, f)
q = Expr(:quote, f)
:(count($(esc(source)), $(esc(f)), $(esc(q))))
end
macro count(source)
:(count($(esc(source))))
end
function default_if_empty end
macro default_if_empty(source)
:(default_if_empty($(esc(source))))
end
macro default_if_empty(source, default_value)
:(default_if_empty($(esc(source)), $(esc(default_value))))
end
function filter end
macro filter(source, f)
q = Expr(:quote, f)
:(QueryOperators.filter($(esc(source)), $(esc(f)), $(esc(q))))
end
function gather end
# Forward the macro's positional arguments (column selectors such as `Not(:col)`) to `gather`.
macro gather(source, args...)
:(gather($(esc(source)), $(map(esc, args)...)))
end
function groupby end
macro groupby(source,elementSelector,resultSelector)
q_elementSelector = Expr(:quote, elementSelector)
q_resultSelector = Expr(:quote, resultSelector)
:(groupby($(esc(source)), $(esc(elementSelector)), $(esc(q_elementSelector)), $(esc(resultSelector)), $(esc(q_resultSelector))))
end
macro groupby_simple(source,elementSelector)
q_elementSelector = Expr(:quote, elementSelector)
:(groupby($(esc(source)), $(esc(elementSelector)), $(esc(q_elementSelector))))
end
function groupjoin end
macro groupjoin(outer, inner, outerKeySelector, innerKeySelector, resultSelector)
q_outerKeySelector = Expr(:quote, outerKeySelector)
q_innerKeySelector = Expr(:quote, innerKeySelector)
q_resultSelector = Expr(:quote, resultSelector)
:(groupjoin($(esc(outer)), $(esc(inner)), $(esc(outerKeySelector)), $(esc(q_outerKeySelector)), $(esc(innerKeySelector)),$(esc(q_innerKeySelector)), $(esc(resultSelector)),$(esc(q_resultSelector))))
end
function join end
macro join(outer, inner, outerKeySelector, innerKeySelector, resultSelector)
q_outerKeySelector = Expr(:quote, outerKeySelector)
q_innerKeySelector = Expr(:quote, innerKeySelector)
q_resultSelector = Expr(:quote, resultSelector)
:(join($(esc(outer)), $(esc(inner)), $(esc(outerKeySelector)), $(esc(q_outerKeySelector)), $(esc(innerKeySelector)),$(esc(q_innerKeySelector)), $(esc(resultSelector)),$(esc(q_resultSelector))))
end
function map end
macro map(source, f)
q = Expr(:quote, f)
:(map($(esc(source)), $(esc(f)), $(esc(q))))
end
function mapmany end
macro mapmany(source,collectionSelector,resultSelector)
q_collectionSelector = Expr(:quote, collectionSelector)
q_resultSelector = Expr(:quote, resultSelector)
:(mapmany($(esc(source)), $(esc(collectionSelector)), $(esc(q_collectionSelector)), $(esc(resultSelector)), $(esc(q_resultSelector))))
end
function orderby end
macro orderby(source, f)
q = Expr(:quote, f)
:(orderby($(esc(source)), $(esc(f)), $(esc(q))))
end
function orderby_descending end
macro orderby_descending(source, f)
q = Expr(:quote, f)
:(orderby_descending($(esc(source)), $(esc(f)), $(esc(q))))
end
function thenby end
macro thenby(source, f)
q = Expr(:quote, f)
:(thenby($(esc(source)), $(esc(f)), $(esc(q))))
end
function thenby_descending end
macro thenby_descending(source, f)
q = Expr(:quote, f)
:(thenby_descending($(esc(source)), $(esc(f)), $(esc(q))))
end
function take end
macro take(source, n)
:(take($(esc(source)), $(esc(n))))
end
function drop end
macro drop(source, n)
:(drop($(esc(source)), $(esc(n))))
end
function unique end
macro unique(source, f)
q = Expr(:quote, f)
:(unique($(esc(source)), $(esc(f)), $(esc(q))))
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 710 | struct EnumerableIterable{T,S} <: Enumerable
source::S
end
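# `query` wraps an arbitrary source: it checks that the source is iterable via
# IteratorInterfaceExtensions, grabs its typed iterator, and wraps it in EnumerableIterable
# so the element type (and length, when known) are preserved.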
function query(source)
IteratorInterfaceExtensions.isiterable(source) || error()
typed_source = IteratorInterfaceExtensions.getiterator(source)
T = eltype(typed_source)
S = typeof(typed_source)
source_enumerable = EnumerableIterable{T,S}(typed_source)
return source_enumerable
end
Base.IteratorSize(::Type{EnumerableIterable{T,S}}) where {T,S} = haslength(S)
Base.eltype(::Type{EnumerableIterable{T,S}}) where {T,S} = T
Base.length(iter::EnumerableIterable{T,S}) where {T,S} = length(iter.source)
function Base.iterate(iter::EnumerableIterable{T,S}, state...) where {T,S}
return iterate(iter.source, state...)
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 285 | abstract type Enumerable end
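# Defaults shared by all operators: the iterator size is unknown unless a concrete operator
# overrides it, and `haslength` collapses HasShape sources to HasLength so wrappers can
# forward length information whenever the underlying source provides it.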
Base.IteratorSize(::Type{T}) where {T <: Enumerable} = Base.SizeUnknown()
IteratorInterfaceExtensions.isiterable(x::Enumerable) = true
haslength(S) = Base.IteratorSize(S) isa Union{Base.HasLength, Base.HasShape} ? Base.HasLength() : Base.IteratorSize(S)
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 193 | function count(source::Enumerable, filter::Function, filter_expr::Expr)
return Base.count(filter, source)
end
function count(source::Enumerable)
return Base.count(i->true, source)
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 1599 | struct EnumerableDefaultIfEmpty{T,S} <: Enumerable
source::S
default_value::T
end
Base.eltype(iter::Type{EnumerableDefaultIfEmpty{T,S}}) where {T,S} = T
_default_value_expr(::Type{T}) where {T} = :( DataValues.DataValue{$T}() )
_default_value_expr(::Type{T}) where {T<:DataValues.DataValue} = :( $T() )
function _default_value_expr(::Type{T}) where {T<:NamedTuple}
return :( NamedTuple{$(fieldnames(T))}( ($( (_default_value_expr(fieldtype(T,i)) for i in 1:length(fieldnames(T)))... ),)) )
end
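# The generated method below builds a default "missing" value for the source's element type:
# plain types are wrapped in an empty DataValue, and NamedTuples get a NamedTuple of empty
# DataValues constructed field by field via _default_value_expr.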
@generated function default_if_empty(source::S) where {S}
T_source = eltype(source)
default_value_expr = _default_value_expr(T_source)
q = quote
default_value = $default_value_expr
T = typeof(default_value)
return EnumerableDefaultIfEmpty{T,$S}(source, default_value)
end
return q
end
function default_if_empty(source::S, default_value::TD) where {S,TD}
T = eltype(source)
if T!=TD
error("The default value must have the same type as the elements from the source.")
end
return EnumerableDefaultIfEmpty{T,S}(source, default_value)
end
function Base.iterate(iter::EnumerableDefaultIfEmpty{T,S}) where {T,S}
s = iterate(iter.source)
if s===nothing
return iter.default_value, nothing
else
return convert(T,s[1]), s[2]
end
end
function Base.iterate(iter::EnumerableDefaultIfEmpty{T,S}, state) where {T,S}
state===nothing && return nothing
s = iterate(iter.source, state)
if s===nothing
return nothing
else
return convert(T, s[1]), s[2]
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 924 | struct EnumerableDrop{T,S} <: Enumerable
source::S
n::Int
end
function drop(source::Enumerable, n::Integer)
T = eltype(source)
S = typeof(source)
return EnumerableDrop{T,S}(source, Int(n))
end
Base.IteratorSize(::Type{EnumerableDrop{T,S}}) where {T,S} = (Base.IteratorSize(S) isa Base.HasLength || Base.IteratorSize(S) isa Base.HasShape) ? Base.HasLength() : Base.SizeUnknown()
Base.eltype(::Type{EnumerableDrop{T,S}}) where {T,S} = T
Base.length(iter::EnumerableDrop{T,S}) where {T,S} = max(length(iter.source)-iter.n,0)
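# The first iterate call advances the source `n` times (stopping early if the source is
# exhausted); after that, iteration simply forwards to the wrapped source.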
function Base.iterate(iter::EnumerableDrop{T,S}) where {T,S}
ret = iterate(iter.source)
for i in 1:iter.n
if ret===nothing
return nothing
else
ret = iterate(iter.source, ret[2])
end
end
return ret
end
function Base.iterate(iter::EnumerableDrop{T,S}, state) where {T,S}
return iterate(iter.source, state)
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 712 | # T is the type of the elements produced by this iterator
struct EnumerableFilter{T,S,Q<:Function} <: Enumerable
source::S
filter::Q
end
Base.eltype(iter::Type{EnumerableFilter{T,S,Q}}) where {T,S,Q} = T
function filter(source::Enumerable, filter_func::Function, filter_expr::Expr)
T = eltype(source)
S = typeof(source)
Q = typeof(filter_func)
return EnumerableFilter{T,S,Q}(source, filter_func)
end
function Base.iterate(iter::EnumerableFilter{T,S,Q}, state...) where {T,S,Q}
ret = iterate(iter.source, state...)
while ret!==nothing
if iter.filter(ret[1])
return ret
end
ret = iterate(iter.source, ret[2])
end
return nothing
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 3138 | struct EnumerableGather{T,S,F,I,A} <: Enumerable
source::S
fields::F
indexFields::I
savedFields::A
key::Symbol
value::Symbol
end
struct Not{T}
val::T
end
function gather(source::Enumerable, args...; key::Symbol = :key, value::Symbol = :value)
fields = fieldnames(eltype(source))
if length(args) > 0
indexFields_vector = Vector{Symbol}(undef, 0)
firstArg = true
for arg in args
if typeof(arg) == Symbol
push!(indexFields_vector, arg)
            elseif typeof(arg) == Not{Symbol}
if firstArg
indexFields_vector = [a for a in fields if a != arg.val]
else
indexFields_vector = [a for a in indexFields_vector if a != arg.val]
end
end
firstArg = false
end
indexFields = tuple(indexFields_vector...)
else
indexFields = fields
end
savedFields = (n for n in fields if !(n in indexFields)) # fields that are not in `indexFields`
savedFieldsType = (fieldtype(eltype(source), savedField) for savedField in savedFields)
valueTypes = (fieldtype(eltype(source), indexField) for indexField in indexFields)
valueType = reduce(promote_type, valueTypes)
T = NamedTuple{(savedFields..., key, value), Tuple{savedFieldsType..., Symbol, valueType}}
return EnumerableGather{T, typeof(source), typeof(fields), typeof(indexFields), typeof(savedFields)}(source,
fields, indexFields, savedFields, key, value)
end
function Base.iterate(iter::EnumerableGather{T, S, F, I, A}) where {T, S, F, I, A}
source_iterate = iterate(iter.source)
if source_iterate == nothing || length(iter.indexFields) == 0
return nothing
end
key = iter.indexFields[1]
current_source_row = source_iterate[1]
value = current_source_row[key]
return (T((Base.map(n->current_source_row[n], iter.savedFields)..., key, value)),
(current_source_row=current_source_row, source_state=source_iterate[2], current_index_field_index=1))
end
function Base.iterate(iter::EnumerableGather{T, S, F, I, A}, state) where {T, S, F, I, A}
current_index_field_index = state.current_index_field_index + 1
if current_index_field_index > length(iter.indexFields)
source_iterate = iterate(iter.source, state.source_state)
if source_iterate == nothing || length(iter.indexFields) == 0
return nothing
end
current_index_field_index = 1
source_state = source_iterate[2]
current_source_row = source_iterate[1]
else
source_state = state.source_state
current_source_row = state.current_source_row
end
key = iter.indexFields[current_index_field_index]
value = current_source_row[key]
return (T((Base.map(n->current_source_row[n], iter.savedFields)..., key, value)),
(current_source_row=current_source_row, source_state=source_state, current_index_field_index=current_index_field_index))
end
function Base.eltype(iter::EnumerableGather{T, S, F, I, A}) where {T, S, F, I, A}
return T
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 3656 | struct EnumerableGroupBySimple{T,TKey,TS,SO,ES<:Function} <: Enumerable
source::SO
elementSelector::ES
end
struct Grouping{TKey,T} <: AbstractArray{T,1}
_key::TKey
elements::Array{T,1}
end
key(g::Grouping) = getfield(g, :_key)
struct GroupColumnArrayView{T,G,INDEX} <: AbstractVector{T}
grouping::G
end
function Base.size(gcav::GroupColumnArrayView)
return size(gcav.grouping)
end
function Base.getindex(gcav::GroupColumnArrayView{T,G,INDEX}, i::Int) where {T,G,INDEX}
return getproperty(gcav.grouping[i],INDEX)
end
Base.IndexStyle(::Type{GroupColumnArrayView}) = IndexLinear()
function Base.getproperty(g::Grouping{TKey,T}, name::Symbol) where {TKey,T}
return GroupColumnArrayView{fieldtype(T,name),Grouping{TKey,T},name}(g)
end
Base.size(A::Grouping{TKey,T}) where {TKey,T} = size(getfield(A, :elements))
Base.getindex(A::Grouping{TKey,T},i) where {TKey,T} = getfield(A, :elements)[i]
Base.length(A::Grouping{TKey,T}) where {TKey,T} = length(getfield(A, :elements))
Base.eltype(::Type{EnumerableGroupBySimple{T,TKey,TS,SO,ES}}) where {T,TKey,TS,SO,ES} = T
function groupby(source::Enumerable, f_elementSelector::Function, elementSelector::Expr)
TS = eltype(source)
TKey = Base._return_type(f_elementSelector, Tuple{TS,})
SO = typeof(source)
T = Grouping{TKey,TS}
ES = typeof(f_elementSelector)
return EnumerableGroupBySimple{T,TKey,TS,SO,ES}(source,f_elementSelector)
end
function Base.iterate(iter::EnumerableGroupBySimple{T,TKey,TS,SO,ES}) where {T,TKey,TS,SO,ES}
result = OrderedDict{TKey,T}()
for i in iter.source
key = iter.elementSelector(i)
if !haskey(result, key)
result[key] = T(key,Array{TS}(undef, 0))
end
push!(getfield(result[key], :elements),i)
end
groups = collect(values(result))
if length(groups)==0
return nothing
else
return groups[1], (groups, 2)
end
end
function Base.iterate(iter::EnumerableGroupBySimple{T,TKey,TS,SO,ES}, state) where {T,TKey,TS,SO,ES}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
struct EnumerableGroupBy{T,TKey,TR,SO,ES<:Function,RS<:Function} <: Enumerable
source::SO
elementSelector::ES
resultSelector::RS
end
Base.eltype(::Type{EnumerableGroupBy{T,TKey,TR,SO,ES, RS}}) where {T,TKey,TR,SO,ES,RS} = T
function groupby(source::Enumerable, f_elementSelector::Function, elementSelector::Expr, f_resultSelector::Function, resultSelector::Expr)
TS = eltype(source)
TKey = Base._return_type(f_elementSelector, Tuple{TS,})
SO = typeof(source)
TR = Base._return_type(f_resultSelector, Tuple{TS,})
T = Grouping{TKey,TR}
ES = typeof(f_elementSelector)
RS = typeof(f_resultSelector)
return EnumerableGroupBy{T,TKey,TR,SO,ES,RS}(source,f_elementSelector,f_resultSelector)
end
function Base.iterate(iter::EnumerableGroupBy{T,TKey,TR,SO,ES}) where {T,TKey,TR,SO,ES}
result = OrderedDict{TKey,T}()
for i in iter.source
key = iter.elementSelector(i)
if !haskey(result, key)
result[key] = T(key,Array{TR}(undef,0))
end
push!(getfield(result[key], :elements),iter.resultSelector(i))
end
groups = collect(values(result))
if length(groups)==0
return nothing
else
return groups[1], (groups, 2)
end
end
function Base.iterate(iter::EnumerableGroupBy{T,TKey,TR,SO,ES}, state) where {T,TKey,TR,SO,ES}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 2251 | struct EnumerableGroupJoin{T,TKey,TI,SO,SI,OKS<:Function,IKS<:Function,RS<:Function} <: Enumerable
outer::SO
inner::SI
outerKeySelector::OKS
innerKeySelector::IKS
resultSelector::RS
end
Base.eltype(::Type{EnumerableGroupJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}}) where {T,TKeyOuter,TI,SO,SI,OKS,IKS,RS} = T
function groupjoin(outer::Enumerable, inner::Enumerable, f_outerKeySelector::Function, outerKeySelector::Expr, f_innerKeySelector::Function, innerKeySelector::Expr, f_resultSelector::Function, resultSelector::Expr)
TO = eltype(outer)
TI = eltype(inner)
TKeyOuter = Base._return_type(f_outerKeySelector, Tuple{TO,})
TKeyInner = Base._return_type(f_innerKeySelector, Tuple{TI,})
if TKeyOuter!=TKeyInner
error("The keys in the join clause have different types, $TKeyOuter and $TKeyInner.")
end
SO = typeof(outer)
SI = typeof(inner)
T = Base._return_type(f_resultSelector, Tuple{TO,Array{TI,1}})
OKS = typeof(f_outerKeySelector)
IKS = typeof(f_innerKeySelector)
RS = typeof(f_resultSelector)
return EnumerableGroupJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}(outer,inner,f_outerKeySelector,f_innerKeySelector,f_resultSelector)
end
function Base.iterate(iter::EnumerableGroupJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}) where {T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}
results = Array{T}(undef, 0)
inner_dict = OrderedDict{TKeyOuter,Array{TI,1}}()
for i in iter.inner
key = iter.innerKeySelector(i)
if !haskey(inner_dict, key)
inner_dict[key] = Array{TI}(undef, 0)
end
push!(inner_dict[key], i)
end
for i in iter.outer
outerKey = iter.outerKeySelector(i)
if haskey(inner_dict,outerKey)
g = inner_dict[outerKey]
else
g = Array{TI}(undef, 0)
end
push!(results, iter.resultSelector(i,g))
end
if length(results)==0
return nothing
end
return results[1], (results,2)
end
function Base.iterate(iter::EnumerableGroupJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}, state) where {T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 2193 | struct EnumerableJoin{T,TKey,TI,SO,SI,OKS<:Function,IKS<:Function,RS<:Function} <: Enumerable
outer::SO
inner::SI
outerKeySelector::OKS
innerKeySelector::IKS
resultSelector::RS
end
Base.eltype(::Type{EnumerableJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}}) where {T,TKeyOuter,TI,SO,SI,OKS,IKS,RS} = T
function join(outer::Enumerable, inner::Enumerable, f_outerKeySelector::Function, outerKeySelector::Expr, f_innerKeySelector::Function, innerKeySelector::Expr, f_resultSelector::Function, resultSelector::Expr)
TO = eltype(outer)
TI = eltype(inner)
TKeyOuter = Base._return_type(f_outerKeySelector, Tuple{TO,})
TKeyInner = Base._return_type(f_innerKeySelector, Tuple{TI,})
if TKeyOuter!=TKeyInner
error("The keys in the join clause have different types, $TKeyOuter and $TKeyInner.")
end
SO = typeof(outer)
SI = typeof(inner)
T = Base._return_type(f_resultSelector, Tuple{TO,TI})
OKS = typeof(f_outerKeySelector)
IKS = typeof(f_innerKeySelector)
RS = typeof(f_resultSelector)
return EnumerableJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}(outer,inner,f_outerKeySelector,f_innerKeySelector,f_resultSelector)
end
function Base.iterate(iter::EnumerableJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}) where {T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}
results = Array{T}(undef, 0)
inner_dict = OrderedDict{TKeyOuter,Array{TI,1}}()
for i in iter.inner
key = iter.innerKeySelector(i)
if !haskey(inner_dict, key)
inner_dict[key] = Array{TI}(undef, 0)
end
push!(inner_dict[key], i)
end
for i in iter.outer
outerKey = iter.outerKeySelector(i)
if haskey(inner_dict,outerKey)
for j in inner_dict[outerKey]
push!(results, iter.resultSelector(i,j))
end
end
end
if length(results)==0
return nothing
end
return results[1], (results, 2)
end
function Base.iterate(iter::EnumerableJoin{T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}, state) where {T,TKeyOuter,TI,SO,SI,OKS,IKS,RS}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 734 | struct EnumerableMap{T, S, Q<:Function} <: Enumerable
source::S
f::Q
end
Base.IteratorSize(::Type{EnumerableMap{T,S,Q}}) where {T,S,Q} = haslength(S)
Base.eltype(iter::Type{EnumerableMap{T,S,Q}}) where {T,S,Q} = T
Base.length(iter::EnumerableMap{T,S,Q}) where {T,S,Q} = length(iter.source)
function map(source::Enumerable, f::Function, f_expr::Expr)
TS = eltype(source)
T = Base._return_type(f, Tuple{TS,})
S = typeof(source)
Q = typeof(f)
return EnumerableMap{T,S,Q}(source, f)
end
function Base.iterate(iter::EnumerableMap{T,S,Q}, state...) where {T,S,Q}
ret = iterate(iter.source, state...)
if ret===nothing
return nothing
else
return iter.f(ret[1]), ret[2]
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 2999 | struct EnumerableMapMany{T,SO,CS<:Function,RS<:Function} <: Enumerable
source::SO
collectionSelector::CS
resultSelector::RS
end
Base.eltype(::Type{EnumerableMapMany{T,SO,CS,RS}}) where {T,SO,CS,RS} = T
# TODO Make sure this is actually correct. We might have to be more selective,
# i.e. only scan arguments for certain types of expression etc.
function expr_contains_ref_to(expr::Expr, var_name::Symbol)
for sub_expr in expr.args
if isa(sub_expr, Symbol)
if sub_expr==var_name
return true
end
else
test_sub = expr_contains_ref_to(sub_expr, var_name)
if test_sub
return true
end
end
end
return false
end
function expr_contains_ref_to(expr::Symbol, var_name::Symbol)
return expr==var_name
end
function expr_contains_ref_to(expr::QuoteNode, var_name::Symbol)
return expr==var_name
end
function expr_contains_ref_to(expr::Function, var_name::Symbol)
return false
end
expr_contains_ref_to(::Number, ::Symbol) = false
function mapmany(source::Enumerable, f_collectionSelector::Function, collectionSelector::Expr, f_resultSelector::Function, resultSelector::Expr)
TS = eltype(source)
# First detect whether the collectionSelector return value depends at all
# on the value of the anonymous function argument
anon_var = collectionSelector.head==:escape ? collectionSelector.args[1].args[1] : collectionSelector.args[1]
body = collectionSelector.head==:escape ? collectionSelector.args[1].args[2].args[2] : collectionSelector.args[2].args[2]
crossJoin = !expr_contains_ref_to(body, anon_var)
if crossJoin
inner_collection = f_collectionSelector(nothing)
input_type_collection_selector = typeof(inner_collection)
TCE = input_type_collection_selector.parameters[1]
else
input_type_collection_selector = Base._return_type(f_collectionSelector, Tuple{TS,})
TCE = typeof(input_type_collection_selector)==Union || input_type_collection_selector==Any ? Any : eltype(input_type_collection_selector)
end
T = Base._return_type(f_resultSelector, Tuple{TS,TCE})
SO = typeof(source)
CS = typeof(f_collectionSelector)
RS = typeof(f_resultSelector)
return EnumerableMapMany{T,SO,CS,RS}(source,f_collectionSelector,f_resultSelector)
end
# TODO This should be changed to a lazy implementation
function Base.iterate(iter::EnumerableMapMany{T,SO,CS,RS}) where {T,SO,CS,RS}
results = Array{T}(undef, 0)
for i in iter.source
for j in iter.collectionSelector(i)
push!(results,iter.resultSelector(i,j))
end
end
if length(results)==0
return nothing
end
return results[1], (results,2)
end
function Base.iterate(iter::EnumerableMapMany{T,SO,CS,RS}, state) where {T,SO,CS,RS}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 4163 | struct EnumerableOrderby{T,S,KS<:Function,TKS} <: Enumerable
source::S
keySelector::KS
descending::Bool
end
Base.IteratorSize(::Type{EnumerableOrderby{T,S,KS,TKS}}) where {T,S,KS,TKS} = haslength(S)
Base.eltype(::Type{EnumerableOrderby{T,S,KS,TKS}}) where {T,S,KS,TKS} = T
Base.length(iter::EnumerableOrderby{T,S,KS,TKS}) where {T,S,KS,TKS} = length(iter.source)
function orderby(source::Enumerable, f::Function, f_expr::Expr)
T = eltype(source)
TKS = Base._return_type(f, Tuple{T,})
KS = typeof(f)
return EnumerableOrderby{T,typeof(source), KS,TKS}(source, f, false)
end
function orderby_descending(source::Enumerable, f::Function, f_expr::Expr)
T = eltype(source)
TKS = Base._return_type(f, Tuple{T,})
KS = typeof(f)
return EnumerableOrderby{T,typeof(source),KS,TKS}(source, f, true)
end
function Base.iterate(iter::EnumerableOrderby{T,S,KS,TKS}) where {T,S,KS,TKS}
rows = (Base.IteratorSize(typeof(iter)) isa Base.HasLength || Base.IteratorSize(typeof(iter)) isa Base.HasShape) ? length(iter) : 0
elements = Array{T}(undef, rows)
if Base.IteratorSize(typeof(iter)) isa Base.HasLength || Base.IteratorSize(typeof(iter)) isa Base.HasShape
for i in enumerate(iter.source)
elements[i[1]] = i[2]
end
else
for i in iter.source
push!(elements, i)
end
end
if length(elements)==0
return nothing
end
sort!(elements, by=iter.keySelector, rev=iter.descending)
return elements[1], (elements, 2)
end
function Base.iterate(iter::EnumerableOrderby{T,S,KS,TKS}, state) where {T,S,KS,TKS}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
struct EnumerableThenBy{T,S,KS<:Function,TKS} <: Enumerable
source::S
keySelector::KS
descending::Bool
end
Base.eltype(::Type{EnumerableThenBy{T,S,KS,TKS}}) where {T,S,KS,TKS} = T
Base.length(iter::EnumerableThenBy{T,S,KS,TKS}) where {T,S,KS,TKS} = length(iter.source)
function thenby(source::Enumerable, f::Function, f_expr::Expr)
T = eltype(source)
TKS = Base._return_type(f, Tuple{T,})
KS = typeof(f)
return EnumerableThenBy{T,typeof(source),KS,TKS}(source, f, false)
end
function thenby_descending(source::Enumerable, f::Function, f_expr::Expr)
T = eltype(source)
TKS = Base._return_type(f, Tuple{T,})
KS = typeof(f)
return EnumerableThenBy{T,typeof(source),KS,TKS}(source, f, true)
end
function Base.iterate(iter::EnumerableThenBy{T,S,KS,TKS}) where {T,S,KS,TKS}
# Find start of ordering sequence
source = iter.source
keySelectors = [source.keySelector,iter.keySelector]
directions = [source.descending, iter.descending]
while !isa(source, EnumerableOrderby)
source = source.source
insert!(keySelectors,1,source.keySelector)
insert!(directions,1,source.descending)
end
keySelector = element->[i(element) for i in keySelectors]
lt = (t1,t2) -> begin
n1, n2 = length(t1), length(t2)
for i = 1:min(n1, n2)
a, b = t1[i], t2[i]
descending = directions[i]
if !isequal(a, b)
return descending ? !isless(a, b) : isless(a, b)
end
end
return n1 < n2
end
rows = (Base.IteratorSize(typeof(iter)) isa Base.HasLength || Base.IteratorSize(typeof(iter)) isa Base.HasShape) ? length(iter) : 0
elements = Array{T}(undef, rows)
if (Base.IteratorSize(typeof(iter)) isa Base.HasLength || Base.IteratorSize(typeof(iter)) isa Base.HasShape)
for i in enumerate(iter.source)
elements[i[1]] = i[2]
end
else
for i in iter.source
push!(elements, i)
end
end
if length(elements)==0
return nothing
end
sort!(elements, by=keySelector, lt=lt)
return elements[1], (elements, 2)
end
function Base.iterate(iter::EnumerableThenBy{T,S,KS,TKS}, state) where {T,S,KS,TKS}
if state[2]>length(state[1])
return nothing
else
return state[1][state[2]], (state[1], state[2]+1)
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 978 | struct EnumerableTake{T,S} <: Enumerable
source::S
n::Int
end
function take(source::Enumerable, n::Integer)
T = eltype(source)
S = typeof(source)
return EnumerableTake{T,S}(source, Int(n))
end
Base.IteratorSize(::Type{EnumerableTake{T,S}}) where {T,S} = haslength(S)
Base.eltype(::Type{EnumerableTake{T,S}}) where {T,S} = T
Base.length(iter::EnumerableTake{T,S}) where {T,S} = min(length(iter.source),iter.n)
function Base.iterate(iter::EnumerableTake{T,S}) where {T,S}
ret = iterate(iter.source)
if ret===nothing
return nothing
elseif iter.n==0
return nothing
else
return ret[1], (ret[2],1)
end
end
function Base.iterate(iter::EnumerableTake{T,S}, state) where {T,S}
if state[2]==iter.n
return nothing
else
ret = iterate(iter.source, state[1])
if ret===nothing
return nothing
else
return ret[1], (ret[2], state[2]+1)
end
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 1404 | struct EnumerableUnique{T,TKEY,S,Q<:Function} <: Enumerable
source::S
f::Q
end
function unique(source::Enumerable, f::Function, f_expr::Expr)
T = eltype(source)
S = typeof(source)
TKEY = Base._return_type(f, Tuple{T,})
return EnumerableUnique{T,TKEY,S,typeof(f)}(source, f)
end
Base.eltype(::Type{EnumerableUnique{T,TKEY,S,Q}}) where {T,TKEY,S,Q} = T
function Base.iterate(iter::EnumerableUnique{T,TKEY,S,Q}) where {T,TKEY,S,Q}
ret = iterate(iter.source)
ret===nothing && return nothing
observed_keys = Set{TKEY}()
first_element = ret[1]
source_state = ret[2]
key_first_element = iter.f(first_element)
push!(observed_keys, key_first_element)
return first_element, (observed_keys=observed_keys, source_state=source_state)
end
function Base.iterate(iter::EnumerableUnique{T,TKEY,S,Q}, state) where {T,TKEY,S,Q}
ret = iterate(iter.source, state.source_state)
ret===nothing && return nothing
while true
current_element = ret[1]
key_current_element=iter.f(current_element)
if key_current_element in state.observed_keys
ret = iterate(iter.source, ret[2])
ret===nothing && return nothing
else
push!(state.observed_keys, key_current_element)
return current_element, (observed_keys=state.observed_keys, source_state=ret[2])
end
end
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 1858 | function printsequence(io::IO, source::Enumerable)
T = eltype(source)
rows = Base.IteratorSize(source) == Base.HasLength() ? length(source) : "?"
print(io, "$(rows)-element query result")
max_element_to_show = 10
i = 1
foo = iterate(source)
while foo!==nothing
v, s = foo
println(io)
if i==max_element_to_show+1
print(io, "... with ")
if Base.IteratorSize(source)!=Base.HasLength()
print(io, " more elements")
else
extra_rows = length(source) - max_element_to_show
print(io, "$extra_rows more $(extra_rows==1 ? "element" : "elements")")
end
break
else
print(io, " ")
show(IOContext(io, :compact => true), v)
end
i += 1
foo = iterate(source, s)
end
end
function Base.show(io::IO, source::Enumerable)
if eltype(source) <: NamedTuple
TableShowUtils.printtable(io, source, "query result")
else
printsequence(io, source)
end
end
function Base.show(io::IO, ::MIME"text/html", source::Enumerable)
if eltype(source) <: NamedTuple
TableShowUtils.printHTMLtable(io, source)
else
error("Cannot write this Enumerable as text/html.")
end
end
function Base.Multimedia.showable(::MIME"text/html", source::Enumerable)
return eltype(source) <: NamedTuple
end
function Base.show(io::IO, ::MIME"application/vnd.dataresource+json", source::Enumerable)
if eltype(source) <: NamedTuple
TableShowUtils.printdataresource(io, source)
else
error("Cannot write this Enumerable as 'application/vnd.dataresource+json'.")
end
end
function Base.Multimedia.showable(::MIME"application/vnd.dataresource+json", source::Enumerable)
return eltype(source) <: NamedTuple
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 5290 | using QueryOperators
using DataValues
using Test
@testset "QueryOperators" begin
@testset "Core" begin
source_1 = [1,2,2,3,4]
enum = QueryOperators.query(source_1)
@test collect(QueryOperators.@filter(QueryOperators.query(source_1), i->i>2)) == [3,4]
@test collect(QueryOperators.@map(QueryOperators.query(source_1), i->i^2)) == [1,4,4,9,16]
@test collect(QueryOperators.@take(enum, 2)) == [1,2]
@test collect(QueryOperators.@drop(enum, 2)) == [2,3,4]
@test QueryOperators.@count(enum) == 5
@test QueryOperators.@count(enum, x->x%2==0) == 3
dropped_str = ""
for i in QueryOperators.drop(enum, 2)
dropped_str *= string(i)
end
@test dropped_str == "234"
dropped_str = ""
for i in QueryOperators.drop(enum, 80)
dropped_str *= string(i)
end
@test dropped_str == ""
taken_str = ""
for i in QueryOperators.take(enum, 2)
taken_str *= string(i)
end
@test taken_str == "12"
filtered_str = ""
for i in QueryOperators.@filter(enum, x->x%2==0)
filtered_str *= string(i)
end
@test filtered_str == "224"
filtered_str = ""
for i in QueryOperators.@filter(enum, x->x>100)
filtered_str *= string(i)
end
@test filtered_str == ""
@test collect(QueryOperators.@filter(enum, x->x<3)) == [1,2,2]
grouped = []
for i in QueryOperators.@groupby(QueryOperators.query(source_1), i->i, i->i^2)
push!(grouped, i)
end
@test grouped == [[1],[4,4],[9],[16]]
mapped = []
for i in collect(QueryOperators.@map(enum, i->i*3))
push!(mapped, i)
end
@test mapped == [3,6,6,9,12]
# ensure that the default value must be of the same type
errored = false
try
QueryOperators.@default_if_empty(source_1, "string")
catch
errored = true
end
@test errored == true
# default_if_empty for regular array
d = []
for i in QueryOperators.@default_if_empty(source_1, 0)
push!(d, i)
end
@test d == [1, 2, 2, 3, 4]
@test collect(QueryOperators.default_if_empty(DataValue{Int}[]))[1] == DataValue{Int}()
@test collect(QueryOperators.default_if_empty(DataValue{Int}[], DataValue{Int}()))[1] == DataValue{Int}()
# passing in a NamedTuple of DataValues
nt = (a=DataValue(2), b=DataValue("test"), c=DataValue(3))
def = QueryOperators.default_if_empty(typeof(nt)[])
@test typeof(collect(def)[1]) == typeof(nt)
ordered = QueryOperators.@orderby(enum, x -> -x)
@test collect(ordered) == [4, 3, 2, 2, 1]
filtered = QueryOperators.@orderby(QueryOperators.@filter(enum, x->x%2 == 0), x->x)
@test collect(filtered) == [2, 2, 4]
ordered = QueryOperators.@orderby_descending(enum, x -> -x)
@test collect(ordered) == [1, 2, 2, 3, 4]
desired = [[1], [2, 2, 3], [4]]
grouped = QueryOperators.@groupby(enum, x -> floor(x/2), x->x)
@test collect(grouped) == desired
group_no_macro = QueryOperators.groupby(enum, x -> floor(x/2), quote x->floor(x/2) end)
@test collect(group_no_macro) == desired
outer = QueryOperators.query([1,2,3,4,5,6])
inner = QueryOperators.query([2,3,4,5])
join_desired = [[3,2], [4,3], [5,4], [6,5]]
@test collect(QueryOperators.@join(outer, inner, x->x, x->x+1, (i,j)->[i,j])) == join_desired
group_desired = [[1, Int64[]], [2, Int64[]], [3, [2]], [4, [3]], [5, [4]], [6, [5]]]
@test collect(QueryOperators.@groupjoin(outer, inner, x->x, x->x+1, (i,j)->[i,j])) == group_desired
many_map_desired = [[1, 2], [2, 4], [2, 4], [3, 6], [4, 8]]
success = collect(QueryOperators.@mapmany(enum, x->[x*2], (x,y)->[x,y])) == many_map_desired
@test success # for some reason, this is required to avoid a BoundsError
first = QueryOperators.query([1, 2])
second = [3, 4]
many_map_desired = [(1,3), (1,4), (2,3), (2,4)]
success = collect(QueryOperators.@mapmany(first, i->second, (x,y)->(x,y))) == many_map_desired
@test success
ntups = QueryOperators.query([(a=1, b=2, c=3), (a=4, b=5, c=6)])
@test sprint(show, ntups) == """
2x3 query result
a │ b │ c
──┼───┼──
1 │ 2 │ 3
4 │ 5 │ 6"""
@test sprint(show, enum) == """
5-element query result
1
2
2
3
4"""
@test sprint((stream,data)->show(stream, "text/html", data), ntups) ==
"<table><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tbody><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></tbody></table>"
gather_result1 = QueryOperators.gather(QueryOperators.query([(US=1, EU=1, CN=1), (US=2, EU=2, CN=2), (US=3, EU=3, CN=3)]))
@test sprint(show, gather_result1) == """9x2 query result\nkey │ value\n────┼──────\n:US │ 1 \n:EU │ 1 \n:CN │ 1 \n:US │ 2 \n:EU │ 2 \n:CN │ 2 \n:US │ 3 \n:EU │ 3 \n:CN │ 3 """
gather_result2 = QueryOperators.gather(QueryOperators.query([(Year=2017, US=1, EU=1, CN=1), (Year=2018, US=2, EU=2, CN=2), (Year=2019, US=3, EU=3, CN=3)]), :US, :EU, :CN)
@test sprint(show, gather_result2) == "9x3 query result\nYear │ key │ value\n─────┼─────┼──────\n2017 │ :US │ 1 \n2017 │ :EU │ 1 \n2017 │ :CN │ 1 \n2018 │ :US │ 2 \n2018 │ :EU │ 2 \n2018 │ :CN │ 2 \n2019 │ :US │ 3 \n2019 │ :EU │ 3 \n2019 │ :CN │ 3 "
@test sprint((stream,data)->show(stream, "application/vnd.dataresource+json", data), ntups) ==
"{\"schema\":{\"fields\":[{\"name\":\"a\",\"type\":\"integer\"},{\"name\":\"b\",\"type\":\"integer\"},{\"name\":\"c\",\"type\":\"integer\"}]},\"data\":[{\"a\":1,\"b\":2,\"c\":3},{\"a\":4,\"b\":5,\"c\":6}]}"
end
include("test_enumerable_unique.jl")
include("test_namedtupleutilities.jl")
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 428 | using Test
@testset "unique" begin
source_1 = [1,2,2,3,3,3,4]
enum = QueryOperators.query(source_1)
@test collect(QueryOperators.@unique(enum, i->i)) == [1,2,3,4]
source_1 = [1,2,3,4]
enum = QueryOperators.query(source_1)
@test collect(QueryOperators.@unique(enum, i->i)) == [1,2,3,4]
source_1 = [1,-1,2,3,4]
enum = QueryOperators.query(source_1)
@test collect(QueryOperators.@unique(enum, i->abs(i))) == [1,2,3,4]
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | code | 2861 | using Test
using QueryOperators
@testset "NamedTupleUtilities" begin
@test QueryOperators.NamedTupleUtilities.select((a = 1, b = 2, c = 3), Val(:a)) == (a = 1,)
@test QueryOperators.NamedTupleUtilities.select((a = 1, b = 2, c = 3), Val(:d)) == NamedTuple()
@inferred QueryOperators.NamedTupleUtilities.remove((a = 1, b = 2), Val(:b))
@test QueryOperators.NamedTupleUtilities.remove((a = 1, b = 2, c = 3),Val(:c)) == (a = 1, b = 2)
@test QueryOperators.NamedTupleUtilities.remove((a = 1, b = 2),Val(:c)) == (a = 1, b = 2)
@inferred QueryOperators.NamedTupleUtilities.remove((a = 1, b = 2),Val(:c))
@test QueryOperators.NamedTupleUtilities.range((a = 1, b = 2, c = 3),Val(:a),Val(:b)) == (a = 1, b = 2)
@test QueryOperators.NamedTupleUtilities.range((a = 1, b = 2, c = 3),Val(:b),Val(:d)) == (b = 2, c = 3)
@test QueryOperators.NamedTupleUtilities.range((a = 1, b = 2, c = 3),Val(:d),Val(:c)) == NamedTuple()
@inferred QueryOperators.NamedTupleUtilities.range((a = 1, b = 2, c = 3),Val(:a),Val(:b))
@test QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:a),Val(:d)) == (d = 1, b = 2, c = 3)
@test QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:m),Val(:d)) == (a = 1, b = 2, c = 3)
@test_throws ErrorException QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:a),Val(:c))
@inferred QueryOperators.NamedTupleUtilities.rename((a = 1, b = 2, c = 3),Val(:a),Val(:d))
@test QueryOperators.NamedTupleUtilities.startswith((abc=1,bcd=2,cde=3),Val(:a)) == (abc = 1,)
@test QueryOperators.NamedTupleUtilities.not_startswith((abc=1,bcd=2,cde=3),Val(:a)) == (bcd = 2, cde = 3)
@test QueryOperators.NamedTupleUtilities.endswith((abc=1,bcd=2,cde=3),Val(:d)) == (bcd = 2,)
@test QueryOperators.NamedTupleUtilities.not_endswith((abc=1,bcd=2,cde=3),Val(:d)) == (abc = 1, cde = 3)
@test QueryOperators.NamedTupleUtilities.occursin((abc=1,bcd=2,cde=3),Val(:d)) == (bcd = 2, cde = 3)
@test QueryOperators.NamedTupleUtilities.not_occursin((abc=1,bcd=2,cde=3),Val(:d)) == (abc = 1,)
@inferred QueryOperators.NamedTupleUtilities.startswith((abc=1,bcd=2,cde=3),Val(:a))
@inferred QueryOperators.NamedTupleUtilities.not_startswith((abc=1,bcd=2,cde=3),Val(:a))
@inferred QueryOperators.NamedTupleUtilities.endswith((abc=1,bcd=2,cde=3),Val(:d))
@inferred QueryOperators.NamedTupleUtilities.not_endswith((abc=1,bcd=2,cde=3),Val(:d))
@inferred QueryOperators.NamedTupleUtilities.occursin((abc=1,bcd=2,cde=3),Val(:d))
@inferred QueryOperators.NamedTupleUtilities.not_occursin((abc=1,bcd=2,cde=3),Val(:d))
nt = (a=4,b=true,c="Named")
@test QueryOperators.NamedTupleUtilities.oftype(nt, Val(Int)) == (a=4,)
@test QueryOperators.NamedTupleUtilities.oftype(nt, Val(Any)) == nt
@test QueryOperators.NamedTupleUtilities.oftype(nt, Val(Float64)) == NamedTuple()
@inferred QueryOperators.NamedTupleUtilities.oftype(nt, Val(Int))
end
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | docs | 1471 | # QueryOperators.jl v0.9.1 Release Notes
* Fix a bug in the oftype function
# QueryOperators.jl v0.9.0 Release Notes
* Drop julia 0.7 support
* Move to Project.toml
* Various performance improvements
# QueryOperators.jl v0.8.0 Release Notes
* Add gather
# QueryOperators.jl v0.7.1 Release Notes
* Fix bug in mapmany
# QueryOperators.jl v0.7.0 Release Notes
* Add support for "application/vnd.dataresource+json" MIME showing
# QueryOperators.jl v0.6.0 Release Notes
* Add unique command
# QueryOperators.jl v0.5.2 Release Notes
* Various bug fixes
* Some new features for NamedTupleUtilities
# QueryOperators.jl v0.5.1 Release Notes
* Various bug fixes
# QueryOperators.jl v0.5.0 Release Notes
* Add NamedTupleUtilities
# QueryOperators.jl v0.4.0 Release Notes
* Add GroupColumnArrayView
# QueryOperators.jl v0.3.0 Release Notes
* Drop julia 0.6 support, add julia 0.7 support
# QueryOperators.jl v0.2.3 Release Notes
* Fix bug in mapmany
# QueryOperators.jl v0.2.2 Release Notes
* Fix show bug
# QueryOperators.jl v0.2.1 Release Notes
* Add missing eltype methods
# QueryOperators.jl v0.2.0 Release Notes
* Add pretty show method
* Fix some bugs
* Add @drop and @take operators
# QueryOperators.jl v0.1.1 Release Notes
* Enable precompile
# QueryOperators.jl v0.1.0 Release Notes
* Rename many query operators
# QueryOperators.jl v0.0.2 Release Notes
* Depend on IteratorInterfaceExtensions.jl
# QueryOperators.jl v0.0.1 Release Notes
* Initial release
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.9.3 | 911c64c204e7ecabfd1872eb93c49b4e7c701f02 | docs | 829 | # QueryOperators
[](http://www.repostatus.org/#active)
[](https://travis-ci.org/queryverse/QueryOperators.jl)
[](https://ci.appveyor.com/project/queryverse/queryoperators-jl/branch/master)
[](https://codecov.io/gh/queryverse/QueryOperators.jl)
## Overview
This package contains the underlying query operators that are exposed to users in [Query.jl](https://github.com/queryverse/Query.jl).
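A minimal usage sketch (mirroring the package's own test suite; in typical usage these operators are invoked indirectly through Query.jl rather than called directly):

```julia
using QueryOperators

source = [1, 2, 2, 3, 4]
enum = QueryOperators.query(source)

collect(QueryOperators.@filter(enum, i -> i > 2))  # [3, 4]
collect(QueryOperators.@map(enum, i -> i^2))       # [1, 4, 4, 9, 16]
QueryOperators.@count(enum)                        # 5
```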
| QueryOperators | https://github.com/queryverse/QueryOperators.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 901 | using Revise, Documenter, GModelFit, Gnuplot
makedocs(sitename="GModelFit.jl",
authors = "Giorgio Calderone",
# format = Documenter.HTML(prettyurls = false), # uncomment for local use, comment for deployment
modules=[GModelFit],
pages = [
"Home" => "index.md",
"Basic concepts and data types" => "concepts.md",
"Main functionalities" => "mainfunctions.md",
"Built-in components" => "builtincomp.md",
"Custom components" => "customcomp.md",
"Parameter constraints" => "parameter.md",
"Multi-dataset fitting" => "multifit.md",
"Minimizers" => "minimizers.md",
"Miscellaneous" => "misc.md",
"API" => "api.md"
])
Gnuplot.quitall()
if !(@isdefined is_compiled)
is_compiled = true
error("Re-run with compiled code!")
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 305 | using Gnuplot, Dates
Gnuplot.quitall()
mkpath("assets")
Gnuplot.options.term = "unknown"
empty!(Gnuplot.options.init)
push!( Gnuplot.options.init, linetypes(:Set1_5, lw=2.5, ps=1.5))
function saveas(file)
Gnuplot.save(term="pngcairo size 550,350 fontscale 0.8", "assets/$(file).png")
nothing
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 14649 | module GModelFit
using Printf, PrettyTables
using Statistics
using DataStructures
using LsqFit
using MacroTools
using Dates
using ProgressMeter
using Random
using JSON, GZip
import Base.show
import Base.ndims
import Base.size
import Base.length
import Base.haskey
import Base.keys
import Base.getindex
import Base.setindex!
import Base.reshape
import Base.propertynames
import Base.getproperty
import Base.iterate
import Base.values
import Base.push!
import Base.empty!
export AbstractDomain, Domain, CartesianDomain, coords, axis, Measures, uncerts,
CompEval, Model, @fd, select_maincomp!, SumReducer, domain, comptype,
isfreezed, thaw!, freeze!, fit, fit!, compare
include("PV.jl")
using .PV
include("domain.jl")
# ====================================================================
"""
FunctDesc
A "Julia function" descriptor containing the reference to the function itself, a string representation of its source code definition (for displaying purposes) and the lists of its arguments. It can be invoked using the standard syntax for functions
### Example:
```
julia> f = GModelFit.FunctDesc( (x, p=0) -> x + p, # actual function definition
"(x, p=0) -> x + p", # string representation
[:x], # vector of argument names
[:(p = 0)]) # vector of `Expr` with arguments default values
julia> f(1, 2)
3
```
Note that it is inconvenient to directly create a `FunctDesc` using its constructor; the same result can be obtained with the `@fd` macro:
```
f = @fd (x, p=0) -> x + p
```
"""
struct FunctDesc
funct::Function
display::String
args::Vector{Symbol} # positional arguments
optargs::Vector{Expr} # optional arguments with default values
end
(f::FunctDesc)(args...; kws...) = f.funct(args...; kws...)
"""
@fd expr
Macro to generate a `FunctDesc` object using the same syntax as for a standard Julia anonymous function.
### Example
```
julia> f = @fd (x, p=0) -> x + p
julia> f(1, 2)
3
```
"""
macro fd(_expr)
@assert isexpr(longdef(_expr), :function)
expr = prettify(_expr)
def = splitdef(expr)
args = convert(Vector{Symbol}, filter(x -> isa(x, Symbol), def[:args]))
optargs = convert(Vector{Expr} , filter(x -> isa(x, Expr) , def[:args]))
return esc(:(GModelFit.FunctDesc($expr, string($(QuoteNode(expr))), $args, $optargs)))
end
# ====================================================================
"""
Parameter
A structure representing a model parameter.
# Fields:
- `val::Float64`: parameter value (initial guess before fitting, or best fit one after fitting);
- `low::Float64`: lower limit for the value (default: `-Inf`);
- `high::Float64`: upper limit for the value (default: `+Inf`);
- `fixed::Bool`: whether the parameter is fixed during fitting (default: `false`);
- `patch::Union{Nothing, Symbol, FunctDesc}`: patch prescription within the same model;
- `mpatch::Union{Nothing, FunctDesc}`: patch prescription in a multi-model analysis;
- `actual::Float64`: actual value for the parameter (i.e. after applying the patch prescription);
- `unc::Float64`: 1σ uncertainty associated to the parameter value.
Note: the `Parameter` fields are supposed to be accessed directly by the user, without invoking any get/set method.
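### Example:
A minimal sketch (in typical usage `Parameter` objects are created by the model components, and the user only modifies their fields):
```
julia> par = GModelFit.Parameter(1.5);  # initial guess
julia> par.low = 0;  par.high = 5;      # limits for the fitted value
julia> par.fixed = true;                # keep the value fixed during fitting
```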
"""
mutable struct Parameter
val::Float64
low::Float64 # lower limit value
high::Float64 # upper limit value
fixed::Bool
patch::Union{Nothing, Symbol, FunctDesc}
mpatch::Union{Nothing, FunctDesc}
actual::Float64
unc::Float64
end
Parameter(value::Number) = Parameter(float(value), -Inf, +Inf, false, nothing, nothing, NaN, NaN)
# ====================================================================
# Components:
#
# A *component* is a generic implementation of a building block for a
# model. It must inherit `AbstractComponent` and implement the
# `evaluate!` method. The structure should contain zero or more fields
# of type Parameter, or have all parameters collected in a single
# field of type OrderedDict{Symbol, Parameter}()
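#
# A minimal, illustrative sketch of a custom component following the
# conventions above (hypothetical code, not part of the package):
#
#     struct MyLine <: AbstractComponent
#         slope::Parameter
#         offset::Parameter
#         MyLine() = new(Parameter(1), Parameter(0))
#     end
#
#     # parameter values are passed in the order returned by getparams()
#     function evaluate!(ceval::CompEval{MyLine, <: AbstractDomain{1}},
#                        slope, offset)
#         ceval.buffer .= slope .* coords(ceval.domain) .+ offset
#     end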
abstract type AbstractComponent end
abstract type AbstractCompWDeps end
# Note: this function must mirror setparams!()
function getparams(comp::AbstractComponent)
out = OrderedDict{Symbol, Parameter}()
for name in fieldnames(typeof(comp))
field = getfield(comp, name)
if isa(field, Parameter)
out[name] = field
elseif isa(field, OrderedDict{Symbol, Parameter})
@assert length(out) == 0 # avoid parameter name clash
return field
end
end
return out
end
# Note: this function must mirror getparams()
function setparams!(comp::AbstractComponent, params::PVComp{Parameter})
for name in fieldnames(typeof(comp))
field = getfield(comp, name)
if isa(field, Parameter)
field.val = params[name].val
field.unc = params[name].unc
field.actual = params[name].actual
elseif isa(field, OrderedDict{Symbol, Parameter})
for (name, par) in field
par.val = params[name].val
par.unc = params[name].unc
par.actual = params[name].actual
end
end
end
nothing
end
"""
prepare!(comp::AbstractComponent, domain::AbstractDomain)
Allocate the buffer for a component evaluation on a specific domain. Return value must be a `Vector{Float64}`.
This function is invoked only once when the `ModelEval` structure is created (typically within a `fit` or `fit!` call), hence it is the perfect place to pre-compute quantities associated to a component evaluation on a specific domain. Default implementation returns a vector filled with `NaN`s with the same length as the domain.
"""
prepare!(comp::AbstractComponent, domain::AbstractDomain) = fill(NaN, length(domain))
"""
dependencies(comp::AbstractComponent)
Return the names of the dependencies of a component. Return value must be a `Vector{Symbol}`.
Default implementation returns `Symbol[]` (i.e. no dependencies).
"""
dependencies(comp::AbstractComponent) = Symbol[]
# ====================================================================
# Model
#
"""
Model
A structure containing a model description.
Constructor is: `Model(components...)`. Components may be specified as:
- a single `Dict{Symbol, AbstractComponent}`, where the keys are the names and the values the component objects;
- a single component (the default `:main` is automatically assigned);
- a single `FunctDesc` which will be wrapped into an `FComp` component and a default name will be assigned (`:main`);
- one or more `Pair{Symbol, AbstractComponent}`, where the first element is the name and the second is the component.
You may access the individual component in a `Model` using the indexing syntax, as if it was a `Dict{Symbol, AbstractComponent}`. Also, you may add new components to a `Model` after it has been created using the same syntax. Finally, you may use the `keys()` and `haskey()` functions with their usual meanings.
Individual components may be *frozen* (i.e. have all their parameters kept fixed during fitting, regardless of the individual `Parameter` settings) or *thawed* using the `freeze!()` and `thaw!()` functions. Use the `isfreezed()` function to check whether a component is frozen.
The main component, i.e. the one whose evaluation corresponds to the overall model evaluation, is automatically identified by analyzing the component dependencies. However a specific component may be forced to be the main one by invoking `select_maincomp!`.
The most important function for a `Model` object is `fit()`, which fits the model against an empirical dataset. The `fit!()` function has the same purpose, with the only difference that it stores the best fit parameter values into the original `Model` object.
The model, as well as its individual components, can be evaluated as if it were a function by simply passing a `Domain` object.
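### Example:
A minimal sketch (a single `FComp` component wrapping a `@fd` function; following the statement above, the model is evaluated by passing a `Domain` object):
```
julia> model = Model(@fd (x, a=1, b=2) -> a .* x .+ b);  # component named :main
julia> haskey(model, :main)
true
julia> model(Domain(1.0:5.0))  # evaluate the model on a 5-point domain
```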
"""
mutable struct Model
comps::OrderedDict{Symbol, AbstractComponent}
fixed::OrderedDict{Symbol, Bool}
maincomp::Union{Nothing, Symbol}
Model() = new(OrderedDict{Symbol, AbstractComponent}(), OrderedDict{Symbol, Bool}(), nothing)
function Model(dict::AbstractDict)
model = Model()
for (name, item) in dict
# isa(item, Number) && (item = SimplePar(item))
@assert isa(name, Symbol)
@assert isa(item, AbstractComponent)
model[name] = item
end
return model
end
function Model(args::Vararg{Pair})
model = Model()
for arg in args
@assert isa(arg[1], Symbol)
if isa(arg[2], AbstractComponent)
model[arg[1]] = arg[2]
elseif isa(arg[2], FunctDesc)
model[arg[1]] = FComp(arg[2])
# elseif isa(arg[2], Number)
# out[arg[1]] = SimplePar(arg[2])
else
error("Unsupported data type: " * string(typeof(arg[2])) *
". (Accepted types are T <: AbstractComponent or FunctDesc.)")
end
end
return model
end
Model(arg::AbstractComponent) = Model(:main => arg)
Model(arg::FunctDesc) = Model(:main => FComp(arg))
end
function find_maincomp(model::Model)
# Identify parent for all comps
parent = OrderedDict{Symbol, Symbol}()
for (cname, comp) in model.comps
for d in GModelFit.dependencies(model, cname)
@assert !haskey(parent, d) "Component $d has two parent nodes: $(parent[d]) and $cname"
parent[d] = cname
end
end
# Ensure no circular dependency is present by checking all parent
# nodes of a given component to be different from the component
# itself. Also collect components with no parent.
comps_with_no_parent = Vector{Symbol}()
for cname in keys(model.comps)
if haskey(parent, cname)
p = parent[cname]
@assert p != cname "Component $cname depends on itself"
while haskey(parent, p)
p = parent[p]
if cname == p
display(parent)
error("Circular dependency detected for component $cname")
end
end
else
push!(comps_with_no_parent, cname)
end
end
    # If multiple possibilities are still available neglect components
    # with no dependencies
    i = 1
    while (length(comps_with_no_parent) > 1)  &&  (i <= length(comps_with_no_parent))
        if length(dependencies(model, comps_with_no_parent[i])) == 0
            deleteat!(comps_with_no_parent, i)
        else
            i += 1
        end
    end
# The above check is always performed even if an explicit maincomp
# has been set
if isnothing(model.maincomp)
return comps_with_no_parent[end]
else
return model.maincomp
end
end
function dependencies(model::Model, cname::Symbol; select_domain=false)
domdeps = Vector{Symbol}()
compdeps = Vector{Symbol}()
# nd = ndims(domain(model))
for d in dependencies(model.comps[cname])
if haskey(model.comps, d) # dependency with known name
push!(compdeps, d)
else # dependency with unknown name is intended as a domain dimension
@assert length(compdeps) == 0 "Domain dependencies must be listed first"
# @assert length(domdeps) < nd "Component $cname depends on $d, but the latter is not a component in the model."
push!(domdeps, d)
end
end
# @assert (length(domdeps) == 0) || (length(domdeps) == nd) "Domain has $nd dimensions but only $(length(domdeps)) are listed as dependencies"
return (select_domain ? domdeps : compdeps)
end
# User interface
setindex!(model::Model, f::FunctDesc, cname::Symbol) = model[cname] = FComp(f)
function setindex!(model::Model, comp::AbstractComponent, cname::Symbol)
model.comps[cname] = deepcopy(comp)
model.fixed[cname] = false
end
function iterate(model::Model, i=1)
k = collect(keys(model))
(i > length(k)) && return nothing
return (k[i] => model[k[i]], i+1)
end
"""
isfreezed(model::Model, cname::Symbol)
Check whether a component is *frozen* in the model.
"""
function isfreezed(model::Model, cname::Symbol)
@assert cname in keys(model.fixed) "Component $cname is not defined"
return model.fixed[cname]
end
"""
freeze!(model::Model, cname::Symbol)
Freeze a component in the model (i.e. treat all component parameters as fixed for fitting).
"""
function freeze!(model::Model, cname::Symbol)
@assert cname in keys(model.fixed) "Component $cname is not defined"
model.fixed[cname] = true
nothing
end
"""
thaw!(model::Model, cname::Symbol)
Thaw a frozen component in the model (i.e. treat component parameters as fixed only if explicitly set in the corresponding `Parameter` structure).
"""
function thaw!(model::Model, cname::Symbol)
@assert cname in keys(model.fixed) "Component $cname is not defined"
model.fixed[cname] = false
nothing
end
Base.keys(model::Model) = collect(keys(model.comps))
"""
haskey(m::Model, name::Symbol)
Check whether a component exists in model.
"""
Base.haskey(model::Model, cname::Symbol) = haskey(model.comps, cname)
"""
getindex(model::Model, cname::Symbol)
Return the model component with name `cname`.
"""
function Base.getindex(model::Model, cname::Symbol)
@assert cname in keys(model.comps) "Component $cname is not defined"
return model.comps[cname]
end
"""
length(model::Model)
Return number of components in a model.
"""
Base.length(model::Model) = length(model.comps)
"""
comptype(model::Model, cname::Symbol)
Return a component type as a string.
"""
comptype(model::Model, cname::Symbol) = replace(string(typeof(model[cname])), "GModelFit." => "")
"""
comptypes(model::Model)
Return a `OrderedDict{Symbol, String}` with the model component types.
"""
comptypes(model::Model) = OrderedDict([cname => comptype(model, cname) for cname in keys(model)])
"""
select_maincomp!(model::Model, cname::Symbol)
Force a component to be the final one for model evaluation.
"""
function select_maincomp!(model::Model, cname::Symbol)
@assert haskey(model, cname) "Component $cname is not defined"
model.maincomp = cname
end
include("evaluation.jl")
include("snapshot.jl")
include("minimizers.jl")
include("fit.jl")
include("multimodel.jl")
include("serialize.jl")
include("show.jl")
include("utils.jl")
include("gnuplot_recipe.jl")
include("precompile.jl")
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 2505 |
module PV
using DataStructures
import Base.keys, Base.getindex, Base.setindex!,
Base.propertynames, Base.getproperty, Base.setproperty!,
Base.push!, Base.empty!, Base.iterate
export PVComp, PVModel, items
struct PVComp{T}
pnames::Vector{Symbol}
indices::Vector{Int}
data::Vector{T}
end
struct PVModel{T}
comps::OrderedDict{Symbol, PVComp{T}}
indices::Vector{Int}
data::Vector{T}
end
PVComp(parent::PVModel{T}) where T = PVComp{T}(Vector{Symbol}(), Vector{Int}(), parent.data)
propertynames(comp::PVComp) = getfield(comp, :pnames)
function index(comp::PVComp, pname::Symbol)
i = findfirst(getfield(comp, :pnames) .== pname)
@assert !isnothing(i) "Unknwon parameter name: $pname"
return getfield(comp, :indices)[i]
end
getindex(comp::PVComp, pname::Symbol) = getfield(comp, :data)[index(comp, pname)]
setindex!(comp::PVComp, value, pname::Symbol) = setindex!(getfield(comp, :data),
value, index(comp, pname))
getproperty( comp::PVComp, pname::Symbol) = getindex(comp, pname)
setproperty!(comp::PVComp, pname::Symbol, value) = setindex!(comp, value, pname)
items(comp::PVComp) = view(getfield(comp, :data), getfield(comp, :indices))
function iterate(comp::PVComp, i=1)
(i > length(getfield(comp, :pnames))) && return nothing
return (getfield(comp, :pnames)[i] => getfield(comp, :data)[getfield(comp, :indices)[i]], i+=1)
end
PVModel{T}() where T = PVModel{T}(OrderedDict{Symbol, PVComp{T}}(), Vector{Int}(), Vector{T}())
function empty!(pv::PVModel)
empty!(pv.comps)
empty!(pv.indices)
empty!(pv.data)
end
keys(pv::PVModel) = collect(keys(pv.comps))
function push!(pv::PVModel{T}, cname::Symbol, pname::Symbol, value::T) where T
comp = pv[cname]
if pname in getfield(comp, :pnames)
comp[pname] = value
else
i = length(getfield(comp, :data)) + 1
push!(getfield(comp, :pnames), pname)
push!(getfield(comp, :indices), i)
push!(getfield(comp, :data), value)
end
# Collect indices from components
empty!(pv.indices)
for (cname, comp) in pv.comps
append!(pv.indices, getfield(comp, :indices))
end
return value
end
function getindex(pv::PVModel{T}, cname::Symbol) where T
(cname in keys(pv.comps)) || (pv.comps[cname] = PVComp(pv))
return pv.comps[cname]
end
items(pv::PVModel) = view(pv.data, pv.indices)
iterate(pv::PVModel, state...) = iterate(pv.comps, state...)
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 7993 | abstract type AbstractDomain{N} end
# A non-cartesian domain has the same number of points in all
# dimensions.
"""
Domain{N}
An object representing an N-dimensional linear domain for a model or a dataset.
Available constructors:
- `Domain(coords...)`: each argument is a vector, one for each dimension (all arguments must have same lengths);
- `Domain(length)`: returns a 1-dim `Domain` object of the given length.
Coordinates for all points along a given axis can be obtained with the `coords` function.
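### Example:
A minimal sketch:
```
julia> dom = Domain([1.0, 2.0, 3.0], [10.0, 20.0, 30.0]);  # 2D linear domain
julia> coords(dom, 2)     # coordinates along the second dimension: [10.0, 20.0, 30.0]
julia> length(Domain(5))  # 1D domain with coordinates 1.0, 2.0, ..., 5.0
5
```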
"""
struct Domain{N} <: AbstractDomain{N}
axis::NTuple{N, Vector{Float64}}
function Domain(coords::Vararg{AbstractVector{T},N}) where {T <: Real, N}
@assert N >= 1
@assert all(length(coords[1]) .== [length.(coords)...])
return new{N}(convert(NTuple{N, Vector{Float64}}, coords))
end
function Domain(length::Integer)
@assert length >= 1
return Domain(collect(1.:length))
end
end
# A cartesian domain has the coordinates specified independently for
# each axis, and can always be transformed into a non-cartesian one.
# Cartesian domains also support region-of-interest (ROI).
"""
CartesianDomain{N}
An object representing an N-dimensional cartesian domain (i.e. a grid) for a model or a dataset.
Available constructors:
- `CartesianDomain(axis...)`: each argument is a vector containing the coordinates on a given axis (arguments may have different lengths);
- `CartesianDomain(lengths...)`: returns a N-dim `CartesianDomain` object whose axis lengths are specified in the arguments.
Note that a `CartesianDomain` requires at least 2 dimensions.
Coordinates for all points along a given axis can be obtained with the `coords()` function, while the coordinates of the grid can be obtained with `axis()`.
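### Example:
A minimal sketch:
```
julia> cdom = CartesianDomain([1.0, 2.0, 3.0], [10.0, 20.0]);  # a 3x2 grid
julia> axis(cdom, 2)            # grid coordinates along the second axis: [10.0, 20.0]
julia> length(coords(cdom, 2))  # one coordinate for each of the 3x2=6 grid points
6
```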
"""
struct CartesianDomain{N} <: AbstractDomain{N}
axis::NTuple{N, Vector{Float64}}
roi::Vector{Int}
ldomain::Domain{N}
function CartesianDomain(axis::Vararg{AbstractVector{T},N}; roi=nothing) where {T <: Real, N}
@assert N >= 2 "A cartesian domain requires at least 2 dimensions"
isnothing(roi) && (roi = collect(1:prod(length.(axis))))
# Pre-compute corresponding linear domain
ss = tuple(length.(axis)...)
ci = Tuple.(CartesianIndices(ss))[roi]
vv = Vector{Vector{Float64}}()
for i = 1:N
push!(vv, axis[i][getindex.(ci, i)])
end
ldomain = Domain(vv...)
return new{N}(deepcopy(axis), roi, ldomain)
end
function CartesianDomain(lengths::Vararg{T,N}; kw...) where {T <: Integer, N}
@assert N >= 2 "A cartesian domain requires at least 2 dimensions"
@assert all(lengths .>= 1)
axis = [collect(1.:lengths[i]) for i in 1:N]
return CartesianDomain(axis...; kw...)
end
end
ndims(d::Union{Domain{N}, CartesianDomain{N}}) where N = N
length(d::Domain) = length(d.axis[1])
length(d::CartesianDomain) = length(coords(d, 1))
"""
coords(d::Domain{1})
coords(d::Domain, dim::Integer)
coords(d::CartesianDomain, dim::Integer)
Returns coordinates of all points along a given dimension as a `Vector{Float64}`.
"""
coords(d::Domain{1}) = d.axis[1]
coords(d::Domain, dim::Integer) = d.axis[dim]
coords(d::CartesianDomain, dim::Integer) = coords(flatten(d), dim)
# Iterate through domain dimensions returning coordinates
function iterate(d::Union{Domain, CartesianDomain}, ii=1)
(ii > ndims(d)) && (return nothing)
return (coords(d, ii), ii+1)
end
# Cartesian-only methods
flatten(d::CartesianDomain) = d.ldomain
size(d::CartesianDomain) = tuple([length(v) for v in d.axis]...)
"""
axis(d::CartesianDomain, dim::Integer)
Returns the coordinates of the grid along a given dimension as a `Vector{Float64}`.
"""
axis(d::CartesianDomain, dim::Integer) = d.axis[dim]
# ====================================================================
# Measures and Counts types
#
abstract type AbstractMeasures{N} end
"""
domain(d::AbstractMeasures)
Return the domain associated to an AbstractMeasures object.
"""
domain(d::AbstractMeasures) = d.domain
ndims(d::AbstractMeasures) = ndims(domain(d))
length(d::AbstractMeasures) = length(domain(d))
size(d::AbstractMeasures) = size(domain(d))
reshape(d::Domain, v::Vector{Float64}) = v
function reshape(d::CartesianDomain, v::Vector{Float64})
out = fill(NaN, size(d))
out[d.roi] .= v
return out
end
"""
Measures{N}
An object representing a set of empirical measurements (with Gaussian uncertainties) as measured on a specific domain.
Available constructors:
- `Measures(domain::Domain{N},
values::AbstractVector{T},
uncerts::AbstractVector{T}) where {T <: AbstractFloat, N}`
- `Measures(domain::CartesianDomain{N},
values::AbstractArray{T, N},
uncerts::AbstractArray{T, N}) where {T <: AbstractFloat, N}`
- `Measures(values::AbstractVector, uncerts)`
In the above constructor methods the last argument may also be a scalar value, to set the same uncertainty for all the measurements. The method accepting a `CartesianDomain` requires arrays with at least 2 dimensions. In the last constructor the `Domain` object is automatically built depending on the length of the `values` vector.
The domain, values and uncertainties for a `Measures` object can be retrieved using the `domain`, `values` and `uncerts` functions respectively.
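### Example:
A minimal sketch:
```
julia> dom = Domain([1.0, 2.0, 3.0]);
julia> data = Measures(dom, [1.1, 1.9, 3.2], 0.1);  # same 0.1 uncertainty on all points
julia> values(data)   # [1.1, 1.9, 3.2]
julia> uncerts(data)  # [0.1, 0.1, 0.1]
```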
"""
struct Measures{N} <: AbstractMeasures{N}
domain::AbstractDomain{N}
values::NTuple{2, Vector{Float64}}
labels::NTuple{2, String}
# If no domain is provided we generate one
Measures(values::AbstractVector, uncerts) = Measures(Domain(length(values)), values, uncerts)
# Measures with linear domain are built using 1D vector(s).
function Measures(domain::Domain{N}, values::AbstractVector{T}, uncerts::AbstractVector{T}) where {T <: AbstractFloat, N}
@assert length(domain) == length(values) == length(uncerts) "Domain and dataset have incompatible length"
return new{N}(deepcopy(domain), tuple(deepcopy(values), deepcopy(uncerts)), ("values", "uncerts"))
end
# Measures with cartesian domain are built using N-dim arrays(s).
function Measures(domain::CartesianDomain{N}, values::AbstractArray{T, N}, uncerts::AbstractArray{T, N}) where {T <: AbstractFloat, N}
@assert size(domain) == size(values) == size(uncerts) "Domain and dataset have incompatible size"
return new{N}(deepcopy(domain), tuple(deepcopy(values[domain.roi]), deepcopy(uncerts[domain.roi])), ("values", "uncerts"))
end
Measures(domain::AbstractDomain{N}, values::AbstractArray{T, N}, uncert::T) where {T <: AbstractFloat, N} =
Measures(domain, values, fill(uncert, size(values)))
end
"""
values(d::Measures)
Returns the measurement values as a `Vector{Float64}`.
"""
values(d::Measures) = reshape(d.domain, d.values[1])
"""
uncerts(d::Measures)
Returns the measurement uncertainties as a `Vector{Float64}`.
"""
uncerts(d::Measures) = reshape(d.domain, d.values[2])
Measures(dom::AbstractDomain, values::AbstractArray, uncert::Real) = Measures(dom, values, fill(uncert, size(values)))
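#= Construction sketch (illustrative):

    x    = [0.1, 1.1, 2.1, 3.1, 4.1]
    data = Measures(Domain(x), [1.0, 2.1, 2.9, 4.2, 5.1], 0.3)  # constant uncertainty
    values(data)     # measurement values
    uncerts(data)    # uncertainties (all 0.3 here)
=#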
struct PoissonCounts{N} <: AbstractMeasures{N}
domain::AbstractDomain{N}
values::NTuple{1, Vector{Int}}
labels::NTuple{1, String}
# Measures with linear domain are built using 1D vector(s).
function PoissonCounts(domain::Domain{N}, values::AbstractVector{T}) where {T <: Integer, N}
@assert length(domain) == length(values) "Domain and dataset have incompatible length"
return new{N}(deepcopy(domain), tuple(deepcopy(values)), ("counts", ))
end
# Measures with cartesian domain are built using N-dim arrays(s).
function PoissonCounts(domain::CartesianDomain{N}, values::AbstractArray{T, N}) where {T <: Integer, N}
@assert size(domain) == size(values) "Domain and dataset have incompatible size"
return new{N}(deepcopy(domain), tuple(deepcopy(values[domain.roi])), ("counts", ))
end
end
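#= Construction sketch (illustrative): Poisson counts require integer values and
   carry no explicit uncertainties.

    counts = PoissonCounts(Domain(1.:5.), [3, 5, 2, 4, 6])
=#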
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 12667 |
"""
CompEval(comp::AbstractComponent, domain::AbstractDomain)
A container for a component to be evaluated on a specific domain.
# Fields:
- `comp::AbstractComponent`: the wrapped component;
- `domain::AbstractDomain`: the domain where the component is supposed to be evaluated;
- `counter::Int`: the number of times the component has been evaluated since creation of the `CompEval` object;
- `lastparvalues::Vector{Float64}`: the parameter values used in the last evaluation. A call to `update!()` with the same values stored in `lastparvalues` will not result in a new evaluation;
- `deps::Vector{Vector{Float64}}`: the evaluation buffers of all dependencies;
- `buffer::Vector{Float64}`: the buffer to store the outcome of the component.
"""
mutable struct CompEval{TComp <: AbstractComponent, TDomain <: AbstractDomain}
comp::TComp
domain::TDomain
counter::Int
lastparvalues::Vector{Float64}
deps::Vector{Vector{Float64}}
buffer::Vector{Float64}
function CompEval(comp::AbstractComponent, domain::AbstractDomain)
buffer = prepare!(comp, domain)
return new{typeof(comp), typeof(domain)}(
comp, domain, 0,
fill(NaN, length(getparams(comp))),
Vector{Vector{Float64}}(),
buffer)
end
end
"""
evaluate!(ceval::CompEval, pvalues::Vector{Float64})
Evaluate a component using the provided parameter values. Outcomes shall be stored in the `CompEval.buffer` vector.
"""
evaluate!(ceval::CompEval{T, D}, par_values...) where {T <: AbstractComponent, D <: AbstractDomain} =
error("No evaluate! method implemented for CompEval{$(T), $(D)}")
"""
update!(ceval::CompEval, pvalues::Vector{Float64})
Update a `CompEval` structure using the provided parameter values.
The component is actually evaluated if one of the following applies:
- the component has never been evaluated;
- the component has at least one dependency (whose evaluation may have changed since its last evaluation);
- at least one parameter value has changed since last evaluation.
If none of the above applies, no evaluation occurs.
"""
function update!(ceval::CompEval{<: AbstractComponent, <: AbstractDomain},
pvalues::AbstractVector{Float64})
if any(ceval.lastparvalues .!= pvalues) || (ceval.counter == 0) || (length(ceval.deps) > 0)
evaluate!(ceval, pvalues...)
ceval.lastparvalues .= pvalues
ceval.counter += 1
end
end
# Evaluate component on the given domain. Parameter values are the
# ones stored in the component unless a custom value is provided via a
# keyword.
function (comp::AbstractComponent)(domain::AbstractDomain; kws...)
@assert length(dependencies(comp)) == 0 "Can't evaluate a stand-alone component with dependencies."
ceval = CompEval(comp, domain)
pvalues = OrderedDict([(pname, par.val) for (pname, par) in getparams(comp)])
for (pname, pval) in kws
if pname in keys(pvalues)
pvalues[pname] = pval
else
@warn "$pname is not a parameter name for $(typeof(comp)). Valid names are: " * join(string.(keys(pvalues)), ", ")
end
end
update!(ceval, collect(values(pvalues)))
return ceval.buffer
end
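#= Usage sketch (illustrative): evaluate a stand-alone component on a domain,
   optionally overriding parameter values via keywords (`GModelFit.Gaussian` is
   one of the built-in components included below).

    comp = GModelFit.Gaussian(1, 3, 0.5)
    y1 = comp(Domain(0.:0.1:6.))              # use parameter values stored in `comp`
    y2 = comp(Domain(0.:0.1:6.), center=2.5)  # temporarily override `center`
=#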
# ====================================================================
# Built-in components
include("components/FComp.jl")
include("components/OffsetSlope.jl")
include("components/Polynomial.jl")
include("components/Gaussian.jl")
include("components/Lorentzian.jl")
include("components/SumReducer.jl")
# ====================================================================
struct ParameterVectors
params::PVModel{Parameter}
values::PVModel{Float64}
actual::PVModel{Float64}
ifree::Vector{Int}
ParameterVectors() = new(PVModel{Parameter}(), PVModel{Float64}(),
PVModel{Float64}(), Vector{Int}())
end
function empty!(pv::ParameterVectors)
empty!(pv.params)
empty!(pv.values)
empty!(pv.actual)
empty!(pv.ifree)
end
function push!(pv::ParameterVectors, cname::Symbol, pname::Symbol, par::Parameter)
push!(pv.params, cname, pname, par)
push!(pv.values, cname, pname, par.val)
push!(pv.actual, cname, pname, par.actual)
if !par.fixed
push!(pv.ifree, length(items(pv.params)))
end
end
# ====================================================================
"""
ModelEval(model::Model, domain::AbstractDomain)
A structure containing the information required to evaluate a model on a specific domain, and to compare the outcomes to a single empirical dataset.
The model and all component evaluations can be obtained by using the `Model` object as if it were a function: with no arguments it returns the main component evaluation, while passing a `Symbol` as argument returns the evaluation of the component with that name.
"""
struct ModelEval
model::Model
domain::AbstractDomain
cevals::OrderedDict{Symbol, CompEval}
pv::ParameterVectors
pvmulti::Vector{PVModel{Float64}}
maincomp::Vector{Symbol}
function ModelEval(model::Model, domain::AbstractDomain)
out = new(model, domain, OrderedDict{Symbol, CompEval}(),
ParameterVectors(), Vector{PVModel{Float64}}(),
Vector{Symbol}())
# update!(out) This would cause error in the multimodel case
return out
end
end
free_params(meval::ModelEval) = collect(items(meval.pv.params)[meval.pv.ifree])
nfree(meval::ModelEval) = length(meval.pv.ifree)
"""
update!(meval::ModelEval)
Update a `ModelEval` structure by evaluating all components in the model.
"""
function update!(meval::ModelEval)
(length(meval.model) == 0) && (return meval) # to handle empty Model object
update_init!(meval)
update_evaluation!(meval)
update_finalize!(meval)
return meval
end
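#= Usage sketch (illustrative): a `ModelEval` binds a `Model` to a domain and
   caches all component evaluations.

    model = Model(@fd (x, b=1., m=0.5) -> (b .+ m .* x))
    meval = ModelEval(model, Domain(1.:5.))
    update!(meval)
    last_evaluation(meval)   # evaluation of the main component
=#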
# Evaluation step init:
# - update internal structures before fitting
function update_init!(meval::ModelEval)
empty!(meval.maincomp)
push!(meval.maincomp, find_maincomp(meval.model))
for (cname, comp) in meval.model.comps
(cname in keys(meval.cevals)) && continue
meval.cevals[cname] = CompEval(comp, meval.domain)
end
empty!(meval.pv)
for (cname, ceval) in meval.cevals
for (pname, _par) in getparams(ceval.comp)
# Parameter may be changed here, hence we take a copy of the original one
par = deepcopy(_par)
if !(par.low <= par.val <= par.high)
s = "Value outside limits for param [$(cname)].$(pname):\n" * string(par)
error(s)
end
if isnan(par.low) || isnan(par.high) || isnan(par.val)
s = "NaN value detected for param [$(cname)].$(pname):\n" * string(par)
error(s)
end
if !isnothing(par.patch)
@assert isnothing(par.mpatch) "Parameter [$cname].$pname has both patch and mpatch fields set, while only one is allowed"
if isa(par.patch, Symbol) # use same param. value from a different component
par.fixed = true
else # invoke a patch function
@assert length(par.patch.args) in [1,2]
if length(par.patch.args) == 1
par.fixed = true
else
par.fixed = false
end
end
elseif !isnothing(par.mpatch)
@assert length(meval.pvmulti) > 0 "Parameter [:$(cname)].$pname has the mpatch field set but no other Model is being considered"
@assert length(par.mpatch.args) in [1,2]
if length(par.mpatch.args) == 1
par.fixed = true
else
par.fixed = false
end
end
if meval.model.fixed[cname]
par.fixed = true
end
push!(meval.pv, cname, pname, par)
end
empty!(ceval.deps)
i = 1
for d in dependencies(meval.model, cname, select_domain=true)
push!(ceval.deps, coords(meval.domain, i))
i += 1
end
for d in dependencies(meval.model, cname, select_domain=false)
push!(ceval.deps, meval.cevals[d].buffer)
end
end
end
# Set new model parameters
function update_setparvals(meval::ModelEval, pvalues::Vector{Float64})
items(meval.pv.values)[meval.pv.ifree] .= pvalues
end
# Evaluation step fit:
# - copy all parameter values into actual;
# - update actual by invoking the patch functions;
# - evaluation of all components
function update_evaluation!(meval::ModelEval)
# Copy pvalues into actual
items(meval.pv.actual) .= items(meval.pv.values)
# Patch parameter values
for (cname, comp) in meval.pv.params
for (pname, par) in comp
if !isnothing(par.patch)
@assert isnothing(par.mpatch) "Parameter [:$(cname)].$pname has both patch and mpatch fields set, while only one is allowed"
if isa(par.patch, Symbol) # use same param. value from a different component
meval.pv.actual[cname][pname] = meval.pv.values[par.patch][pname]
else # invoke a patch function
if length(par.patch.args) == 1
meval.pv.actual[cname][pname] = par.patch(meval.pv.values)
else
meval.pv.actual[cname][pname] = par.patch(meval.pv.values, meval.pv.values[cname][pname])
end
end
elseif !isnothing(par.mpatch)
@assert length(meval.pvmulti) > 0 "Parameter [:$(cname)].$pname has the mpatch field set but no other Model is being considered"
if length(par.mpatch.args) == 1
meval.pv.actual[cname][pname] = par.mpatch(meval.pvmulti)
else
meval.pv.actual[cname][pname] = par.mpatch(meval.pvmulti, meval.pv.values[cname][pname])
end
end
end
end
# Evaluation of all components, starting from the main one and
# following dependencies
function update_compeval_recursive(meval::ModelEval, cname::Symbol)
for d in dependencies(meval.model, cname)
update_compeval_recursive(meval, d)
end
update!(meval.cevals[cname], items(meval.pv.actual[cname]))
end
update_compeval_recursive(meval, meval.maincomp[1])
end
# Evaluation step finalize:
# - copy back bestfit, actual values and uncertainties into their original Parameter structures.
function update_finalize!(meval::ModelEval, uncerts=Vector{Float64}[])
i = 1
for (cname, comp) in meval.pv.params
for (pname, par) in comp
par.val = meval.pv.values[cname][pname]
par.actual = meval.pv.actual[ cname][pname]
if (length(uncerts) > 0) && (!par.fixed)
par.unc = uncerts[i]
i += 1
else
par.unc = NaN
end
end
end
# Also update Model's parameters
for (cname, ceval) in meval.cevals
setparams!(meval.model[cname], meval.pv.params[cname])
end
end
"""
evalcounter(meval::ModelEval, cname::Symbol)
Return the number of times a component has been evaluated.
"""
evalcounter(meval::ModelEval, cname::Symbol) = meval.cevals[cname].counter
evalcounter(model::Model, cname::Symbol) = "???"
"""
evalcounters(meval::ModelEval)
Return a `OrderedDict{Symbol, Int}` with the number of times each model component has been evaluated.
"""
evalcounters(meval::ModelEval) = OrderedDict([cname => evalcounter(meval, cname) for cname in keys(meval.cevals)])
"""
last_evaluation(meval::ModelEval)
last_evaluation(meval::ModelEval, name::Symbol)
Return the last evaluation of the component named `name` in a `ModelEval` object. If `name` is not provided the evaluation of the main component is returned.
"""
last_evaluation(meval::ModelEval) = last_evaluation(meval, meval.maincomp[1])
last_evaluation(meval::ModelEval, name::Symbol) = reshape(meval.domain, meval.cevals[name].buffer)
# ====================================================================
# Evaluate Model on the given domain
function (model::Model)(domain::AbstractDomain, cname::Union{Nothing, Symbol}=nothing)
meval = ModelEval(model, domain)
update!(meval)
if isnothing(cname)
return last_evaluation(meval)
else
return last_evaluation(meval, cname)
end
end
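#= Usage sketch (illustrative): evaluate a whole model, or a single component,
   on a domain using the current parameter values.

    model = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
                  :bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
                  :main => SumReducer(:l1, :bkg))
    dom = Domain(0.:0.05:6.)
    model(dom)        # evaluation of the main component (:main)
    model(dom, :l1)   # evaluation of the :l1 component alone
=#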
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 4545 | # ====================================================================
"""
FitStats
A structure representing the results of a fitting process.
# Fields:
- `elapsed::Float64`: elapsed time (in seconds);
- `ndata::Int`: number of data empirical points;
- `nfree::Int`: number of free parameters;
- `dof::Int`: ndata - nfree;
- `fitstat::Float64`: fit statistic (equivalent to the reduced χ^2 for `Measures` objects);
- `status`: minimizer exit status (tells whether the convergence criterion has been satisfied, or whether an error occurred during fitting);
Note: the `FitStats` fields are supposed to be accessed directly by the user.
"""
struct FitStats
elapsed::Float64
ndata::Int
nfree::Int
dof::Int
fitstat::Float64
# gofstat::Float64
# log10testprob::Float64
status::AbstractMinimizerStatus
end
function FitStats(resid::AbstractResiduals, status::AbstractMinimizerStatus, elapsed=NaN)
# tp = logccdf(Chisq(resid.dof), gof_stat) * log10(exp(1))
ndata = length(residuals(resid))
nf = nfree(resid)
FitStats(elapsed,
ndata, nf, ndata - nf, fit_stat(resid),
status)
end
# ====================================================================
"""
Residuals{T <: AbstractMeasures, M <: AbstractMinimizer}
A structure representing the distance between a `ModelEval` and a dataset. The "distance" is expressed in terms of weighted residuals.
A minimizer can be invoked via the `minimize!` function to reduce such distance by varying the model parameter values.
# Fields:
- `meval::ModelEval`: Model evaluation on a given domain;
- `data::AbstractMeasures`: Empirical dataset to be compared to the model;
- `buffer::Vector{Float64}`: Weighted residuals for each point in the domain;
- `mzer::AbstractMinimizer`: Minimizer used to reduce the residuals.
"""
struct Residuals{T <: AbstractMeasures, M <: AbstractMinimizer} <: AbstractResiduals{T, M}
meval::ModelEval
data::T
buffer::Vector{Float64}
mzer::M
function Residuals(meval::ModelEval, data::T, mzer::M=dry()) where {T <: AbstractMeasures, M <: AbstractMinimizer}
update!(meval)
buffer = fill(NaN, length(data))
return new{T,M}(meval, data, buffer, mzer)
end
end
free_params(resid::Residuals) = free_params(resid.meval)
nfree(resid::Residuals) = nfree(resid.meval)
dof(resid::Residuals) = length(resid.data) - nfree(resid)
residuals(resid::Residuals) = resid.buffer
function residuals(resid::Residuals, pvalues::Vector{Float64})
update_setparvals(resid.meval, pvalues)
update_evaluation!(resid.meval)
resid.buffer .= reshape((last_evaluation(resid.meval) .- values(resid.data)) ./ uncerts(resid.data), :)
return resid.buffer
end
fit_stat(resid::Residuals{Measures{N}}) where N =
sum(abs2, resid.buffer) / dof(resid)
function finalize!(resid::Residuals, best::Vector{Float64}, uncerts::Vector{Float64})
@assert nfree(resid) == length(best) == length(uncerts)
residuals(resid, best)
update_finalize!(resid.meval, uncerts)
end
# ====================================================================
"""
minimize!(resid::Residuals)
Invoke a minimizer to reduce the residuals between a model and a dataset.
"""
function minimize!(resid::Residuals)
starttime = time()
update!(resid.meval)
@assert nfree(resid) > 0 "No free parameter in the model"
status = _minimize!(resid)
bestfit = ModelSnapshot(resid.meval)
stats = FitStats(resid, status, time() - starttime)
# test_serialization(bestfit, stats, resid.data)
return (bestfit, stats)
end
"""
fit!(model::Model, data::Measures; minimizer::AbstractMinimizer=lsqfit())
Fit a model to an empirical data set using the specified minimizer (default: `lsqfit()`). Upon return the parameter values in the `Model` object are set to the best fit ones.
"""
function fit!(model::Model, data::Measures; minimizer::AbstractMinimizer=lsqfit())
meval = ModelEval(model, data.domain)
update!(meval)
resid = Residuals(meval, data, minimizer)
return minimize!(resid)
end
"""
fit(model::Model, data::Measures; minimizer::AbstractMinimizer=lsqfit())
Fit a model to an empirical data set using the specified minimizer (default: `lsqfit()`). See also `fit!`.
"""
fit(model::Model, data::Measures; kws...) = fit!(deepcopy(model), data; kws...)
"""
compare(model::Model, data::Measures)
Compare a model to a dataset and return a `FitStats` object.
"""
compare(model::Model, data::Measures) = fit!(model, data, minimizer=dry())
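#= Usage sketch (illustrative): fit a model to a (mock) dataset.

    model = Model(@fd (x, a2=1, a1=1, a0=5) -> (a2 .* x.^2 .+ a1 .* x .+ a0))
    dom   = Domain([0.1, 1.1, 2.1, 3.1, 4.1])
    data  = GModelFit.mock(Measures, model, dom, seed=1)
    bestfit, stats = fit(model, data)   # `bestfit` is a ModelSnapshot, `stats` a FitStats
=#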
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 1104 | import Gnuplot
import Gnuplot.recipe
Gnuplot.recipe(data::Measures{1}) =
Gnuplot.parseSpecs("set bars 0",
coords(domain(data)), values(data), uncerts(data),
"with yerr t 'Data' lc rgb 'gray'")
Gnuplot.recipe(meval::ModelEval) = Gnuplot.recipe(GModelFit.ModelSnapshot(meval))
function Gnuplot.recipe(model::GModelFit.ModelSnapshot;
keep=Symbol[], skip=Symbol[])
@assert ndims(domain(model)) == 1
out = Vector{Gnuplot.AbstractGPSpec}()
for (k,v) in model.buffers
(k == model.maincomp) && continue
(k in skip) && continue
if (length(keep) == 0) || (k in keep)
#isa(v.comp, GModelFit.FComp) || isa(v.comp, GModelFit.SumReducer) || continue
append!(out, Gnuplot.parseSpecs(coords(domain(model)), model(k),
"with lines t '$(k)'"))
end
end
append!(out, Gnuplot.parseSpecs(coords(domain(model)), model(),
"with lines t 'Model' lc rgb 'black' lw 2"))
return out
end
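#= Usage sketch (illustrative, requires the Gnuplot.jl package): thanks to the
   recipes above, `Measures{1}` and `ModelSnapshot` objects can be passed
   directly to `@gp`.  Here `data` and `bestfit` are assumed to come from a
   previous `fit` call.

    using Gnuplot
    @gp data bestfit
=#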
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 5611 | # ====================================================================
# Minimizers
#
# --------------------------------------------------------------------
abstract type AbstractMinimizerStatus end
struct MinimizerStatusOK <: AbstractMinimizerStatus
end
struct MinimizerStatusDry <: AbstractMinimizerStatus
end
struct MinimizerStatusWarn <: AbstractMinimizerStatus
message::String
end
struct MinimizerStatusError <: AbstractMinimizerStatus
message::String
end
# --------------------------------------------------------------------
abstract type AbstractMinimizer end
abstract type AbstractResiduals{T <: AbstractMeasures, M <: AbstractMinimizer} end
# --------------------------------------------------------------------
struct dry <: AbstractMinimizer; end
function _minimize!(resid::AbstractResiduals{Measures{N}, dry}) where N
params = free_params(resid)
residuals(resid, getfield.(params, :val))
finalize!(resid,
getfield.(params, :val),
fill(NaN, length(params)))
return MinimizerStatusDry()
end
# --------------------------------------------------------------------
import LsqFit
mutable struct lsqfit <: AbstractMinimizer
result
lsqfit() = new(nothing)
end
function _minimize!(resid::AbstractResiduals{Measures{N}, lsqfit}) where N
params = free_params(resid)
ndata = length(residuals(resid))
dof = ndata - length(params)
prog = ProgressUnknown(desc="Model (dof=$(dof)) evaluations:", dt=0.5, showspeed=true, color=:light_black)
resid.mzer.result = LsqFit.curve_fit((dummy, pvalues) -> begin
ProgressMeter.next!(prog; showvalues=() -> [(:fit_stat, fit_stat(resid))])
residuals(resid, pvalues)
end,
1.:ndata, fill(0., ndata),
getfield.(params, :val),
lower=getfield.(params, :low),
upper=getfield.(params, :high))
ProgressMeter.finish!(prog)
if !resid.mzer.result.converged
        return MinimizerStatusError("Not converged")
end
finalize!(resid, getfield.(Ref(resid.mzer.result), :param), LsqFit.stderror(resid.mzer.result))
return MinimizerStatusOK()
end
# --------------------------------------------------------------------
import CMPFit
#=
NOTE: using custom thresholds for ftol, gtol and xtol may lead to
unexpected behaviour. E.g. setting ftol = 1.e-6 may lead to a
non-optimal fit with exit status 2 (possibly because in a single
iteration the improvement is particularly small).
The best approach is probably to use default tolerance values and
either increase the maximum allowed number of iterations
(config.maxiter) or set a threshold for relative fit statistic
improvements (ftol_after_maxiter) to be checked after the minimizer
iterated for the maximum allowed number of times.
=#
mutable struct cmpfit <: AbstractMinimizer
config::CMPFit.Config
ftol_after_maxiter::Float64
result
function cmpfit()
out = new(CMPFit.Config(), 1e-4, nothing)
out.config.maxiter = 1000
return out
end
end
function _minimize!(resid::AbstractResiduals{Measures{N}, cmpfit}) where N
params = free_params(resid)
guess = getfield.(params, :val)
low = getfield.(params, :low)
high = getfield.(params, :high)
parinfo = CMPFit.Parinfo(length(guess))
for i in 1:length(guess)
llow = isfinite(low[i]) ? 1 : 0
lhigh = isfinite(high[i]) ? 1 : 0
parinfo[i].limited = (llow, lhigh)
parinfo[i].limits = (low[i], high[i])
end
dof = length(residuals(resid)) - length(params)
residuals(resid, guess)
last_fitstat = sum(abs2, residuals(resid))
prog = ProgressUnknown(desc="Model (dof=$(dof)) evaluations:", dt=0.5, showspeed=true, color=:light_black)
while true
resid.mzer.result = CMPFit.cmpfit((pvalues) -> begin
ProgressMeter.next!(prog; showvalues=() -> [(:fit_stat, fit_stat(resid))])
residuals(resid, pvalues)
end,
guess, parinfo=parinfo, config=resid.mzer.config)
if resid.mzer.result.status <= 0
return MinimizerStatusError("CMPFit status = $(resid.mzer.result.status)")
end
if (resid.mzer.result.status == 5)
Δfitstat = (last_fitstat - resid.mzer.result.bestnorm) / last_fitstat
if Δfitstat > resid.mzer.ftol_after_maxiter
println("Reached max. number of iteration but relative Δfitstat = $(Δfitstat) > $(resid.mzer.ftol_after_maxiter), continue minimization...\n")
last_fitstat = resid.mzer.result.bestnorm
guess = getfield.(Ref(resid.mzer.result), :param)
continue
end
end
ProgressMeter.finish!(prog)
finalize!(resid,
getfield.(Ref(resid.mzer.result), :param),
getfield.(Ref(resid.mzer.result), :perror))
if resid.mzer.result.status == 2
return MinimizerStatusWarn("CMPFit status = 2 may imply one (or more) guess values are too far from optimum")
elseif resid.mzer.result.status == 5
return MinimizerStatusWarn("CMPFit status = 5, reached maximum allowed number of iteration.")
end
return MinimizerStatusOK()
end
end
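#= Usage sketch (illustrative): select a specific minimizer via the `minimizer`
   keyword of `fit`/`fit!` (here `model` and `data` are assumed to be already
   defined).

    bestfit, stats = fit(model, data, minimizer=GModelFit.cmpfit())  # CMPFit backend
    bestfit, stats = fit(model, data, minimizer=GModelFit.lsqfit())  # LsqFit backend (default)
=#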
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 5940 | function free_params_indices(mevals::Vector{ModelEval})
out = Vector{NTuple{3, Int}}()
i1 = 1
for id in 1:length(mevals)
nn = length(mevals[id].pv.ifree)
if nn > 0
i2 = i1 + nn - 1
push!(out, (id, i1, i2))
i1 += nn
end
end
return out
end
function update!(mevals::Vector{ModelEval})
update_init!(mevals)
update_evaluation!(mevals)
update_finalize!(mevals)
return mevals
end
function update_init!(mevals::Vector{ModelEval})
# Populate pvmulti fields in all Model structures to notify we are
# going to perform a multi-model fitting
for i in 1:length(mevals)
empty!(mevals[i].pvmulti)
for j in 1:length(mevals)
push!(mevals[i].pvmulti, mevals[j].pv.values)
end
end
update_init!.(mevals)
end
function update_setparvals(mevals::Vector{ModelEval}, pvalues::Vector{Float64})
for (id, i1, i2) in free_params_indices(mevals)
update_setparvals(mevals[id], pvalues[i1:i2])
end
end
function update_evaluation!(mevals::Vector{ModelEval})
update_evaluation!.(mevals)
end
function update_finalize!(mevals::Vector{ModelEval}, uncerts=Vector{Float64}[])
if length(uncerts) > 0
for (id, i1, i2) in free_params_indices(mevals)
            update_finalize!(mevals[id], uncerts[i1:i2])
end
else
update_finalize!.(mevals)
end
end
function free_params(mevals::Vector{ModelEval})
out = Vector{Parameter}()
for id in 1:length(mevals)
append!(out, free_params(mevals[id]))
end
return out
end
nfree(mevals::Vector{ModelEval}) = sum(nfree.(mevals))
# ====================================================================
"""
MultiResiduals{T <: AbstractMeasures, M <: AbstractMinimizer}
A structure representing the distance between a `Vector{ModelEval}` and a corresponding number of datasets. The "distance" is expressed in terms of weighted residuals.
A minimizer can be invoked via the `minimize!` function to reduce such distance by varying the parameter values for each model.
# Fields:
- `mevals::Vector{ModelEval}`: Vector of model evaluations;
- `data::Vector{<: AbstractMeasures}`: Empirical datasets to be compared to the models;
- `buffer::Vector{Float64}`: Weighted residuals for each point in the domain of each dataset;
- `mzer::AbstractMinimizer`: Minimizer used to reduce the residuals.
"""
struct MultiResiduals{T <: AbstractMeasures, M <: AbstractMinimizer} <: AbstractResiduals{T, M}
mevals::Vector{ModelEval}
data::Vector{T}
buffer::Vector{Float64}
mzer::M
function MultiResiduals(mevals::Vector{ModelEval}, datasets::Vector{T}, mzer::M=dry()) where {T <: AbstractMeasures, M <: AbstractMinimizer}
@assert length(mevals) == length(datasets)
update!(mevals)
buffer = fill(NaN, sum(length.(datasets)))
return new{T,M}(mevals, datasets, buffer, mzer)
end
end
free_params(mresid::MultiResiduals) = free_params(mresid.mevals)
nfree(mresid::MultiResiduals) = nfree(mresid.mevals)
dof(mresid::MultiResiduals) = sum(length.(mresid.data)) - nfree(mresid)
residuals(mresid::MultiResiduals) = mresid.buffer
function residuals(mresid::MultiResiduals, pvalues::Vector{Float64})
# Must set pvalues on all models before any evaluation
update_setparvals(mresid.mevals, pvalues)
# Populate residuals
i1 = 1
for i in 1:length(mresid.mevals)
meval = mresid.mevals[i]
update_evaluation!(meval)
nn = length(last_evaluation(meval))
if nn > 0
i2 = i1 + nn - 1
mresid.buffer[i1:i2] .= (last_evaluation(meval) .- values(mresid.data[i])) ./ uncerts(mresid.data[i])
i1 += nn
end
end
return mresid.buffer
end
fit_stat(mresid::MultiResiduals) =
sum(abs2, mresid.buffer) / dof(mresid)
function finalize!(mresid::MultiResiduals, best::Vector{Float64}, uncerts::Vector{Float64})
@assert nfree(mresid) == length(best) == length(uncerts)
residuals(mresid, best)
for (id, i1, i2) in free_params_indices(mresid.mevals)
update_finalize!(mresid.mevals[id], uncerts[i1:i2])
end
end
# ====================================================================
"""
minimize!(mresid::MultiResiduals)
Invoke a minimizer to reduce the residuals between a set of models and a corresponding number of datasets.
"""
function minimize!(mresid::MultiResiduals)
starttime = time()
update!(mresid.mevals)
@assert nfree(mresid) > 0 "No free parameter in the model"
status = _minimize!(mresid)
bestfit = ModelSnapshot.(mresid.mevals)
stats = FitStats(mresid, status, time() - starttime)
# test_serialization(bestfit, stats, mresid.data)
return (bestfit, stats)
end
"""
fit!(multi::Vector{Model}, data::Vector{Measures{N}}; minimizer::AbstractMinimizer=lsqfit())
Fit a multi-model to a set of empirical data sets using the specified minimizer (default: `lsqfit()`). Upon return the parameter values in the `Model` objects are set to the best fit ones.
"""
function fit!(multi::Vector{Model}, data::Vector{Measures{N}}; minimizer::AbstractMinimizer=lsqfit()) where N
mevals = [ModelEval(multi[i], data[i].domain) for i in 1:length(multi)]
update!(mevals)
mresid = MultiResiduals(mevals, data, minimizer)
return minimize!(mresid)
end
"""
fit(multi::Vector{Model}, data::Vector{Measures{N}}; minimizer::AbstractMinimizer=lsqfit())
Fit a multi-model to a set of empirical data sets using the specified minimizer (default: `lsqfit()`). See also `fit!`.
"""
fit(multi::Vector{Model}, data::Vector{Measures{N}}; kws...) where N = fit!(deepcopy(multi), data; kws...)
"""
compare(multi::Vector{Model}, data::Vector{Measures{N}})
Compare a multi-model to a set of datasets using the current parameter values (no minimization is performed) and return the corresponding `ModelSnapshot` and `FitStats` objects.
"""
compare(multi::Vector{Model}, data::Vector{Measures{N}}) where N = fit!(multi, data, minimizer=dry())
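#= Usage sketch (illustrative): fit two models sharing a parameter via `mpatch`.

    dom = Domain(0.:0.05:6.)
    model1 = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
                   :bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
                   :main => SumReducer(:l1, :bkg))
    model2 = Model(:l1 => GModelFit.Gaussian(1, 3, 0.5),
                   :bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
                   :main => SumReducer(:l1, :bkg))
    multi = [model1, model2]
    multi[2][:bkg].offset.mpatch = @fd m -> m[1][:bkg].offset  # share the background offset
    data = GModelFit.mock(Measures, multi, [dom, dom], seed=1)
    bestfit, stats = fit(multi, data)
=#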
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 2349 | #=
Note: I can't compile ModelSnapshot since it involves a show()
call, which in turn involve PrettyTables, which is not currently
possible to precompile (an error is raised).
=#
using PrecompileTools
@compile_workload begin
let
x = [0.1, 1.1, 2.1, 3.1, 4.1]
domain = Domain(x)
model = Model(@fd (x, a2=1, a1=1, a0=5) -> (a2 .* x.^2 .+ a1 .* x .+ a0))
data = GModelFit.mock(Measures, model, domain, seed=1)
bestfit, stats = fit(model, data)
x = 0:0.05:6
model = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
:l2 => GModelFit.Gaussian(1, 3, 0.5),
:bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
:main => SumReducer(:l1, :l2, :bkg));
model[:l2].norm.patch = :l1
model[:l2].norm.patch = @fd (m, v) -> v + m[:l1].norm
data = GModelFit.mock(Measures, model, Domain(x), seed=1)
bestfit, stats = fit(model, data, minimizer=GModelFit.cmpfit())
x = 0:0.05:6
model1 = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
:l2 => GModelFit.Gaussian(1, 3, 0.5),
:bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
:main => SumReducer(:l1, :l2, :bkg));
model2 = Model(:l1 => GModelFit.Gaussian(0.8, 2.1, 0.1),
:l2 => GModelFit.Gaussian(1.2, 2.5, 0.4),
:bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
:main => SumReducer(:l1, :l2, :bkg));
model = [model1, model2]
freeze!(model[1], :bkg);
freeze!(model[2], :bkg);
thaw!(model[1], :bkg);
thaw!(model[2], :bkg);
model[2][:bkg].offset.mpatch = @fd m -> m[1][:bkg].offset
model[2][:bkg].slope.mpatch = @fd m -> m[1][:bkg].slope
model[1][:l2].center.mpatch = @fd m -> m[2][:l2].center
data = GModelFit.mock(Measures, model, [Domain(x), Domain(x)], seed=1)
bestfit, stats = fit(model, data)
# mevals = [GModelFit.ModelEval(model[i], data[i].domain) for i in 1:length(model)]
# GModelFit.update!(mevals);
# GModelFit.update_setparvals(mevals[1], rand(GModelFit.nfree(mevals[1])))
# GModelFit.update_evaluation!(mevals[1])
# @gp GModelFit.last_evaluation(mevals[1])
end
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 9763 | #=
Serialization methods: output must be suitable to be stored in a file using the JSON format.
=#
_serialize(::Nothing) = nothing
_serialize(::Function) = nothing
_serialize(v::Expr) = "_TE_" * string(v)
_serialize(v::Date) = "_TD_" * string(v)
_serialize(v::DateTime) = "_TDT" * string(v)
_serialize(v::String) = v
_serialize(v::Symbol) = "_TS_" * string(v)
_serialize(v::AbstractVector) = _serialize.(v)
_serialize(v::Tuple) = [_serialize.(v)...]
_serialize(v::Number) = (isnan(v) || isinf(v) ? "_TN_" * string(v) : v)
function _serialize(vv::AbstractDict{Symbol,T}) where {T <: Any}
out = OrderedDict{String, Any}()
for (key, val) in vv
out[String(key)] = _serialize(val)
end
return out
end
function _serialize_struct(vv; add_show=false)
out = OrderedDict{String, Any}()
out["_structtype"] = string(typeof(vv))
for field in fieldnames(typeof(vv))
ff = getfield(vv, field)
out[String(field)] = _serialize(ff)
end
if add_show
io = IOBuffer()
if showsettings.plain
show(io , vv)
else
ctx = IOContext(io, :color => true)
show(ctx, vv)
end
out["show"] = String(take!(io))
end
return out
end
_serialize(vv::PV.PVComp) = _serialize_struct(vv)
_serialize(vv::PV.PVModel) = _serialize_struct(vv)
_serialize(vv::Parameter) = _serialize_struct(vv)
_serialize(vv::FunctDesc) = _serialize_struct(vv)
_serialize(vv::FitStats) = _serialize_struct(vv, add_show=true)
_serialize(vv::ModelSnapshot) = _serialize_struct(vv, add_show=true)
_serialize(vv::AbstractMinimizerStatus) = _serialize_struct(vv, add_show=true)
_serialize(vv::AbstractDomain) = _serialize_struct(vv, add_show=true)
_serialize(vv::AbstractMeasures) = _serialize_struct(vv, add_show=true)
_serialize(model::ModelSnapshot, fitstats::FitStats ) = _serialize([model, fitstats])
_serialize(model::ModelSnapshot, fitstats::FitStats, data::AbstractMeasures ) = _serialize([model, fitstats, data])
_serialize(multi::Vector{Model } ) = _serialize(ModelSnapshot.(multi))
_serialize(multi::Vector{ModelSnapshot}, fitstats::FitStats ) = _serialize([multi, fitstats])
function _serialize(multi::Vector{ModelSnapshot}, fitstats::FitStats, data::Vector{T}) where T <: AbstractMeasures
@assert length(multi) == length(data)
_serialize([multi, fitstats, data])
end
"""
GModelFit.serialize(filename::String, ::ModelSnapshot[, ::FitStats[, ::Measures]]; compress=false)
GModelFit.serialize(filename::String, ::Vector{ModelSnapshot}[, ::FitStats[, ::Vector{Measures}]]; compress=false)
Serialize GModelFit object(s) using a JSON format. The serializable objects are:
- `ModelSnapshot` and `Vector{ModelSnapshot}` (mandatory argument);
- `FitStats` (optional);
- `Measures` and `Vector{Measures}` (optional);
If `compress=true` the resulting JSON file will be compressed using GZip.
Objects can later be deserialized in a different Julia session with `GModelFit.deserialize`.
Note: The `GModelFit.serialize` function also accepts `Model` and `Vector{Model}` but they will be internally converted to `ModelSnapshot`(s).
## Example:
```julia-repl
# Create GModelFit objects
using GModelFit
model = Model(:linear => @fd (x, b=2, m=0.5) -> (b .+ x .* m))
data = Measures([4.01, 7.58, 12.13, 19.78, 29.04], 0.4)
bestfit, stats = fit(model, data)
# Serialize objects and save in a file
GModelFit.serialize("my_snapshot.json", bestfit, stats, data)
# Restore objects (possibly in a different Julia session)
using GModelFit
(bestit, stats, data) = GModelFit.deserialize("my_snapshot.json")
```
"""
function serialize(filename::String, args...; compress=false)
data = _serialize(args...)
filename = ensure_file_extension(filename, "json")
if compress
filename = ensure_file_extension(filename, "gz")
io = GZip.open(filename, "w")
else
io = open(filename, "w")
end
JSON.print(io, data)
close(io)
return filename
end
# ====================================================================
# Deserialization methods
_deserialize(::Nothing) = nothing
_deserialize(v::Number) = v
function _deserialize(v::AbstractVector)
tmp = _deserialize.(v)
tt = unique(typeof.(tmp))
if length(tt) == 1
out = similar(tmp, tt[1])
out .= tmp
else
out = tmp
end
return out
end
function _deserialize(v::String)
if length(v) > 4
magic = v[1:4]
if magic == "_TE_"
return Meta.parse(v)
elseif magic == "_TD_"
return Date(v[5:end])
elseif magic == "_TDT"
return DateTime(v[5:end])
elseif magic == "_TS_"
return Symbol(v[5:end])
elseif v == "_TN_Inf"
return +Inf
elseif v == "_TN_-Inf"
return -Inf
elseif v == "_TN_NaN"
return NaN
end
end
return v
end
function _deserialize(dd::AbstractDict)
function deserialized_function(args...)
@warn "Can't evaluate a deserialized function"
nothing
end
if "_structtype" in keys(dd)
if !isnothing(findfirst("PVComp{GModelFit.Parameter}", dd["_structtype"]))
return PVComp{Parameter}(_deserialize(dd["pnames"]), _deserialize(dd["indices"]), _deserialize(dd["data"]))
elseif !isnothing(findfirst("PVModel{GModelFit.Parameter}", dd["_structtype"]))
return PVModel{Parameter}(_deserialize(dd["comps"]), _deserialize(dd["indices"]), _deserialize(dd["data"]))
elseif dd["_structtype"] == "GModelFit.FunctDesc"
return FunctDesc(deserialized_function,
_deserialize(dd["display"]),
_deserialize(dd["args"]),
_deserialize(dd["optargs"]))
elseif dd["_structtype"] == "GModelFit.Parameter"
return Parameter(_deserialize(dd["val"]),
_deserialize(dd["low"]),
_deserialize(dd["high"]),
_deserialize(dd["fixed"]),
_deserialize(dd["patch"]),
_deserialize(dd["mpatch"]),
_deserialize(dd["actual"]),
_deserialize(dd["unc"]))
elseif dd["_structtype"] == "GModelFit.ModelSnapshot"
return ModelSnapshot(_deserialize(dd["domain"]),
_deserialize(dd["params"]),
_deserialize(dd["buffers"]),
_deserialize(dd["maincomp"]),
_deserialize(dd["comptypes"]),
_deserialize(dd["isfreezed"]),
_deserialize(dd["deps"]),
_deserialize(dd["evalcounters"]))
elseif dd["_structtype"] == "GModelFit.FitStats"
return FitStats(_deserialize(dd["elapsed"]),
_deserialize(dd["ndata"]),
_deserialize(dd["nfree"]),
_deserialize(dd["dof"]),
_deserialize(dd["fitstat"]),
_deserialize(dd["status"]))
elseif dd["_structtype"] == "GModelFit.MinimizerStatusOK"
return MinimizerStatusOK()
elseif dd["_structtype"] == "GModelFit.MinimizerStatusDry"
return MinimizerStatusDry()
elseif dd["_structtype"] == "GModelFit.MinimizerStatusWarn"
return MinimizerStatusWarn(_deserialize(dd["message"]))
elseif dd["_structtype"] == "GModelFit.MinimizerStatusError"
return MinimizerStatusError(_deserialize(dd["message"]))
elseif !isnothing(findfirst("CartesianDomain", dd["_structtype"]))
axis = _deserialize(dd["axis"])
roi = _deserialize(dd["roi"])
return CartesianDomain(axis..., roi=roi)
elseif !isnothing(findfirst("Domain", dd["_structtype"]))
axis = _deserialize(dd["axis"])
return Domain(axis...)
elseif !isnothing(findfirst("Measures", dd["_structtype"]))
dom = _deserialize(dd["domain"])
tmp = _deserialize(dd["values"])
if isa(dom, CartesianDomain)
return Measures(dom, reshape(tmp[1], size(dom)), reshape(tmp[2], size(dom)))
else
return Measures(dom, tmp[1], tmp[2])
end
else
error("Unrecognized structure in serialized data: " * dd["_structtype"])
end
else
out = OrderedDict{Symbol, Any}()
for (kk, vv) in dd
out[Symbol(kk)] = _deserialize(vv)
end
return out
end
end
function deserialize(filename::String)
if filename[end-2:end] == ".gz"
io = GZip.open(filename)
else
io = open(filename)
end
j = JSON.parse(io, dicttype=OrderedDict)
close(io)
return _deserialize(j)
end
function test_serialization(args...)
function comparedata(A::TA, B::TB) where {TA, TB}
isa(B, Function) && return
@assert TA == TB
if (TA <: AbstractVector) || (TA <: Tuple) || (TA <: AbstractDict)
@assert length(A) == length(B)
for i in eachindex(A)
comparedata(getindex(A, i), getindex(B, i))
end
elseif isstructtype(TA)
for i in 1:nfields(A)
comparedata(getfield(A, i), getfield(B, i))
end
else
@assert isequal(A, B)
end
end
dd = deserialize(serialize(tempname(), args...))
comparedata(dd, [args...])
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 12701 | mutable struct ShowSettings
plain::Bool
tableformat::TextFormat
floatformat::String
showfixed::Bool
border::Crayon
header::Crayon
subheader::Crayon
fixed::Crayon
error::Crayon
highlighted::Crayon
section::Crayon
ShowSettings() = new(false, tf_unicode_rounded, "%9.4g", true,
crayon"light_blue", crayon"light_blue negative bold",
crayon"dark_gray bold", crayon"dark_gray",
crayon"light_red", crayon"negative", crayon"green bold")
end
const showsettings = ShowSettings()
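#= Usage sketch (illustrative): the global `showsettings` structure controls how
   GModelFit objects are displayed, e.g.:

    GModelFit.showsettings.plain = true          # plain tables, no colors
    GModelFit.showsettings.floatformat = "%10.5g"
    GModelFit.showsettings.showfixed = false     # hide fixed parameters
=#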
function printtable(io, table, header, args...; formatters=(), hlines=:none, kw...)
if showsettings.plain
pretty_table(io, table; header=header, formatters=formatters, alignment=:l, crop=:none, tf=tf_compact, hlines=hlines,
highlighters=())
else
pretty_table(io, table; header=header, formatters=formatters, alignment=:l, crop=:none, tf=showsettings.tableformat, hlines=hlines,
border_crayon=showsettings.border, header_crayon=showsettings.header, subheader_crayon=showsettings.subheader,
kw...)
end
end
function section(io, args...; newline=true)
nl = newline ? "\n" : ""
if showsettings.plain
print(io, args..., nl)
else
print(io, showsettings.section, args..., nl, crayon"default")
end
end
function show(io::IO, dom::AbstractDomain)
section(io, string(typeof(dom)) * " (ndims: ", ndims(dom), ", length: ", length(dom), ")")
hrule = Vector{Int}()
push!(hrule, 0, 1, ndims(dom)+1)
table = Matrix{Union{Int,Float64}}(undef, ndims(dom), 6)
for i in 1:ndims(dom)
if isa(dom, CartesianDomain)
vv = axis(dom, i)
else
vv = coords(dom, i)
end
steps = 0
if length(vv) > 1
steps = vv .- circshift(vv, 1)
steps = steps[2:end]
end
table[i, 1] = i
table[i, 2] = length(vv)
table[i, 3:6] = [minimum(vv), maximum(vv), minimum(steps), maximum(steps)]
end
printtable(io, table, ["Dim", "Length", "Min val", "Max val", "Min step", "Max step"],
hlines=hrule, formatters=ft_printf(showsettings.floatformat, 3:6))
end
function show(io::IO, data::AbstractMeasures)
section(io, typeof(data), ": (length: ", (length(data)), ")")
table = Matrix{Union{String,Float64}}(undef, 0, 7)
hrule = Vector{Int}()
push!(hrule, 0, 1)
names = fieldnames(typeof(data))
error = Vector{Bool}()
for i in 1:length(data.labels)
vv = data.values[i]
nan = length(findall(isnan.(vv))) + length(findall(isinf.(vv)))
vv = vv[findall(isfinite.(vv))]
push!(error, nan > 0)
table = vcat(table, [data.labels[i] minimum(vv) maximum(vv) mean(vv) median(vv) std(vv) (nan > 0 ? string(nan) : "") ])
end
push!(hrule, 0, size(table)[1]+1)
if showsettings.plain
highlighters = nothing
else
highlighters = Highlighter((data,i,j) -> error[i], showsettings.error)
end
printtable(io, table, ["", "Min", "Max", "Mean", "Median", "Std. dev.", "Nan/Inf"],
hlines=hrule, formatters=ft_printf(showsettings.floatformat, 2:6),
highlighters=highlighters)
end
function show(io::IO, par::Parameter)
if par.fixed
println(io, "Value : ", par.val, " (fixed)")
elseif isnan(par.unc)
println(io, "Value : ", par.val, " [", par.low , " : ", par.high, "]")
else
if par.val == par.actual
println(io, "Value : ", par.val, " ± ", par.unc, " [", par.low , " : ", par.high, "] ")
else
println(io, "Value : ", par.val, " ± ", par.unc, " [", par.low , " : ", par.high, "], actual: " , par.actual)
end
end
end
function preparetable(comp::Union{AbstractComponent, GModelFit.PV.PVComp{GModelFit.Parameter}};
cname::String="?", ctype="?", cfixed=false)
table = Matrix{Union{String,Float64}}(undef, 0, 8)
fixed = Vector{Bool}()
warns = Vector{Bool}()
for (pname, param) in getparams(comp)
(!showsettings.showfixed) && param.fixed && continue
range = strip(@sprintf("%7.2g:%-7.2g", param.low, param.high))
# (range == "-Inf:Inf") && (range = "")
patch = ""
isa(param.patch, Symbol) && (patch = string(param.patch))
isa(param.patch, FunctDesc) && (patch = param.patch.display)
isa(param.mpatch,FunctDesc) && (patch = param.mpatch.display)
table = vcat(table,
permutedims([cname * (cfixed ? " (fixed)" : ""), ctype,
string(pname), range, param.val,
(param.fixed | cfixed ? " (fixed)" : param.unc),
(patch == "" ? "" : param.actual), patch]))
push!(fixed, param.fixed)
if !param.fixed && (isnan(param.unc) || (param.unc <= 0.))
push!(warns, true)
table[end,6] = ""
else
push!(warns, false)
end
if !showsettings.plain
cname = "" # delete from following lines within the same component box
ctype = ""
cfixed = false
end
end
return (table, fixed, warns)
end
function show(io::IO, comp::Union{AbstractComponent, GModelFit.PV.PVComp{GModelFit.Parameter}})
ctype = isa(comp, AbstractComponent) ? string(typeof(comp)) : "?"
(table, fixed, warns) = preparetable(comp, ctype=ctype)
if showsettings.plain
highlighters = nothing
else
highlighters = (Highlighter((data,i,j) -> (fixed[i] && (j in (3,4,5,6))), showsettings.fixed),
Highlighter((data,i,j) -> (warns[i] && (j in (3,4,5,6))), showsettings.error))
end
printtable(io, table, ["Component", "Type", "Param.", "Range", "Value", "Uncert.", "Actual", "Patch"],
formatters=ft_printf(showsettings.floatformat, 5:7),
highlighters=highlighters)
end
function show(io::IO, red::FunctDesc)
println(io, red.display)
end
function tabledeps(model::Union{Model, ModelSnapshot})
function alldeps(model::Union{Model, GModelFit.ModelSnapshot}, cname=nothing, level=0)
out = Vector{Tuple}()
if isnothing(cname)
cname = GModelFit.find_maincomp(model)
append!(out, alldeps(model, cname, level+1))
else
push!(out, (level, cname))
for d in GModelFit.dependencies(model, cname)
append!(out, alldeps(model, d, level+1))
end
end
return out
end
allcomps = alldeps(model)
maxdepth = maximum(getindex.(allcomps, 1))
prefix = fill("", length(allcomps), maxdepth)
for i in 1:length(allcomps)
prefix[i, allcomps[i][1]] = string(allcomps[i][2])
end
BRANCH = "├─╴"
BRCONT = "│ "
BREND = "└─╴"
for i in 2:length(allcomps)
for j in 1:(maxdepth-1)
if prefix[i,j] == "" # empty cell
if any(prefix[1:(i-1), j] .!= "") # ...it has a parent
if prefix[i,j+1] != "" # ...it is a branch
prefix[i,j] = BRANCH # Add a branch
end
end
end
end
end
for i in 2:length(allcomps)
for j in 1:(maxdepth-1)
if prefix[i,j] == BRANCH # it is a branch
# Check if this is the last row for this branch
if all(prefix[(i+1):end,j] .== "")
prefix[i,j] = BREND
elseif (prefix[i+1,j] != "") && (prefix[i+1,j] != BRANCH)
prefix[i,j] = BREND
end
end
end
end
# Join branches with vertical lines
for i in 2:length(allcomps)
for j in 1:(maxdepth-1)
if (prefix[i,j] == "") && (prefix[i-1,j] in [BRANCH, BRCONT])
prefix[i,j] = BRCONT
end
end
end
prefix[prefix .== ""] .= " "^length(BRANCH)
prefix = [string(rstrip(join(prefix[i,:]))) for i in 1:length(allcomps)]
if showsettings.plain
prefix = [replace(p,
BRANCH => "+ ",
BRCONT => "| ",
BREND => "+ ") for p in prefix]
end
table = Matrix{Union{String,Int,Float64}}(undef, length(allcomps), 8)
fixed = Vector{Bool}()
for i in 1:length(allcomps)
cname = allcomps[i][2]
table[i, 1] = prefix[i]
table[i, 2] = comptype(model, cname)
table[i, 3] = count(getproperty.(values(getparams(model[cname])), :fixed) .== false)
(table[i, 3] == 0) && (table[i, 3] = "")
if !isa(model, Model)
table[i, 4] = evalcounter(model, cname)
result = model(cname)
v = view(result, findall(isfinite.(result)))
if length(v) > 0
table[i, 5:7] .= [minimum(v), maximum(v), mean(v)]
else
table[i, 5:7] .= ["", "", ""]
end
table[i, 8] = count(isnan.(result)) + count(isinf.(result))
end
push!(fixed, isfreezed(model, cname))
end
return table, fixed
end
function show(io::IO, model::Union{Model, ModelSnapshot})
section(io, "Components:")
(length(keys(model)) == 0) && (return nothing)
table, fixed = tabledeps(model)
if showsettings.plain
highlighters = nothing
else
highlighters = Highlighter((data,i,j) -> fixed[i], showsettings.fixed)
end
if !isa(model, Model)
printtable(io, table, ["Component", "Type", "#Free", "Eval. count", "Min", "Max", "Mean", "NaN/Inf"],
hlines=[0,1, size(table)[1]+1],
formatters=ft_printf(showsettings.floatformat, 5:7),
highlighters=highlighters)
else
printtable(io, table[:, 1:3], ["Component", "Type", "#Free"],
hlines=[0,1, size(table)[1]+1],
formatters=ft_printf(showsettings.floatformat, 5:7),
highlighters=highlighters)
end
println(io)
section(io, "Parameters:")
(length(keys(model)) == 0) && (return nothing)
table = Matrix{Union{String,Float64}}(undef, 0, 8)
fixed = Vector{Bool}()
warns = Vector{Bool}()
hrule = Vector{Int}()
push!(hrule, 0, 1)
for cname in keys(model)
comp = model[cname]
(t, f, w) = preparetable(comp,
cname=string(cname),
ctype=comptype(model, cname),
cfixed=isfreezed(model, cname))
table = vcat(table, t)
append!(fixed, f .| isfreezed(model, cname))
append!(warns, w)
push!(hrule, length(fixed)+1)
end
if showsettings.plain
highlighters = nothing
else
if !isa(model, Model)
highlighters = (Highlighter((data,i,j) -> (fixed[i]), showsettings.fixed),
Highlighter((data,i,j) -> (warns[i] && (j in (3,4,5,6))), showsettings.error))
else
highlighters = Highlighter((data,i,j) -> (fixed[i]), showsettings.fixed)
end
end
printtable(io, table, ["Component", "Type", "Param.", "Range", "Value", "Uncert.", "Actual", "Patch"],
hlines=hrule, formatters=ft_printf(showsettings.floatformat, 5:7),
highlighters=highlighters)
end
function show(io::IO, multi::Union{Vector{Model}, Vector{ModelSnapshot}})
for id in 1:length(multi)
println(io)
section(io, join(fill("=", 30)) * " Model $id " * join(fill("=", 30)))
show(io, multi[id])
end
println(io)
end
getmessage(status::MinimizerStatusOK) = crayon"green", "OK"
getmessage(status::MinimizerStatusDry) = crayon"dark_gray", "Residuals not minimized"
getmessage(status::MinimizerStatusWarn) = crayon"bold yellow", "WARN\n" * status.message
getmessage(status::MinimizerStatusError) = crayon"bold red", "ERROR\n" * status.message
function show(io::IO, status::AbstractMinimizerStatus)
print(io, "Status: ")
color, ss = getmessage(status)
if showsettings.plain
print(io, @sprintf("%-8s", ss))
else
print(io, color, @sprintf("%-8s", ss), crayon"default")
end
end
function show(io::IO, res::FitStats)
section(io, "Fit results:", newline=false)
print(io, @sprintf(" #data: %d, #free pars: %d, red. fit stat.: %10.5g, ", res.ndata, res.nfree, res.fitstat))
show(io, res.status)
println(io)
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 2740 | """
ModelSnapshot
A structure containing a *snapshot* (i.e. a "*frozen*" state) of a `Model`. A snapshot contains the same parameters and component evaluations as the original model, and provides the same user interface. Moreover, a `ModelSnapshot` can be serialized to a file and de-serialized in another Julia session (see `GModelFit.serialize()`).
The best fit model and parameter values returned by the `fit()` function are provided as a `ModelSnapshot` object.
"""
struct ModelSnapshot
domain::AbstractDomain
params::PVModel{Parameter}
buffers::OrderedDict{Symbol, Vector{Float64}}
maincomp::Symbol
comptypes::OrderedDict{Symbol, String}
isfreezed::OrderedDict{Symbol, Bool}
deps::OrderedDict{Symbol, Vector{Symbol}}
evalcounters::OrderedDict{Symbol, Int}
end
function ModelSnapshot(meval::ModelEval)
deps = OrderedDict{Symbol, Vector{Symbol}}()
for cname in keys(meval.cevals)
deps[cname] = dependencies(meval.model, cname)
end
ModelSnapshot(deepcopy(meval.domain), deepcopy(meval.pv.params),
OrderedDict([Pair(cname, ceval.buffer) for (cname, ceval) in meval.cevals]),
meval.maincomp[1],
comptypes(meval.model),
OrderedDict([Pair(cname, isfreezed(meval.model, cname)) for cname in keys(meval.cevals)]),
deps, evalcounters(meval))
end
domain(model::ModelSnapshot) = model.domain
Base.keys(model::ModelSnapshot) = collect(keys(model.buffers))
(model::ModelSnapshot)() = reshape(domain(model), model.buffers[model.maincomp])
(model::ModelSnapshot)(name::Symbol) = reshape(domain(model), model.buffers[name])
find_maincomp(model::ModelSnapshot) = model.maincomp
isfreezed(model::ModelSnapshot, cname::Symbol) = model.isfreezed[cname]
dependencies(model::ModelSnapshot, cname::Symbol) = model.deps[cname]
evalcounter(model::ModelSnapshot, cname::Symbol) = model.evalcounters[cname]
comptype(model::ModelSnapshot, cname::Symbol) = model.comptypes[cname]
comptypes(model::ModelSnapshot) = model.comptypes
Base.haskey(m::ModelSnapshot, name::Symbol) = haskey(m.params, name)
function Base.getindex(model::ModelSnapshot, name::Symbol)
if name in keys(model.params)
return model.params[name]
end
error("Name $name not defined")
end
Base.length(model::ModelSnapshot) = length(model.buffers)
function iterate(model::ModelSnapshot, i=1)
k = collect(keys(model))
(i > length(k)) && return nothing
return (k[i] => model[k[i]], i+1)
end
function getparams(comp::GModelFit.PV.PVComp{GModelFit.Parameter})
out = OrderedDict{Symbol, Parameter}()
for pname in propertynames(comp)
out[pname] = getproperty(comp, pname)
end
return out
end
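#= Usage sketch (illustrative): a `ModelSnapshot` (as returned by `fit`) is
   inspected with the same syntax used for a `Model`.  Here we assume the fitted
   model contains a Gaussian component named :l1.

    bestfit, stats = fit(model, data)
    bestfit()                   # best fit evaluation of the main component
    bestfit(:l1)                # best fit evaluation of component :l1
    bestfit[:l1].center.val     # best fit value of a parameter
    bestfit[:l1].center.unc     # associated uncertainty
=#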
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 3811 | version() = Pkg.TOML.parsefile(joinpath(pkgdir(GModelFit), "Project.toml"))["version"]
function ensure_file_extension(_filename, _ext)
filename = deepcopy(_filename)
ext = "." * _ext
nn = length(ext)
if (length(filename) <= nn) ||
(filename[(end-nn+1):end] != ext)
filename *= ext
end
return filename
end
# ====================================================================
#= TODO
function print_param_covariance(fitres::FitStats;
select=nothing, sort=false, threshold=0.)
@assert isa(fitres.status.internal, CMPFit.Result) "Minimizer is not CMPFit"
parnames = String[]
if isa(fitres.bestfit, Vector{GModelFit.HashHashVector{GModelFit.Parameter}})
for i in 1:length(fitres.bestfit)
for (cname, hv) in fitres.bestfit[i]
for (pname, par) in hv
par.fixed && continue
push!(parnames, "[$(i)][$(cname)].$(pname)")
end
end
end
else
for (cname, hv) in fitres.bestfit
for (pname, _) in hv
par.fixed && continue
push!(parnames, "[$(cname)].$(pname)")
end
end
end
@assert length(parnames)^2 == length(fitres.status.internal.covar)
ii = Vector{Int}()
jj = Vector{Int}()
covar = Vector{Float64}()
for i in 1:length(parnames)
for j in i+1:length(parnames)
push!(covar, fitres.status.internal.covar[i, j])
push!(ii, i)
push!(jj, j)
end
end
if sort
ii = ii[ sortperm(abs.(covar))]
jj = jj[ sortperm(abs.(covar))]
covar = covar[sortperm(abs.(covar))]
end
for i in 1:length(ii)
if !isnothing(select)
(parnames[ii[i]] in select) || continue
end
(abs(covar[i]) < threshold) && continue
@printf "%-30s %-30s %10.4f\n" parnames[ii[i]] parnames[jj[i]] covar[i]
end
end
=#
"""
    mock(::Type{Measures}, model::Model, domain::AbstractDomain; keywords...)
    mock(::Type{Measures}, multi::Vector{Model}, domains::Vector{<: AbstractDomain}; keywords...)
Generate mock dataset(s) using a ground truth `Model` or `Vector{Model}` object. The first version returns a single `Measures` object, while the second returns a `Vector{Measures}`.
The random measurement errors added to the data points are drawn from a Normal distribution centered on the data value itself, with a width given by the sum of three contributions:
- *proportional* part: error proportional to each data point value;
- *range* part: error proportional to the range spanned by all values in a single dataset;
- *absolute* part: absolute error value.
No systematic error is considered when generating mock dataset(s).
# Accepted keywords:
- `properr=0.01`: proportional error;
- `rangeerr=0.05`: range error;
- `abserr=0.`: absolute error;
- `seed=nothing`: seed for the `Random.MersenneTwister` generator.
"""
function mock(::Type{Measures}, meval::ModelEval; properr=0.01, rangeerr=0.05, abserr=0., seed=nothing)
rng = MersenneTwister(seed);
update!(meval)
values = last_evaluation(meval)
ee = extrema(values)
range = ee[2] - ee[1]
@assert range > 0
err = (properr .* abs.(values) .+ rangeerr .* range .+ abserr)
values .+= err .* randn(rng, size(values))
Measures(meval.domain, values, err)
end
mock(::Type{T}, model::Model, domain::AbstractDomain; kws...) where T =
mock(T, ModelEval(model, domain); kws...)
function mock(T, multi::Vector{ModelEval}; kws...)
update!(multi)
return [mock(T, multi[i]; kws...) for i in 1:length(multi)]
end
mock(::Type{T}, models::Vector{Model}, domains::Vector{<: AbstractDomain}; kws...) where T =
    mock(T, [ModelEval(models[i], domains[i]) for i in 1:length(models)]; kws...)
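#= Usage sketch (illustrative): generate a mock dataset from a ground truth model.

    truth = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
                  :bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
                  :main => SumReducer(:l1, :bkg))
    data  = GModelFit.mock(Measures, truth, Domain(0.:0.05:6.), properr=0.02, seed=1)
=#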
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 2570 | struct FComp <: AbstractComponent
func::Function
deps::Vector{Symbol}
params::OrderedDict{Symbol, Parameter}
function FComp(f::FunctDesc)
deps = deepcopy(f.args)
params = OrderedDict{Symbol, Parameter}()
for i in 1:length(f.optargs)
            @assert f.optargs[i].head != :... "Splat not allowed"
@assert f.optargs[i].head == :(=)
@assert isa(f.optargs[i].args[1], Symbol)
@assert isa(f.optargs[i].args[2], Number)
params[f.optargs[i].args[1]] = Parameter(f.optargs[i].args[2])
end
return new(f.funct, deps, params)
end
function FComp(funct::Function, deps=Symbol[]; kws...)
params = OrderedDict{Symbol, Parameter}()
for (name, val) in kws
@assert isa(name, Symbol)
@assert isa(val , Number)
params[name] = Parameter(val)
end
return new(funct, deps, params)
end
end
# Allow access to parameters as `comp.parname`
propertynames(comp::FComp) = collect(keys(getfield(comp, :params)))
getproperty(comp::FComp, key::Symbol) = getfield(comp, :params)[key]
dependencies(comp::FComp) = getfield(comp, :deps)
function evaluate!(ceval::CompEval{FComp, <: AbstractDomain},
params...)
if length(ceval.deps) > 0
ceval.buffer .= getfield(ceval.comp, :func)(ceval.deps..., params...)
else
ceval.buffer .= getfield(ceval.comp, :func)(params...)
end
end
# ====================================================================
struct FCompv <: GModelFit.AbstractComponent
funct::Function
deps::Vector{Symbol}
params::OrderedDict{Symbol, Parameter}
FCompv(funct::Function, guess::Vector{T}) where T <: Number =
FCompv(funct, Symbol[], guess)
function FCompv(funct::Function, deps::Vector{Symbol}, guess::Vector{T}) where T <: Number
params = OrderedDict{Symbol, Parameter}()
for i in 1:length(guess)
params[Symbol(:p, i)] = Parameter(guess[i])
end
new(funct, deps, params)
end
end
propertynames(comp::FCompv) = collect(keys(getfield(comp, :params)))
getproperty(comp::FCompv, key::Symbol) = getfield(comp, :params)[key]
dependencies(comp::FCompv) = getfield(comp, :deps)
function evaluate!(ceval::CompEval{FCompv, <: AbstractDomain},
params::Vararg{Float64})
if length(ceval.deps) > 0
        ceval.buffer .= getfield(ceval.comp, :funct)(ceval.deps..., [params...])
else
ceval.buffer .= getfield(ceval.comp, :funct)([params...])
end
end
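#= Usage sketch (illustrative): an `FComp` is typically created implicitly by
   wrapping a `@fd` function in a `Model`: the `x` argument is resolved against
   the domain, while the optional arguments become `Parameter`s (this reflects
   the `FunctDesc`-based constructor above).

    model = Model(@fd (x, m=2., b=1.) -> (m .* x .+ b))
    model(Domain(1.:5.))   # expected: [3.0, 5.0, 7.0, 9.0, 11.0]
=#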
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 2650 | # ====================================================================
# Component structure
#
mutable struct Gaussian_1D <: AbstractComponent
norm::Parameter
center::Parameter
sigma::Parameter
function Gaussian_1D(norm::Number, center::Number, sigma::Number)
@assert norm > 0
@assert sigma > 0
out = new(Parameter(norm), Parameter(center), Parameter(sigma))
out.norm.low = 0
out.sigma.low = 0
return out
end
end
mutable struct Gaussian_2D <: AbstractComponent
norm::Parameter
centerX::Parameter
centerY::Parameter
sigmaX::Parameter
sigmaY::Parameter
angle::Parameter
function Gaussian_2D(norm::Number, centerX::Number, centerY::Number, sigmaX::Number, sigmaY::Number, angle::Number)
@assert norm > 0
@assert sigmaX > 0
@assert sigmaY > 0
out = new(Parameter(norm), Parameter(centerX), Parameter(centerY), Parameter(sigmaX), Parameter(sigmaY), Parameter(angle))
out.norm.low = 0
out.sigmaX.low = 0
out.sigmaY.low = 0
return out
end
end
# ====================================================================
Gaussian(norm, center, sigma) = Gaussian_1D(norm, center, sigma)
Gaussian(norm, centerX, centerY, sigmaX, sigmaY, angle) = Gaussian_2D(norm, centerX, centerY, sigmaX, sigmaY, angle)
function Gaussian(norm, centerX, centerY, sigma)
out = Gaussian_2D(norm, centerX, centerY, sigma, sigma, 0.)
out.sigmaX.fixed = true
out.angle.fixed = true
return out
end
# ====================================================================
# Evaluate component
function evaluate!(ceval::CompEval{Gaussian_1D, Domain{1}},
norm, center, sigma)
X = coords(ceval.domain)
@. (ceval.buffer = exp( ((X - center) / sigma)^2. / (-2.)) /
2.5066282746310002 / sigma * norm) # sqrt(2pi) = 2.5066282746310002
end
function evaluate!(ceval::CompEval{Gaussian_2D, <: AbstractDomain{2}},
norm, centerX, centerY, sigmaX, sigmaY, angle)
angle *= -pi / 180.
a = (cos(angle) / sigmaX)^2 / 2 + (sin(angle) / sigmaY)^2 / 2
b = -sin(2angle) / sigmaX^2 / 2 + sin(2angle) / sigmaY^2 / 2
c = (sin(angle) / sigmaX)^2 / 2 + (cos(angle) / sigmaY)^2 / 2
x = coords(ceval.domain, 1)
y = coords(ceval.domain, 2)
@. (ceval.buffer = norm *
exp(
-(
a * (x - centerX)^2. +
b * (x - centerX) * (y - centerY) +
c * (y - centerY)^2.
)
) / 6.283185307179586 / sigmaX / sigmaY) # 2pi = 6.283185307179586
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 1888 | # ====================================================================
# Component structure
#
mutable struct Lorentzian_1D <: AbstractComponent
norm::Parameter
center::Parameter
fwhm::Parameter
function Lorentzian_1D(norm::Number, center::Number, fwhm::Number)
@assert norm > 0
@assert fwhm > 0
out = new(Parameter(norm), Parameter(center), Parameter(fwhm))
out.norm.low = 0
out.fwhm.low = 0
return out
end
end
mutable struct Lorentzian_2D <: AbstractComponent
norm::Parameter
centerX::Parameter
centerY::Parameter
fwhmX::Parameter
fwhmY::Parameter
function Lorentzian_2D(norm::Number, centerX::Number, centerY::Number, fwhmX::Number, fwhmY::Number)
@assert norm > 0
@assert fwhmX > 0
@assert fwhmY > 0
out = new(Parameter(norm), Parameter(centerX), Parameter(centerY), Parameter(fwhmX), Parameter(fwhmY))
out.norm.low = 0
out.fwhmX.low = 0
out.fwhmY.low = 0
return out
end
end
Lorentzian(norm, center, fwhm) = Lorentzian_1D(norm, center, fwhm)
Lorentzian(norm, centerX, centerY, fwhmX, fwhmY) = Lorentzian_2D(norm, centerX, centerY, fwhmX, fwhmY)
# ====================================================================
# Evaluate component
function evaluate!(ceval::CompEval{Lorentzian_1D, <: AbstractDomain{1}},
norm, center, fwhm)
X = coords(ceval.domain)
@. (ceval.buffer = norm /
(1. +
((X - center) / fwhm)^2.
))
end
function evaluate!(ceval::CompEval{Lorentzian_2D, <: AbstractDomain{2}},
norm, centerX, centerY, fwhmX, fwhmY)
x = coords(ceval.domain, 1)
y = coords(ceval.domain, 2)
@. (ceval.buffer = norm /
(1. +
((x - centerX) / fwhmX)^2. +
((y - centerY) / fwhmY)^2.
))
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 1594 | # ====================================================================
# Component structure
mutable struct OffsetSlope_1D <: AbstractComponent
offset::Parameter
x0::Parameter
slope::Parameter
function OffsetSlope_1D(offset::Number, x0::Number, slope::Number)
out = new(Parameter(offset), Parameter(x0), Parameter(slope))
out.x0.fixed = true
return out
end
end
mutable struct OffsetSlope_2D <: AbstractComponent
offset::Parameter
x0::Parameter
y0::Parameter
slopeX::Parameter
slopeY::Parameter
function OffsetSlope_2D(offset::Number, x0::Number, y0::Number, slopeX::Number, slopeY::Number)
out = new(Parameter(offset), Parameter(x0), Parameter(y0), Parameter(slopeX), Parameter(slopeY))
out.x0.fixed = true
out.y0.fixed = true
return out
end
end
OffsetSlope(offset, x0, slope) = OffsetSlope_1D(offset, x0, slope)
OffsetSlope(offset, x0, y0, slopeX, slopeY) = OffsetSlope_2D(offset, x0, y0, slopeX, slopeY)
# ====================================================================
# Evaluate component
function evaluate!(ceval::CompEval{OffsetSlope_1D, <: AbstractDomain{1}},
offset, x0, slope)
X = coords(ceval.domain)
@. (ceval.buffer = slope * (X - x0) + offset)
end
function evaluate!(ceval::CompEval{OffsetSlope_2D, <: AbstractDomain{2}},
offset, x0, y0, slopeX, slopeY)
x = coords(ceval.domain, 1)
y = coords(ceval.domain, 2)
@. (ceval.buffer =
slopeX * (x - x0) +
slopeY * (y - y0) +
offset)
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 554 | mutable struct Polynomial <: AbstractComponent
params::OrderedDict{Symbol, Parameter}
function Polynomial(args...)
params = OrderedDict{Symbol, Parameter}()
for i in 1:length(args)
params[Symbol(:p, (i-1))] = Parameter(args[i])
end
new(params)
end
end
function evaluate!(ceval::CompEval{Polynomial, <: AbstractDomain{1}},
params...)
ceval.buffer .= params[1]
for deg in 1:length(params)-1
ceval.buffer .+= coords(ceval.domain).^deg .* params[deg+1]
end
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 488 | struct SumReducer <: AbstractComponent
list::Vector{Symbol}
SumReducer() = new(Symbol[])
SumReducer(args::AbstractSet{Symbol}) = new(collect(args))
SumReducer(args::Vector{Symbol}) = new(args)
SumReducer(args::Vararg{Symbol}) = new([args...])
end
dependencies(comp::SumReducer) = comp.list
function evaluate!(ceval::CompEval{SumReducer, <: AbstractDomain})
ceval.buffer .= 0.
for i in 1:length(ceval.deps)
ceval.buffer .+= ceval.deps[i]
end
end
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | code | 4735 | using Random, Test, GModelFit, GModelFit.PV
# Test Model
mm = PVModel{Float64}()
@assert typeof(mm) == PVModel{Float64}
mm[:comp] # empty component
push!(mm, :comp3, :par, 3.1)
push!(mm, :comp1, :par, 1.1)
push!(mm, :comp1, :alt, 1.2)
mm[:comp3][:par] = 99
@assert getfield(mm, :data) == [99, 1.1, 1.2]
@assert mm[:comp1].par == 1.1
@assert mm[:comp1].alt == 1.2
@assert mm[:comp3].par == 99
@assert keys(mm) == [:comp, :comp3, :comp1]
@assert propertynames(mm[:comp]) == Symbol[]
@assert propertynames(mm[:comp1]) == [:par, :alt]
@assert propertynames(mm[:comp3]) == [:par]
@assert items(mm) == [99, 1.1, 1.2]
@assert items(mm[:comp]) == Float64[]
@assert items(mm[:comp1]) == [1.1, 1.2]
@assert items(mm[:comp3]) == [99]
for (cname, comp) in mm
println(cname)
for (pname, par) in comp
println(" ", pname, ": ", par)
@assert getproperty(mm[cname], pname) == par
end
end
# ====================================================================
x = [0.1, 1.1, 2.1, 3.1, 4.1]
meas = [6.29, 7.27, 10.41, 18.67, 25.3]
unc = [1.1, 1.1, 1.1, 1.2, 1.2]
domain = Domain(x)
data = Measures(domain, meas, unc)
model = Model(@fd (x, a2=1, a1=1, a0=5) -> (a2 .* x.^2 .+ a1 .* x .+ a0))
res = fit(model, data)
# @gp data model
# ====================================================================
x = 0:0.1:5
model = Model(:parabola => @fd (x, a2=1, a1=1, a0=5) -> @. (a2 * x^2 + a1 * x + a0))
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# @gp x y "w l t 'True model'" x values(data) uncerts(data) "w yerr t 'Data'" x model() "w l t 'Best fit'"
# ====================================================================
x = 0:0.1:5
model = Model(@fd (x, a2=1, a1=1, a0=5) -> @. (a2 * x^2 + a1 * x + a0))
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# ====================================================================
f = @fd (x, p1=1, p2=1.e-3, p3=1e-6, p4=4, p5=5) ->
@. (p1 + p2 * x + p3 * x^2 + p4 * sin(p5 * x)) * cos(x)
x = 1.:50:10000
model = Model(f)
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# ====================================================================
f1 = @fd (x, p1=1, p2=1e-3, p3=1e-6) -> @. p1 + p2 * x + p3 * x^2
f2 = @fd (x, p4=4, p5=5) -> @. p4 * sin(p5 * x)
f3 = @fd (x) -> cos.(x)
x = 1.:50:10000
model = Model(:f1 => f1,
:f2 => f2,
:f3 => f3,
:main => @fd (x, f1, f2, f3) -> (f1 .+ f2) .* f3)
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# Same results with
model = Model(:f1 => f1)
model[:f2] = f2
model[:f3] = f3
model[:main] = @fd (x, f1, f2, f3) -> (f1 .+ f2) .* f3
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# ====================================================================
x = 0:0.05:6
model = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
:l2 => GModelFit.Gaussian(1, 3, 0.5),
:bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
:main => SumReducer(:l1, :l2, :bkg));
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# Tie two parameters
model[:l2].norm.patch = :l1
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# Patch one parameter to another via a λ function
model[:l2].norm.patch = @fd (m, v) -> v + m[:l1].norm
res = fit(model, GModelFit.mock(Measures, model, Domain(x), seed=1))
# ====================================================================
x = 0:0.05:6
model1 = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
:l2 => GModelFit.Gaussian(1, 3, 0.5),
:bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
:main => SumReducer(:l1, :l2, :bkg));
model2 = Model(:l1 => GModelFit.Gaussian(0.8, 2.1, 0.1),
:l2 => GModelFit.Gaussian(1.2, 2.5, 0.4),
:bkg => GModelFit.OffsetSlope(0.5, 1, 0.1),
:main => SumReducer(:l1, :l2, :bkg));
models = [model1, model2]
freeze!(models[1], :bkg);
freeze!(models[2], :bkg);
data = GModelFit.mock(Measures, models, [Domain(x), Domain(x)], seed=1)
res = fit(models, data, minimizer=GModelFit.cmpfit())
# GModelFit.print_param_covariance(res, sort=true, select=["[2][l1].norm"])
thaw!(models[1], :bkg);
thaw!(models[2], :bkg);
models[2][:bkg].offset.mpatch = @fd m -> m[1][:bkg].offset
models[2][:bkg].slope.mpatch = @fd m -> m[1][:bkg].slope
models[1][:l2].center.mpatch = @fd m -> m[2][:l2].center
@time res = fit(models, data)
#=
@gp x y1 "w l t 'True model'" x values(data1) uncerts(data1) "w yerr t 'Data'" x model1() "w l t 'Best fit'"
@gp x y2 "w l t 'True model'" x values(data2) uncerts(data2) "w yerr t 'Data'" x model2() "w l t 'Best fit'"
=#
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 1926 | # Version 0.3.2
* Updated docstrings and documentation;
* Internals: ModelEval is now able to track the changes in the original Model after it has been created;
# Version 0.3.1
* Updated docstrings
* Removed unused dependency
# Version 0.3.0
- Breaking Changes:
* The `Model` constructor no longer accepts a `Domain` argument;
* The `mock()` function now requires a `Domain` argument;
* The `fit()` function no longer modifies the provided `Model` argument. The old behaviour is available with the newly added `fit!()` function;
* The `@λ` macro has been renamed to `@fd` as a reminder that the resulting value is a `FunctDesc` structure;
* Domain objects can no longer be indexed as if they were vectors. The same functionality is available via the `coords()` or `axis()` functions;
* The minimizer status is no longer an `Enum`, the same information is now indicated by the corresponding subtypes of `AbstractMinimizerStatus`;
# Version 0.2.1
- New features:
* Components can now be directly evaluated on a domain by invoking them as functions, e.g.
```
comp = GModelFit.Gaussian(1, 0, 1);
comp(Domain(-5:5))
```
- Performance improvements:
* During a fit the `Model.maincomp` is temporarily set to the main component name. This allows avoiding unnecessary invocations of `find_maincomp()` during model evaluation;
* Refactored code in GModelFit.PV
- Bugfix:
* Fixed a bug in `show()` when a component evaluates to NaN;
# Version 0.2.0
- New features:
* using PrecompileTools to reduce time-to-first-run in Julia v1.9;
* Implemented `GModelFit.evalcounter()` to retrieve the number of times a component has been evaluated;
* Implemented `show()` method for `ModelSnapshot` objects;
* Refactored serialization code;
- Bugfix:
* Fixed accessibility issue for parameter of `FCompv`;
# Version 0.1.0
- First release.
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 2238 | # GModelFit.jl
[](LICENSE.md)
[](https://gcalderone.github.io/GModelFit.jl/)
`GModelFit` is a general purpose, data-driven model fitting framework for Julia.
> [!WARNING]
> The code in version 0.3.0 underwent a significant refactoring, and a few details may break your code.
> **Please have a look at ChangeLog.md !!**
## Installation
Install with:
```julia
]add GModelFit
```
## Example
```julia
using GModelFit
# Prepare vectors with domain points, empirical measures and uncertainties
x = [0.1, 1.1, 2.1, 3.1, 4.1]
meas = [6.29, 7.27, 10.41, 18.67, 25.3]
unc = [1.1, 1.1, 1.1, 1.2, 1.2]
dom = Domain(x)
data = Measures(dom, meas, unc)
# Create a model using an explicit mathematical expression, and provide the
# initial guess values:
model = Model(@fd (x, a2=1, a1=1, a0=5) -> (a2 .* x.^2 .+ a1 .* x .+ a0))
# Fit model to the data
bestfit, stats = fit(model, data)
```
The output is as follows:
```julia
(Components:
╭───────────┬───────┬───────┬─────────────┬───────────┬───────────┬───────────┬─────────╮
│ Component │ Type │ #Free │ Eval. count │ Min │ Max │ Mean │ NaN/Inf │
├───────────┼───────┼───────┼─────────────┼───────────┼───────────┼───────────┼─────────┤
│ main │ FComp │ 3 │ 76 │ 6.088 │ 25.84 │ 13.56 │ 0 │
╰───────────┴───────┴───────┴─────────────┴───────────┴───────────┴───────────┴─────────╯
Parameters:
╭───────────┬───────┬────────┬──────────┬───────────┬───────────┬────────┬───────╮
│ Component │ Type │ Param. │ Range │ Value │ Uncert. │ Actual │ Patch │
├───────────┼───────┼────────┼──────────┼───────────┼───────────┼────────┼───────┤
│ main │ FComp │ a2 │ -Inf:Inf │ 1.201 │ 0.3051 │ │ │
│ │ │ a1 │ -Inf:Inf │ -0.106 │ 1.317 │ │ │
│ │ │ a0 │ -Inf:Inf │ 6.087 │ 1.142 │ │ │
╰───────────┴───────┴────────┴──────────┴───────────┴───────────┴────────┴───────╯
, Fit results: #data: 5, #free pars: 3, red. fit stat.: 1.0129, Status: OK
)
```
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 963 | # API
## Index
```@index
```
## Exported symbols
The list of **GModelFit.jl** exported symbols is as follows:
```@docs
CartesianDomain{N}
Domain{N}
Measures{N}
Model
@fd
axis
compare
comptype
coords
domain
fit
fit!
freeze!
getindex
haskey
isfreezed
length
select_maincomp!
thaw!
uncerts
values
```
## Non-exported symbols
The following symbols are not exported by the **GModelFit.jl** package since they are typically not used in everyday work, or are aimed at debugging purposes. Still, they can be useful in some cases, hence they are documented here.
```@docs
GModelFit.evalcounter
GModelFit.evalcounters
GModelFit.CompEval
GModelFit.FitStats
GModelFit.FunctDesc
GModelFit.ModelEval
GModelFit.ModelSnapshot
GModelFit.MultiResiduals
GModelFit.Parameter
GModelFit.Residuals
GModelFit.comptypes
GModelFit.dependencies
GModelFit.evaluate!
GModelFit.last_evaluation
GModelFit.minimize!
GModelFit.mock
GModelFit.prepare!
GModelFit.serialize
GModelFit.update!
```
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 11290 | ```@setup abc
include("setup.jl")
```
# Built-in components
The **GModelFit.jl** provides several built-in components which may be used to build arbitrarily complex models.
## OffsetSlope
An offset and slope component for 1D and 2D domains.
The constructors are defined as follows:
- 1D: `GModelFit.OffsetSlope(offset, x0, slope)`;
- 2D: `GModelFit.OffsetSlope(offset, x0, y0, slopeX, slopeY)`;
The parameters are:
- 1D:
- `offset::Parameter`: a global offset;
- `x0::Parameter`: the X coordinate of the point where the component equals `offset`. This parameter is fixed by default;
- `slope::Parameter`: the slope of the linear function;
- 2D:
- `offset::Parameter`: a global offset;
- `x0::Parameter`: the X coordinate of the point where the component equals `offset`. This parameter is fixed by default;
- `y0::Parameter`: the Y coordinate of the point where the component equals `offset`. This parameter is fixed by default;
- `slopeX::Parameter` (only 2D): the slope of the plane along the X direction;
- `slopeY::Parameter` (only 2D): the slope of the plane along the Y direction;
#### Example
```@example abc
using GModelFit
# Define a linear model using the OffsetSlope component
model = Model(:linear => GModelFit.OffsetSlope(2, 0, 0.5))
# Fit model against data
data = Measures([4.01, 7.58, 12.13, 19.78, 29.04], 0.4)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
The best fit parameter values can be retrieved with:
```@example abc
println("Best fit values:")
println("b: ", bestfit[:linear].offset.val, " ± ", bestfit[:linear].offset.unc)
println("m: ", bestfit[:linear].slope.val , " ± ", bestfit[:linear].slope.unc)
```
A similar example in 2D is as follows:
```@example abc
using GModelFit
# Define a linear model using the OffsetSlope component
model = Model(:plane => GModelFit.OffsetSlope(2, 0, 0, 0.5, 0.5))
# Fit model against data
dom = CartesianDomain(1:5, 1:5)
data = Measures(dom, [ 3.08403 3.46719 4.07612 4.25611 5.04716
3.18361 3.88546 4.52338 5.12838 5.7864
3.80219 4.90894 5.24232 5.06982 6.29545
4.34554 4.68698 5.51505 5.69245 6.35409
4.643 5.91825 6.18011 6.67073 7.01467], 0.25)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
## Polynomial
An *n*-th degree polynomial function (*n > 1*) for 1D domains.
The constructor is defined as follows:
- `GModelFit.Polynomial(p1, p2, ...)`;
where `p1`, `p2`, etc. are the guess values for the coefficients of each degree of the polynomial.
The parameters are accessible as `p0`, `p1`, etc.
#### Example
```@example abc
using GModelFit
# Define a linear model using the Polynomial component
model = Model(GModelFit.Polynomial(2, 0.5))
# Fit model against data
data = Measures([4.01, 7.58, 12.13, 19.78, 29.04], 0.4)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
Note that the numerical results are identical to the previous example involving the `OffsetSlope` component. Also note that the default name for a component (if none is provided) is `:main`. To use a 2nd degree polynomial we can simply replace the `:main` component with a new one:
```@example abc
model[:main] = GModelFit.Polynomial(2, 0.5, 1)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
## Gaussian
A normalized Gaussian component for 1D and 2D domains.
The constructors are defined as follows:
- 1D: `GModelFit.Gaussian(norm, center, sigma)`;
- 2D: `GModelFit.Gaussian(norm, centerX, centerY, sigma)` (implies `sigmaX=sigmaY`, `angle=0`);
- 2D: `GModelFit.Gaussian(norm, centerX, centerY, sigmaX, sigmaY, angle)`;
The parameters are:
- 1D:
- `norm::Parameter`: the area below the Gaussian function;
- `center::Parameter`: the location of the center of the Gaussian;
- `sigma::Parameter`: the width the Gaussian;
- 2D:
- `norm::Parameter`: the volume below the Gaussian function;
- `centerX::Parameter`: the X coordinate of the center of the Gaussian;
- `centerY::Parameter`: the Y coordinate of the center of the Gaussian;
- `sigmaX::Parameter`: the width the Gaussian along the X direction (when `angle=0`);
- `sigmaY::Parameter`: the width the Gaussian along the Y direction (when `angle=0`);
- `angle::Parameter`: the rotation angle (in degrees) of the Gaussian.
#### Example
```@example abc
using GModelFit
# Define a model with a single Gaussian component
model = Model(GModelFit.Gaussian(1, 3, 0.5))
# Fit model against data
data = Measures([0, 0.3, 6.2, 25.4, 37.6, 23., 7.1, 0.4, 0], 0.6)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
A very common problem is to fit the histogram of a distribution with a Gaussian model. The following example shows how to fit such a Gaussian model to a distribution generated with `Random.randn`, and how to plot the results using [Gnuplot.jl](https://github.com/gcalderone/Gnuplot.jl/):
```@example abc
using Random, GModelFit, Gnuplot
# Calculate histogram of the distribution
hh = hist(randn(10000), bs=0.25)
# Define domain and data and fit a model
dom = Domain(hist_bins(hh, side=:center, pad=false))
data = Measures(dom, hist_weights(hh, pad=false), 1.)
model = Model(GModelFit.Gaussian(1e3, 0, 1))
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
```@example abc
@gp hh coords(dom) bestfit() "w l t 'Model' lw 3"
saveas("gaussian") # hide
```

A similar problem in 2D can be handled as follows:
```@example abc
using Random, GModelFit, Gnuplot
# Calculate histogram of the distribution
hh = hist(1 .+ randn(10000), 2 .* randn(10000))
# Define domain and data and fit a model
dom = CartesianDomain(hist_bins(hh, 1), hist_bins(hh, 2))
data = Measures(dom, hist_weights(hh) .* 1., 1.)
model = Model(GModelFit.Gaussian(1e3, 0, 0, 1, 1, 0))
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
## FComp
As anticipated in [Basic concepts and data types](@ref), any Julia function can be used as a model component. The corresponding component type is `FComp`, whose constructors are defined as follows:
```julia
FComp(funct::Function, deps=Symbol[]; par1=guess1, par2=guess2, ...)
FComp(funct::FunctDesc)
```
In the first constructor `funct` is the Julia function, `deps` is a vector of dependencies (either the domain dimensions or other component names) and `par1`, `par2` etc. are the named parameters with their corresponding initial guess values.
#### Example
```@example abc
using GModelFit
# Define a simple Julia function to evaluate a linear relationship
myfunc(x, b, m) = b .+ x .* m
# Define a model with a `FComp` wrapping the previously defined function.
# Also specify the initial guess parameters.
model = Model(:linear => GModelFit.FComp(myfunc, [:x], b=2, m=0.5))
# Fit model against a data set
data = Measures([4.01, 7.58, 12.13, 19.78, 29.04], 0.4)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
In the second constructor a [`GModelFit.FunctDesc`](@ref) object is accepted, as generated by the [`@fd`](@ref) macro. The function is typically a mathematical expression combining any number of parameters and/or other component evaluations within the same model. The expression should be given in the form:
```
@fd (x, [y, [further domain dimensions...],]
[comp1, [comp2, [further components ...],]]
[par1=guess1, [par2=guess2, [further parameters]]]) ->
(mathematical expression)
```
where the mathematical expression returns a `Vector{Float64}` with the same length as the model domain.
The previous example can be rewritten as follows:
```@example abc
using GModelFit
# Define a linear model (with initial guess parameters)
model = Model(:linear => @fd (x, b=2, m=0.5) -> (b .+ x .* m))
# Fit model against data
data = Measures([4.01, 7.58, 12.13, 19.78, 29.04], 0.4)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
Note that a `FComp` component can be added to a model without explicitly invoking its constructor when the [`@fd`](@ref) macro is used.
The evaluation of a `FComp` component may also involve the outcomes from other components. Continuing from the previous example, whose fit was clearly a poor one, we may add a quadratic term to the previously defined `linear` component:
```@example abc
model[:quadratic] = @fd (x, linear, p2=1) -> (linear .+ p2 .* x.^2)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
The keywords given when defining the function are interpreted as component parameters, hence their properties can be retrieved with:
```@example abc
println("Best fit values:")
println("b: ", bestfit[:linear].b.val , " ± ", bestfit[:linear].b.unc)
println("m: ", bestfit[:linear].m.val , " ± ", bestfit[:linear].m.unc)
println("p2: ", bestfit[:quadratic].p2.val, " ± ", bestfit[:quadratic].p2.unc)
```
## FCompv
Just like `FComp`, `FCompv` is a wrapper for a standard Julia function which performs the component evaluation. However, the `FCompv` parameters are passed to the function as a single vector of floats. The `FCompv` constructors are defined as follows:
```julia
FCompv(funct::Function, guess::Vector{Float64})
FCompv(funct::Function, deps::Vector{Symbol}, guess::Vector{Float64})
```
where `funct` is the Julia function, `deps` is an optional vector of dependencies (either the domain dimensions or other component names) and `guess` is a vector of initial guess values.
#### Example
The following example shows how to estimate the vector `x` satisfying the linear equation `Ax = b`, where `A` and `b` are:
```@example abc
A = [1.43 2.17 -0.38
0.21 -0.33 -1.71
-1.23 -1.16 0.83
-2.09 0.44 0.64]
b = [3.14, -8.48, 0.53, 0.54]
println() # hide
```
To define the model we rewrite the equation as `Ax - b = 0`, and proceed as follows:
```@example abc
model = Model(GModelFit.FCompv(x -> A*x - b,
[1, 1, 1]))
println() # hide
```
where `[1, 1, 1]` are the initial guess values for the three elements of `x` (i.e. the free parameters in the fit). In this case the *empirical data* to compare the model to are just zeros, and we will assume a constant uncertainty of 1 for all samples:
```@example abc
data = Measures(fill(0., length(b)), 1.)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
## SumReducer
A component calculating the element-wise sum of a number of other components.
The `SumReducer` constructor is defined as follows:
```julia
SumReducer(args::AbstractSet{Symbol})
SumReducer(args::Vector{Symbol})
SumReducer(args::Vararg{Symbol})
```
where the `Symbol`s represent the component names.
The `SumReducer` component has no parameters.
#### Example
```@example abc
using GModelFit
# Define domain and a linear model (with initial guess parameters)
model = Model(:linear => @fd (x, b=2, m=0.5) -> (b .+ x .* m))
# Add a quadratic component to the model
model[:quadratic] = @fd (x, p2=1) -> (p2 .* x.^2)
# The total model is the sum of `linear` and `quadratic`
model[:main] = SumReducer(:linear, :quadratic)
# Fit model against data
dom = Domain(1:5)
data = Measures(dom, [4.01, 7.58, 12.13, 19.78, 29.04], 0.4)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 6045 | ```@setup abc
include("setup.jl")
```
# Basic concepts and data types
In order to exploit the **GModelFit.jl** model expressiveness a few concepts need to be introduced, along with their associated data types:
- *Domain*: an N-dimensional grid of points associated with empirical measures, and used to evaluate a model. It is analogous to the independent variable $\vec{x}$ in the $f(\vec{x})$ notation. It is represented by either:
- a [`Domain{N}`](@ref) object for linear domains, where the coordinates for each of the N dimensions are explicitly specified for all the points;
- or a [`CartesianDomain{N}`](@ref) object where the coordinates are specified for each of the `N` axes and the coordinates for all points are obtained as the cartesian product of all the axes. A cartesian domain is internally transformed into a linear one when needed;
A domain object (either linear or cartesian) is required as first argument for the `Measures` constructor (see below).
- *Measures*: a container for the N-dimensional empirical data and their associated $1\sigma$ Gaussian uncertainties, represented by an object of type [`Measures{N}`](@ref) (further options may be available in the future, such as Poisson counts);
- *Model component*: the atomic building block of a (potentially complex) model, it is essentially a function used to map a `Domain` or `CartesianDomain` object into a `Vector{Float64}` representing the component evaluation. A component is a structure inheriting from `GModelFit.AbstractComponent` and is typically characterized by one or more *parameters* (see below). The **GModelFit.jl** package provides several [Built-in components](@ref), and new ones can be implemented by the user (see [Custom components](@ref)). The memoization mechanism operates at the component level and aims to avoid unnecessary re-evaluation of the component if none of its parameter values has changed since last evaluation;
- *Parameter*: a single floating point number characterizing a specific aspect for the evaluation of a component (e.g. the slope of a power law or the width of a Gaussian profile). The parameter values are automatically varied during the fitting process until the residuals between the global model evaluation and the empirical data are minimized. A parameter can be fixed to a specific value, limited in an interval, and/or be dynamically calculated (patched) according to the values of other parameters. All parameters are represented by an object of type [`GModelFit.Parameter`](@ref);
- *Model*: is the overall model description, whose evaluation is supposed to be compared to a single `Measures` object and whose parameters are varied during fitting to reduce the residuals. Internally, a model is implemented as a dictionary containing one or more *components*, each identified by a unique `Symbol` name (see [`Model`](@ref));
- Component dependencies and *main component*: the evaluation of a component, say `A`, may use the outcome of another component, say `B`, to calculate its output, thus inducing a dependency between the two. In this case we say that `A` *depends* on `B`, and therefore `B` needs to be evaluated before `A` (circular dependencies are not allowed, and would raise an error if attempted). The dependencies are automatically identified, and the last component being evaluated is dubbed *main component* since its output represents the overall model evaluation;
- *Multi-model*: a `Vector{Model}` containing two or more models, suitable to be compared to a corresponding `Vector{Measures}` to perform [Multi-dataset fitting](@ref);
- *Minimizer*: the **GModelFit.jl** package provides just the tools to define and manipulate a model, but the actual fitting (namely, the minimization of the residuals) is performed by an external *minimizer* library. Two minimizers are currently available:
- [LsqFit](https://github.com/JuliaNLSolvers/LsqFit.jl): a pure-Julia minimizer;
- [CMPFit](https://github.com/gcalderone/CMPFit.jl): a C minimizer wrapped in a Julia package.
Both are automatically installed with **GModelFit.jl**, and `LsqFit` is the default choice (unless otherwise specified in the [`fit()`](@ref) function call).
- *Model snapshot*: the best fit model, as well as the best fit parameter values and associated uncertainties, is returned by the [`fit()`](@ref) function as a [`GModelFit.ModelSnapshot`](@ref) structure, namely a *frozen snapshot* of the evaluation of a `Model` object on a given `Domain`. Components, parameters and evaluation outcomes are accessed in exactly the same way on both `Model` and `ModelSnapshot` objects, the only difference being that the latter can't be re-evaluated on different domains or parameter values.
- *Fit statistics*: the purpose of fitting is to minimize the *distance* between the model and the data, as quantified by a proper fit statistic (typically a reduced $\chi^2$ for the Gaussian uncertainties case). Such statistic, as well as other information concerning the fit, are returned by the [`fit()`](@ref) function in a [`GModelFit.FitStats`](@ref) structure;
- *function descriptor*: **GModelFit.jl** uses standard Julia functions in two different contexts:
- to calculate the value of a `Parameter` as a function of other `Parameter`'s values. In this case the parameters are said to be *patched*, or linked, since there is a constraint between their values. Two (or more) parameters may be patched within the same model, or across models when performing [Multi-dataset fitting](@ref);
- to define a model component using a standard Julia mathematical expression involving `Parameter`s values or other components;
To use a standard function in this fashion it should be wrapped into a [`GModelFit.FunctDesc`](@ref) object, which allows both invoking the function itself and providing a string representation for display purposes. To create a function descriptor object it is typically much easier to invoke the [`@fd`](@ref) macro rather than the `FunctDesc` constructor (a short sketch of both use cases is shown below).
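A minimal sketch of both use cases (component and parameter names are purely illustrative):
```julia
using GModelFit

# A function descriptor used to define a model component (second context):
model = Model(:l1 => GModelFit.Gaussian(1, 2, 0.2),
              :l2 => GModelFit.Gaussian(1, 3, 0.5),
              :main => @fd (l1, l2) -> l1 .+ l2)

# A function descriptor used to patch a parameter value (first context):
# the normalization of :l2 is calculated from the one of :l1
model[:l2].norm.patch = @fd (m, v) -> v + m[:l1].norm
```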
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 6499 | ```@setup abc
include("setup.jl")
```
# Custom components
Besides the [Built-in components](@ref), the user may define any number of custom components. The latter are structures satisfying the following constraints:
- They must be structures inheriting from `GModelFit.AbstractComponent`;
- The component parameters (if present) must be defined as fields with type `Parameter`, e.g.:
```julia
struct MyComponent <: AbstractComponent
param1::Parameter
param2::Parameter
...
end
```
Alternatively, the parameters may be specified as a single field of type `OrderedDict{Symbol, Parameter}` (see the [`Polynomial`](https://github.com/gcalderone/GModelFit.jl/blob/master/src/components/Polynomial.jl) component for an example). The structure may also contain further fields of any type;
- The [`GModelFit.evaluate!`](@ref) function should be extended with a dedicated method to evaluate the component, as shown below.
Optionally, the user may choose to extend also the following functions:
- [`GModelFit.prepare!`](@ref): to pre-compute quantities depending on the evaluation domain, and to allocate the evaluation buffer;
- [`GModelFit.dependencies`](@ref): to specify the list of the component dependencies (a short sketch is shown below).
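For instance, a minimal sketch of how a component depending on other components might declare them (mirroring the approach used by the built-in `SumReducer`; all names are illustrative):
```julia
using GModelFit
import GModelFit: dependencies, evaluate!

# A component which rescales the sum of other components by a factor
struct Rescaler <: GModelFit.AbstractComponent
    list::Vector{Symbol}           # names of the components we depend on
    factor::GModelFit.Parameter
    Rescaler(factor::Number, args::Vararg{Symbol}) =
        new([args...], GModelFit.Parameter(factor))
end

# Declare the dependencies: their evaluations will be available in `ceval.deps`
dependencies(comp::Rescaler) = comp.list

function evaluate!(ceval::GModelFit.CompEval{Rescaler, <: AbstractDomain},
                   factor)
    ceval.buffer .= 0.
    for d in ceval.deps
        ceval.buffer .+= d
    end
    ceval.buffer .*= factor
end
```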
Note that before being evaluated, all components need to be wrapped into a [`GModelFit.CompEval`](@ref) structure, and that the above mentioned `evaluate!` function requires a `CompEval` object as first argument. The outcomes of the evaluations should be placed in `CompEval.buffer`.
The following example shows how to define a custom component:
```@example abc
using GModelFit
import GModelFit: prepare!, evaluate!
struct MyComponent <: GModelFit.AbstractComponent
param1::GModelFit.Parameter
function MyComponent(param1)
println(" -> call to MyComponent constructor;")
new(GModelFit.Parameter(param1))
end
end
function prepare!(comp::MyComponent, domain::AbstractDomain)
println(" -> call to prepare!()")
return fill(NaN, length(domain)) # buffer for evaluations
end
function evaluate!(ceval::GModelFit.CompEval{MyComponent, <: AbstractDomain{1}},
param1)
println(" -> call to evaluate!() with parameter value: ", param1)
ceval.buffer .= param1
end
println() # hide
```
## Life cycle of a component
The life cycle of a component is as follows:
1. The component is created by invoking its constructor, and is added to a [`Model`](@ref) object;
1. When the [`fit!`](@ref) function is invoked, all components in a `Model` are wrapped into [`GModelFit.CompEval`](@ref) objects;
- During creation of the `CompEval` structure the [`GModelFit.prepare!`](@ref) function is invoked to allocate the proper buffer for evaluations. Note that the `prepare!` function is called only once for each `fit!` invocation, hence it is the perfect place to pre-compute quantities which will be used during the component evaluation;
1. During the minimization process the [`GModelFit.evaluate!`](@ref) function is repeatedly invoked to evaluate the component, varying the parameter values until a convergence criterion is met.
The following example shows how to simulate the life cycle for the `MyComponent` structure defined above:
```@example abc
# Create a component and a domain for evaluation
comp = MyComponent(1)
dom = Domain(1:5)
# Create CompEval object (the `prepare!` function is invoked here):
ceval = GModelFit.CompEval(comp, dom)
# Repeated evaluations varying parameter value:
GModelFit.evaluate!(ceval, 1)
GModelFit.evaluate!(ceval, 2)
GModelFit.evaluate!(ceval, 3)
# Retrieve results
println(ceval.buffer)
```
The actual life cycle during minimization is slightly more complex since the `evaluate!` function is invoked only if a change in the parameter values with respect to previous evaluation has been detected.
## Complete example
A common case is to compare empirical data with a numerically evaluated theoretical model, possibly defined on a different grid with respect to the empirical one. An interpolation is therefore required in order to compare the model to the data.
Let's assume the theoretical model is defined as follows:
```@example abc
theory_x = 0.:10
theory_y = [0, 0.841, 0.909, 0.141, -0.757, -0.959, -0.279, 0.657, 0.989, 0.412, -0.544]
println() # hide
```
while the empirical data are:
```@example abc
obs_x = [0.500, 2.071, 3.642, 5.212, 6.783, 8.354, 9.925]
obs_y = [2.048, 3.481, 1.060, 0.515, 3.220, 4.398, 1.808]
println() # hide
```
The following example shows how to implement a component which interpolates a theoretical model onto a specific empirical domain, with the only parameter being a global scaling factor:
```@example abc
using GModelFit, Interpolations
import GModelFit.prepare!, GModelFit.evaluate!
# Define the component structure and constructor
struct Interpolator <: GModelFit.AbstractComponent
theory_x::Vector{Float64}
theory_y::Vector{Float64}
interp_y::Vector{Float64} # will contain the interpolated values
scale::GModelFit.Parameter
function Interpolator(theory_x, theory_y)
scale = GModelFit.Parameter(1)
scale.low = 0 # ensure scale parameter is positive
interp_y = Vector{Float64}() # this will be populated in prepare!()
return new(theory_x, theory_y, interp_y, scale)
end
end
# Component preparation: invoked only once to precompute quantities
# and allocate evaluation buffer
function prepare!(comp::Interpolator, domain::AbstractDomain{1})
# Pre-compute interpolation on the empirical domain
itp = linear_interpolation(comp.theory_x, comp.theory_y)
append!(comp.interp_y, itp(coords(domain)))
return fill(NaN, length(comp.interp_y)) # buffer for evaluations
end
# Component evaluation (apply scaling factor)
function evaluate!(ceval::GModelFit.CompEval{Interpolator, <: AbstractDomain{1}},
scale)
ceval.buffer .= scale .* ceval.comp.interp_y
end
println() # hide
```
The following code shows how to prepare a `Model` including the interpolated theoretical model, and to take into account the possible background introduced by the detector used to obtain empirical data:
```@example abc
model = Model(:theory => Interpolator(theory_x, theory_y),
:background => GModelFit.OffsetSlope(1., 0., 0.2),
:main => SumReducer(:theory, :background))
data = Measures(Domain(obs_x), obs_y, 0.2)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 5484 | ```@setup abc
include("setup.jl")
```
# GModelFit.jl
## A model fitting framework for Julia.
[](https://github.com/gcalderone/GModelFit.jl)
**GModelFit.jl** is a general purpose, data-driven model fitting framework for Julia.
It provides the basic tools to define, interactively manipulate and efficiently evaluate a (possibly very complex) model, and to fit the latter to empirical data. The main functionalities are:
- it handles datasets of any dimensionality;
- the syntax is very simple and concise as it resembles the indexing for dictionaries and the field access for structs. The most relevant functions are the self-explanatory `fit()` and the object constructors (see [Main functionalities](@ref));
- the fitting model is evaluated on a user defined domain, and is the result of a combination of *model components* or mathematical expressions (in the form of [lambda functions](https://en.wikipedia.org/wiki/Anonymous_function)), or any arbitrary mixture of the two;
- it provides several ready-to-use [Built-in components](@ref), and it also allows defining new components to suit specific needs ([Custom components](@ref));
- all component results are cached so that repeated evaluations with the same parameter values do not involve further calculations (memoization);
- model parameters can be fixed to a specific value, limited in an interval, and/or be dynamically linked (patched) to the values of other parameters (see [Parameter constraints](@ref));
- multiple data sets can be fitted simultaneously against different models whose parameters can be patched (see [Multi-dataset fitting](@ref));
- it supports different minimizers ([LsqFit](https://github.com/JuliaNLSolvers/LsqFit.jl) and [CMPFit](https://github.com/gcalderone/CMPFit.jl)), both aimed to carry out [non-linear least squares](https://en.wikipedia.org/wiki/Non-linear_least_squares) minimization (see [Minimizers](@ref));
- it provides facilities for interactive fitting and quick plotting (see [Quick plot (1D)](@ref)).
The fitting process involves the automatic variation of the parameter values, subject to the user defined constraints, until the differences between the evaluated model and the empirical data are minimized. The implementation details depend on the chosen minimizer. The purpose of **GModelFit.jl** is thus to act as an interface between the high-level model definition and manipulation (facing the user), and the low-level implementation details (facing the minimizer).
## Installation
In the Julia REPL type:
```julia-repl
julia> ]add GModelFit
```
The `]` character starts the Julia [package manager](https://julialang.github.io/Pkg.jl/v1/getting-started.html#Basic-Usage-1). Hit the backspace key to return to the Julia prompt.
In order to easily visualize the outcomes of 1D analysis you may be interested in installing also [Gnuplot.jl](https://github.com/gcalderone/Gnuplot.jl):
```julia-repl
julia> ]add Gnuplot
```
## Workflow
The typical workflow to use **GModelFit.jl** is as follows:
- Wrap empirical data domain and measures into one (or more) `Domain` and `Measures` object(s);
- Create a `Model` object by providing components or mathematical expressions, each representing a specific *aspect* of the theoretical model;
- Optionally set initial guess parameter values and/or constraints between model parameters;
- Fit the model against the data and inspect the results;
- If needed, modify the model and repeat the fitting process;
- Exploit the results and outputs.
A very simple example showing the above workflow is:
```@example abc
using GModelFit
# Prepare vectors with domain points, empirical measures and associated
# uncertainties
x = [0.1, 1.1, 2.1, 3.1, 4.1]
meas = [6.29, 7.27, 10.41, 18.67, 25.3]
unc = [1.1, 1.1, 1.1, 1.2, 1.2]
# Prepare Domain and Measures objects
dom = Domain(x)
data = Measures(dom, meas, unc)
# Create a model using an explicit mathematical expression, and provide the
# initial guess values:
model = Model(@fd (x, a2=1, a1=1, a0=5) -> (a2 .* x.^2 .+ a1 .* x .+ a0))
# Fit model to the data
bestfit, stats = fit(model, data)
nothing # hide
```
The **GModelFit.jl** package implements a `show` method for many of the data types involved, hence the above code results in the following output:
```@example abc
show((bestfit, stats)) # hide
```
showing the best fit parameter values and the associated uncertainties, as well as a few statistics concerning the fitting process.
If not satisfied with the result you may, for instance, change the initial value for a parameter and re-run the fit:
```@example abc
model[:main].a0.val = 5
bestfit, stats = fit(model, data)
nothing # hide
```
Once done, you may plot the data and the best fit model with a plotting framework of your choice. E.g., with [Gnuplot.jl](https://github.com/gcalderone/Gnuplot.jl):
```@example abc
using Gnuplot
@gp coords(dom) values(data) uncerts(data) "w yerr t 'Data'" :-
@gp :- coords(dom) bestfit() "w l t 'Best fit model'"
saveas("simple_example"); # hide
```

Also, you can easily access the numerical results for further analysis, e.g.:
```@example abc
println("Best fit value for the offset parameter: ",
bestfit[:main].a0.val, " ± ",
bestfit[:main].a0.unc, "\n",
"Reduced χ^2: ", stats.fitstat)
```
The above example is definitely a simple one, but more complex ones follow essentially the same workflow.
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 4694 | ```@setup abc
include("setup.jl")
```
# Main functionalities
- *Preparation of empirical data*: both the data domain and empirical values (with associated uncertainties) should be wrapped into `Domain` (or `CartesianDomain`) and `Measures` objects respectively. Such objects are created by simply passing `AbstractVector{<: Real}` to their respective constructors, e.g.:
```@example abc
using GModelFit
dom = Domain([0.1, 1.1, 2.1, 3.1, 4.1])
data = Measures(dom, [6.29, 7.27, 10.41, 18.67, 25.3],
[1.1, 1.1, 1.1, 1.2, 1.2])
println() # hide
```
- *Component creation*: a component is a structure inheriting `GModelFit.AbstractComponent` and hosting one or more fields with type [`GModelFit.Parameter`](@ref). It is created by simply invoking its constructor, e.g.:
```@example abc
using GModelFit
# Create a stand-alone component (i.e. a component used outside a model)
comp = GModelFit.Gaussian(1, 0, 1) # numbers represent the parameter values
# A stand-alone component can be evaluated on a user provided domain as follows:
comp(Domain(-4:0.1:4))
# Evaluate the component providing custom parameter values:
comp(Domain(-4:0.1:4), center=0.1, sigma=1.3)
println() # hide
```
The list of available components is available in [Built-in components](@ref).
Note: a component with dependencies can't be evaluated as a stand-alone since it requires the corresponding dependencies to be available in a model.
- *Model definition* and *manipulation*: a [`Model`](@ref) object is essentially a dictionary of components with `Symbol` keys. The `keys()`, `haskey()` and `iterate()` methods defined for the `Model` object provide the usual dictionary functionalities. A model object can be created and manipulated as follows:
```@example abc
using GModelFit
# Create an empty model
model = Model()
# Add a two Gaussian components, and a third one representing their sum
model[:comp1] = GModelFit.Gaussian(1, 3, 1)
model[:comp2] = GModelFit.Gaussian(0.5, 4, 0.3)
model[:sum] = @fd (comp1, comp2) -> comp1 .+ comp2
# Modify a parameter value:
model[:comp1].center.val = 5
# Evaluate the model on a user defined domain
dom = Domain(0:0.1:10)
model(dom)
# Evaluate the model, but retrieve the outcome of the :comp2 component
model(dom, :comp2)
println() # hide
```
- *Mock data*: the [`GModelFit.mock()`](@ref) function allows generating mock data set(s) using a (multi-)model as ground truth, adding random noise to simulate the measurement process. An example using the previously defined model and domain is as follows:
```@example abc
data = GModelFit.mock(Measures, model, dom)
println() # hide
```
This functionality is used in the examples of the next sections to generate the mock datasets.
- *Fitting*: the main functions to fit a model (represented by a [`Model`](@ref) object) to an empirical dataset (represented by a [`Measures`](@ref) object) are [`fit`](@ref) and [`fit!`](@ref). The latter provides the same functionality as the former, with the only difference that upon return the `Model` object will have its parameters set to the best fit values. In both cases the `Model` object will be evaluated on the same domain associated with the `Measures` object. An overview of the fit workflow is as follows:

The following code shows how to fit the previously generated mock data set to the above model:
```@example abc
bestfit, stats = fit(model, data)
```
The [`fit`](@ref) function returns a tuple with:
- a [`GModelFit.ModelSnapshot`](@ref) structure containing a snapshot of the best fit model;
- a [`GModelFit.FitStats`](@ref) structure containing statistics on the fit.
To perform a [Multi-dataset fitting](@ref) simply pass a `Vector{Model}` and a `Vector{Measures}` to the `fit` function.
- *Serialization*: a few structures (such as [`GModelFit.ModelSnapshot`](@ref), [`GModelFit.FitStats`](@ref) and [`Measures{N}`](@ref)) can be *serialized*, i.e. stored in a file, and later *de-serialized* in a separate Julia session. This is useful when the best fit model and associated information must be saved for later use, without the need to re-run the fitting. The best fit model, fit statistics and mock dataset used above can be serialized with:
```@example abc
GModelFit.serialize("my_snapshot.json", bestfit, stats, data)
println() # hide
```
In a separate Julia session, you can obtain a copy of exactly the same data with
```@example abc
using GModelFit
(bestfit, stats, data) = GModelFit.deserialize("my_snapshot.json")
println() # hide
```
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 2674 | # Minimizers
The **GModelFit.jl** main purpose is to act as an high-level interface between the user and the underlying minimizer.
Currently, two [non-linear least squares](https://en.wikipedia.org/wiki/Non-linear_least_squares) minimizers are supported:
- [LsqFit](https://github.com/JuliaNLSolvers/LsqFit.jl);
- [CMPFit](https://github.com/gcalderone/CMPFit.jl).
More minimizers may be added in the future.
To choose a specific minimizer use the `minimizer=` keyword when invoking the [`fit()`](@ref) function, e.g. `minimizer=GModelFit.lsqfit()` or `minimizer=GModelFit.cmpfit()`. If the keyword is not provided the `lsqfit()` minimizer is used.
There is also a dummy minimizer, `GModelFit.dry()`, whose purpose is to compare the model and the data, and to generate a `FitStats` object without modifying the model parameters. The dry minimizer is used by the [`compare()`](@ref) function.
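As a sketch of how this can be used (assuming `compare()` accepts the same model and dataset arguments, and returns the same outputs, as `fit()`):
```julia
using GModelFit

model = Model(:main => @fd (x, T=3.14) -> sin.(x ./ T) ./ (x ./ T))
data  = GModelFit.mock(Measures, model, Domain(1:0.1:50), seed=1)

# Evaluate the fit statistics for the current parameter values,
# without modifying them (internally uses the dry minimizer):
bestfit, stats = compare(model, data)
```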
### Examples
```@example abc
using GModelFit
model = Model(:main => @fd (x, T=3.14) -> sin.(x ./ T) ./ (x ./ T))
data = GModelFit.mock(Measures, model, Domain(1:0.1:50), seed=1)
bestfit, stats = fit(model, data, minimizer=GModelFit.lsqfit())
println(); # hide
```
or
```@example abc
bestfit, stats = fit(model, data, minimizer=GModelFit.cmpfit())
println(); # hide
```
The above minimizers typically provide the same results, although in some complex cases the [CMPFit](https://github.com/gcalderone/CMPFit.jl) may be more robust and less sensitive to initial guess parameters.
## The `cmpfit()` minimizer
The `cmpfit()` minimizer allows specifying several options to fine-tune its behaviour. Specifically:
- the `CMPFit.Config` structure allows specifying the convergence criteria, the maximum number of iterations, etc. (see the "CONFIGURING MPFIT()" section [here](https://pages.physics.wisc.edu/~craigm/idl/cmpfit.html));
- the `ftol_after_maxiter` keyword allows specifying a threshold on the relative difference in fit statistics before and after the `mpfit()` execution. If the latter terminates because the maximum number of iterations has been reached, and the relative difference in fit statistics is still greater than `ftol_after_maxiter`, the minimization process will continue. E.g.:
```@example abc
using GModelFit
dom = Domain(1:0.1:50)
model = Model(:main => @fd (x, T=3.14) -> sin.(x ./ T) ./ (x ./ T))
data = GModelFit.mock(Measures, model, dom, seed=1)
# Set minimizer options
mzer = GModelFit.cmpfit()
mzer.config.maxiter = 1
mzer.ftol_after_maxiter = 1e-8
# Run the fit
model[:main].T.val = 10 # guess value, purposely far from true one
bestfit, stats = fit(model, data, minimizer=mzer)
println(); # hide
```
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 5356 | ```@setup abc
include("setup.jl")
```
# Miscellaneous
## Generate mock datasets
In some cases it is useful to test a model for robustness before the empirical data are available for fitting. This can be achieved via the [`GModelFit.mock()`](@ref) function, whose purpose is to generate a mock dataset which simulates a measurement process by adding random noise to the foreseen ground-truth.
### Example
```@example abc
using GModelFit
model = Model(:main => @fd (x, T=3.14) -> sin.(x ./ T) ./ (x ./ T))
# Generate a mock dataset on a specific domain
dom = Domain(1:0.1:50)
data = GModelFit.mock(Measures, model, dom, seed=1)
# Fit model against the mock dataset
bestfit, stats = fit(model, data)
```
## Serialization
A few structures, namely [`GModelFit.ModelSnapshot`](@ref), [`GModelFit.FitStats`](@ref) and [`Measures{N}`](@ref), as well as `Vector`(s) of such structures can be *serialized*, i.e. stored in a file using a dedicated JSON format. The structures can later be *de-serialized* in a separate Julia session without the need to re-run the fitting process used to create them in the first place.
### Example
In the following we will generate a few **GModelFit.jl** objects and serialize them in a file.
```@example abc
using GModelFit
dom = Domain(1:0.1:50)
model = Model(:main => @fd (x, T=3.14) -> sin.(x ./ T) ./ (x ./ T))
data = GModelFit.mock(Measures, model, dom, seed=1)
bestfit, stats = fit(model, data)
# Serialize objects and save in a file
GModelFit.serialize("save_for_future_use.json", bestfit, stats, data)
println(); # hide
```
The same objects can be de-serialized in a different Julia session:
```@example abc
using GModelFit
bestfit, stats, data = GModelFit.deserialize("save_for_future_use.json")
```
## Quick plot (1D)
The **GModelFit.jl** package implements [**Gnuplot.jl**](https://github.com/gcalderone/Gnuplot.jl/) recipes to display plots of `Measures{1}` and `ModelSnapshot` objects, e.g.:
### Example
Create a model, a mock dataset and run a fit:
```@example abc
using GModelFit
dom = Domain(0:0.01:5)
model = Model(:bkg => GModelFit.OffsetSlope(1, 1, 0.1),
:l1 => GModelFit.Gaussian(1, 2, 0.2),
:l2 => GModelFit.Gaussian(1, 3, 0.4),
:main => SumReducer(:bkg, :l1, :l2))
data = GModelFit.mock(Measures, model, dom)
bestfit, stats = fit(model, data)
println(); # hide
```
A plot of the dataset and of the best fit model can be simply obtained with
```@example abc
using Gnuplot
@gp data bestfit
saveas("gnuplot1") # hide
```

You may also specify axis range, labels, title, etc. using the standard [**Gnuplot.jl**](https://github.com/gcalderone/Gnuplot.jl/) keyword syntax, e.g.:
```@example abc
using Gnuplot
@gp xr=[1, 4.5] xlabel="Wavelength" ylab="Flux" "set key outside" data bestfit
saveas("gnuplot2") # hide
```

## GModelFit internals
(This section deals with **GModelFit.jl** internals, feel free to skip if not interested.)
During minimization a number of internal data structures are created to avoid reallocating heap memory at each iteration. The most important of such structures are:
- [`GModelFit.CompEval`](@ref): a container to perform component evaluation on a specific domain. This structure is relevant when defining [Custom components](@ref) as it is used to dispatch component evaluation to the proper `evaluate!` method;
- [`GModelFit.ModelEval`](@ref): a container for a [`Model`](@ref) evaluation on a specific domain. This structure contains a dictionary of `CompEval` structures for all components in a model, as well as the values of *patched* parameters (see [Parameter constraints](@ref)), and is updated at each iteration of the minimizer to reflect the current model evaluation.
An important functionality of the `ModelEval` structure is that it detects the changes in the original `Model` even after it has been created, e.g.:
```@example abc
# Create a Model
model = Model(:comp1 => @fd (x, p1=1) -> p1 .* x)
# Wrap the model into a ModelEval to perform evaluation on a specific domain
dom = Domain(1:5)
meval = GModelFit.ModelEval(model, dom)
# Evaluate and print the maximum value
GModelFit.update!(meval)
println(maximum(GModelFit.last_evaluation(meval)))
# Add a second component to the original model
model[:comp2] = @fd (x, comp1, p2=1) -> comp1 .+ p2 .* x.^2
# Re-evaluate the ModelEval (it will automatically detect the addition of :comp2)
GModelFit.update!(meval)
println(maximum(GModelFit.last_evaluation(meval)))
```
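The same mechanism should also pick up changes to parameter values. A minimal continuation of the sketch above (assuming `update!` re-reads the parameter values from the original `Model`):
```julia
# Changing a parameter value in the original model is reflected
# in the next evaluation of the wrapping ModelEval
model[:comp1].p1.val = 2
GModelFit.update!(meval)
println(maximum(GModelFit.last_evaluation(meval)))
```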
- [`GModelFit.Residuals`](@ref): container for a `ModelEval` object, a `Measures` object, a `Vector{Float64}` to store the normalized residuals, and a minimizer instance. A `Residuals` object contains all the information relevant to perform the minimization, and is therefore the only argument required by the [`GModelFit.minimize!`](@ref) function. Since `Residuals` wraps a `ModelEval` object, it is also able to detect changes in the original model.
An example of its usage is as follows:
```@example abc
data = GModelFit.mock(Measures, model, dom)
resid = GModelFit.Residuals(meval, data, GModelFit.lsqfit())
GModelFit.minimize!(resid)
println() # hide
```
The [`GModelFit.MultiResiduals`](@ref) structure serves the same purpose in the [Multi-dataset fitting](@ref) case.
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 1962 | ```@setup abc
include("setup.jl")
```
# Multi-dataset fitting
**GModelFit.jl** is able to simultaneously fit several models against a corresponding number of datasets, while placing constraints among the models. Typical use cases are:
- a single phenomenon is observed with two (or more) instruments/detectors;
- a single phenomenon is observed at different times;
Fitting multiple datasets simultaneously may provide tighter constraints on the best fit parameters, under the assumption that the models are related, i.e. that some of their parameters are tied to one another via *patch* constraints.
To perform a multi-dataset fit we create one `Model` for each dataset in the usual way, collect them in a `Vector{Model}`, and define patch constraints among the models. The following example shows how to fit two Gaussian curves under the hypothesis that their center and normalization parameters are the same:
```@example abc
using GModelFit
# Create individual models and the Vector{Model} container
model1 = Model(GModelFit.Gaussian(1, 0., 1.))
model2 = Model(GModelFit.Gaussian(1, 0., 1.))
multi = [model1, model2]
# Patch parameters
multi[2][:main].norm.mpatch = @fd m -> m[1][:main].norm
multi[2][:main].center.mpatch = @fd m -> m[1][:main].center
# Create datasets and fit
dom = Domain(-5.:5)
data1 = Measures(dom, [-0.006, 0.015, 0.001, 0.049, 0.198, 0.430, 0.226, 0.048, 0.017, -0.001, -0.006], 0.04)
data2 = Measures(dom, [-0.072, -0.033, -0.070, 0.108, 0.168, 0.765, 0.113, -0.054, 0.032, 0.013, 0.015], 0.04)
bestfit, stats = fit(multi, [data1, data2])
show((bestfit, stats)) # hide
```
The best fit models and values are returned as a `Vector{ModelSnapshot}` in `bestfit`, i.e.:
```@example abc
println("Width of Gaussian 1: ", bestfit[1][:main].sigma.val, " ± ", bestfit[1][:main].sigma.unc)
println("Width of Gaussian 2: ", bestfit[2][:main].sigma.val, " ± ", bestfit[2][:main].sigma.unc)
println("Reduced χ^2: ", stats.fitstat)
```
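As a quick consistency check, the patched parameters of the second model should reproduce those of the first. A minimal sketch (assuming the snapshot exposes the `actual` field of a patched parameter, i.e. the value computed from the patch expression):
```julia
# norm and center of model 2 are tied to model 1 via the mpatch constraints
println("norm:   ", bestfit[1][:main].norm.val,   " vs ", bestfit[2][:main].norm.actual)
println("center: ", bestfit[1][:main].center.val, " vs ", bestfit[2][:main].center.actual)
```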
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 0.3.2 | 56ffe14a38a29e27091bba299bc8b91172380957 | docs | 5487 | ```@setup abc
include("setup.jl")
```
# Parameter constraints
Models are characterized by *parameters* (see [Basic concepts and data types](@ref)) whose values are modified during fitting until a convergence criterion is met, and the *best fit* values are identified. In many cases, however, the parameters cannot vary arbitrarily but should satisfy some constraints for their values to be meaningful. **GModelFit.jl** supports the definition of constraints by fixing a parameter to a specific value, limiting its value to a user-defined range, or by dynamically calculating its value using a mathematical expression involving other parameter values. In the latter case the parameter is not free to vary in the fit, since its actual value is determined by the patch constraint, hence it is dubbed a *patched* parameter. Such an unused parameter can optionally be repurposed as a new free parameter in a *parametrized patch expression* (see example below).
An important concept to bear in mind is that the [`GModelFit.Parameter`](@ref) structure provides two fields for the associated numerical value:
- `val`: is the parameter value which is being varied by the minimizer during fitting. The value set before the fitting is the *guess* value. The value after fitting is the *best fit* one;
- `actual`: is the result of the patch expression evaluation, and the actual value used when evaluating a component via its `evaluate!` method. Note that this value is overwritten at each model evaluation, hence setting this field directly has no effect. The `val` and `actual` values are identical if no patch constraint has been defined.
A parameter constraint is defined by explicitly modifying the fields of the corresponding [`GModelFit.Parameter`](@ref) structure. More specifically:
1. to set a parameter to a specific value: set the `val` field to the numeric value and set the `fixed` field to `true`;
1. to set a parameter value range: set one or both the `low` and `high` fields (default values are `-Inf` and `+Inf` respectively);
1. to constrain a parameter to have the same numerical value as another one with the same name (but in another component): set the `patch` field to the component name (it must be a `Symbol`);
1. to dynamically calculate an `actual` value using a mathematical expression depending on other parameter values: set the `patch` field to an anonymous function generated with the [`@fd`](@ref) macro. The function must accept a single argument (actually a dictionary of components) and return a scalar number;
1. to define a parametrized patch expression: create an anonymous function with the [`@fd`](@ref) macro with two arguments: the first has the same meaning as in the previous case, while the second is the free parameter value. Note that the patched parameter loses its original meaning and becomes the parameter of the patch expression;
1. to define a patch constraint involving parameters from other models in a [Multi-dataset fitting](@ref) scenario: simply use `mpatch` in place of `patch`, and the first argument to the λ-function will be a vector with as many elements as the number of models in the `Vector{Model}` object.
The following examples show how to define constraints for each of the aforementioned cases.
### Example
We will consider a model for a 1D domain consisting of the sum of a linear background component (named `bkg`) and two Gaussian-shaped features (`l1` and `l2`):
```@example abc
using GModelFit
model = Model(:bkg => GModelFit.OffsetSlope(1, 1, 0.1),
:l1 => GModelFit.Gaussian(1, 2, 0.2),
:l2 => GModelFit.Gaussian(1, 3, 0.4),
:main => SumReducer(:bkg, :l1, :l2))
println() # hide
```
Assume that, for the model to be meaningful, the parameters should satisfy the following constraints:
- the `bkg` component should have a fixed value of 1 at `x`=1, and a slope in the range [0:0.2]:
```@example abc
model[:bkg].offset.val = 1
model[:bkg].offset.fixed = true
model[:bkg].slope.low = 0
model[:bkg].slope.high = 0.2
println() # hide
```
- the normalization of `l1` and `l2` must be the same:
```@example abc
model[:l2].norm.patch = :l1
println() # hide
```
- the width of `l2` must be twice that of `l1` (patched parameter):
```@example abc
model[:l2].sigma.patch = @fd m -> 2 * m[:l1].sigma
println() # hide
```
- the center of `l2` must be at a larger coordinate with respect to the center of `l1`. In this case we re-interpret the `model[:l2].center` parameter as the distance between the two centers, and create a parametrized patch expression to calculate the actual center value of `l2`:
```@example abc
model[:l2].center.patch = @fd (m, v) -> v + m[:l1].center
model[:l2].center.val = 1 # guess value for the distance between the centers
model[:l2].center.low = 0 # ensure [l2].center > [l1].center
println() # hide
```
We can fit the model against a mock dataset (see [Generate mock datasets](@ref)):
```@example abc
dom = Domain(0:0.1:5)
data = GModelFit.mock(Measures, model, dom)
bestfit, stats = fit(model, data)
show((bestfit, stats)) # hide
```
and plot the results with [Gnuplot.jl](https://github.com/gcalderone/Gnuplot.jl):
```@example abc
using Gnuplot
@gp coords(dom) values(data) uncerts(data) "w yerr t 'Data'" :-
@gp :- coords(dom) model(dom) "w l t 'Model'"
saveas("example_patch1") # hide
```

See [Multi-dataset fitting](@ref) for an example on how to create a patch expression involving multiple models.
| GModelFit | https://github.com/gcalderone/GModelFit.jl.git |
|
[
"MIT"
] | 1.0.0 | f39ffb3c8e62a54a0f0be44742c99748ec492ff1 | code | 368 | include("../src/coordinates.jl")
include("../src/matrix_elements.jl")
include("../src/sampling.jl")
include("../src/constants.jl")
w_list = [ [1, -1, 0], [1, 0, -1], [0, 1, -1] ]       # weight vectors defining the pairwise relative coordinates
masses = [1, 1, 1]                                    # three particles with unit mass
K = [0 0 0; 0 1/2 0; 0 0 1/2]                         # kinetic-energy matrix in the original coordinates
J, U = Ω(masses)                                      # transformation matrices from coordinates.jl
K_trans = J * K * J'                                  # kinetic-energy matrix in the transformed coordinates
w_trans = [U' * w_list[i] for i in 1:length(w_list)]  # transformed weight vectors
p = run_simulation(50, :psudorandom)                  # 50 Gaussians; sampling-mode symbol as defined in sampling.jl
display(p)
| FewBodyPhysics | https://github.com/MartinMikkelsen/FewBodyPhysics.jl.git |
|
[
"MIT"
] | 1.0.0 | f39ffb3c8e62a54a0f0be44742c99748ec492ff1 | code | 1149 | include("../src/coordinates.jl")
include("../src/matrix_elements.jl")
include("../src/sampling.jl")
include("../src/constants.jl")
using Plots   # explicit import for the plot calls below (likely already loaded via the included files)

b = 3.9    # length scale used below to set the radial grid
S = 41.5   # model parameter (not used directly in this script)
energies, Gaussians, eigenvectors, coordinates, masses = run_simulation_nuclear(5,2,5)
rmax = 5 * b
rmin = 0.01 * b
start = log(rmin)
stop = log(rmax)
grid_points = range(rmin,rmax,3000)
Φ = zeros(length(grid_points), length(coordinates))   # one radial wave function per stored basis
for i in 1:length(coordinates)
    local ϕ = zeros(length(grid_points))
    ϕ_sum = zeros(length(grid_points))
    rs = coordinates[i]                 # Gaussian widths of the i-th basis
    c = eigenvectors[i]                 # corresponding linear-combination coefficients
    A = [1 / (b^2) for b in rs]         # Gaussian exponents
    for j in 2:min(length(c), length(A))
        ϕ_sum .+= c[j] .* exp.(-(A[j-1]) .* grid_points.^2)
    end
    ϕ .+= ϕ_sum
    Φ[:, i] = ϕ ./ c[1]                 # normalize by the first coefficient
end
r = range(rmin,rmax, length=3000)
p1 = plot(r, Φ[:,1], title="Φ(r)", label="Φ(r)",ylabel="Φ",xlabel="r", linewidth=2) #with phase
Φ_prime = diff(Φ[:,1]) ./ diff(r)
plot!(r[1:end-1], Φ_prime, label="Φ'(r)", linewidth=2)
p2 = plot(Gaussians, energies, title="Energy = $(round(energies[end]; digits=3))", label="Convergence",ylabel="Energy",xlabel="Number of Gaussians",linewidth=2)
plot(p1, p2, layout = (2, 1)) | FewBodyPhysics | https://github.com/MartinMikkelsen/FewBodyPhysics.jl.git |
|
[
"MIT"
] | 1.0.0 | f39ffb3c8e62a54a0f0be44742c99748ec492ff1 | code | 388 | include("../src/coordinates.jl")
include("../src/matrix_elements.jl")
include("../src/sampling.jl")
include("../src/constants.jl")
w_list = [ [1, -1, 0], [1, 0, -1], [0, 1, -1] ]       # weight vectors defining the pairwise relative coordinates
masses = [5496.918, 3670.481, 206.7686]               # triton, deuteron and muon masses (units of the electron mass)
K = [0 0 0; 0 1/2 0; 0 0 1/2]                         # kinetic-energy matrix in the original coordinates
J, U = Ω(masses)                                      # transformation matrices from coordinates.jl
K_trans = J * K * J'                                  # kinetic-energy matrix in the transformed coordinates
w_trans = [U' * w_list[i] for i in 1:length(w_list)]  # transformed weight vectors
p = run_simulation(50, :quasirandom)                  # 50 Gaussians with quasirandom sampling
display(p)
|
[
"MIT"
] | 1.0.0 | f39ffb3c8e62a54a0f0be44742c99748ec492ff1 | code | 264 | using Documenter
using FewBodyPhysics
makedocs(
#source = "src",
workdir = "build",
sitename = "FewBodyPhysics.jl",
format = Documenter.HTML(),
)
deploydocs(
repo = "github.com/MartinMikkelsen/FewBodyPhysics.jl.git",
target = "build",
) | FewBodyPhysics | https://github.com/MartinMikkelsen/FewBodyPhysics.jl.git |
|
[
"MIT"
] | 1.0.0 | f39ffb3c8e62a54a0f0be44742c99748ec492ff1 | code | 158 | module FewBodyPhysics
using LinearAlgebra, Plots
include("coordinates.jl")
include("matrix_elements.jl")
include("sampling.jl")
include("constants.jl")
end | FewBodyPhysics | https://github.com/MartinMikkelsen/FewBodyPhysics.jl.git |
|
[
"MIT"
] | 1.0.0 | f39ffb3c8e62a54a0f0be44742c99748ec492ff1 | code | 170 | export m_π, m_π0, m_p, m_n, μ, m, ħc, mbare
m_p = 938.27
m_n = 939.57
m_π0 = 134.98
m_π = 139.57
μ = (m_p+m_n)/2*m_π0/((m_p+m_n)/2+m_π0)
ħc = 197.3
mbare=(m_p+m_n)/2 | FewBodyPhysics | https://github.com/MartinMikkelsen/FewBodyPhysics.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.