licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.0.0 | f9458a3b5fa9cc7aef3e6f105695db69964c4b72 | docs | 863 | # Vision.jl
A Julia package for interacting with the [Google Vision API](https://cloud.google.com/vision/).
## Example code snippets
### Using base64 encoded images
```julia
using Vision
using Base64
image = base64encode(open("example.jpg", "r"))
requestBody = makeRequestBody(image, visionFeature("DOCUMENT_TEXT_DETECTION"))
response = getResponse(requestBody)
println(parseFeatures(response))
```
### Using URIs
```julia
using Vision
using URIs
requestBody = makeRequestBody(
URI("https://upload.wikimedia.org/wikipedia/commons/thumb/1/1f/Julia_Programming_Language_Logo.svg/1920px-Julia_Programming_Language_Logo.svg.png"),
[
visionFeature("LABEL_DETECTION", 50),
visionFeature("TEXT_DETECTION", 50),
visionFeature("LOGO_DETECTION", 1),
]
)
response = getResponse(requestBody)
println(parseFeatures(response))
``` | Vision | https://github.com/joshniemela/Vision.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 987 | using MultiData
using Documenter

# Run the package doctests as if `using MultiData` had been executed first.
DocMeta.setdocmeta!(MultiData, :DocTestSetup, :(using MultiData); recursive=true)

makedocs(;
    modules=[MultiData],
    authors="Lorenzo Balboni, Federico Manzella, Giovanni Pagliarini, Eduard I. Stan",
    repo=Documenter.Remotes.GitHub("aclai-lab", "MultiData.jl"),
    sitename="MultiData.jl",
    format=Documenter.HTML(;
        # Allow large generated HTML pages (bytes) before Documenter errors out.
        size_threshold = 4000000,
        # Pretty URLs only on CI; locally keep plain file links for browsing.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://aclai-lab.github.io/MultiData.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
        "Datasets" => "datasets.md",
        "Manipulation" => "manipulation.md",
        "Description" => "description.md",
        "Utils" => "utils.md",
    ],
    # NOTE: warning
    # NOTE(review): `:true` is the quoted Bool literal `true` (not a Symbol);
    # plain `true` would be clearer — behavior is unaffected.
    warnonly = :true,
)

deploydocs(;
    repo = "github.com/aclai-lab/MultiData.jl",
    target = "build",
    branch = "gh-pages",
    versions = ["main" => "main", "stable" => "v^", "v#.#", "dev" => "dev"],
)
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 4660 |
# -------------------------------------------------------------
# LabeledMultiDataset
"""
LabeledMultiDataset(md, labeling_variables)
Create a `LabeledMultiDataset` by associating an `AbstractMultiDataset` with
some labeling variables, specified as a column index (`Int`)
or a vector of column indices (`Vector{Int}`).
# Arguments
* `md` is the original `AbstractMultiDataset`;
* `labeling_variables` is an `AbstractVector` of integers indicating the indices of the
variables that will be set as labels.
# Examples
```julia-repl
julia> lmd = LabeledMultiDataset(MultiDataset([[2],[4]], DataFrame(
:id => [1, 2],
:age => [30, 9],
:name => ["Python", "Julia"],
:stat => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]]
)), [1, 3])
β LabeledMultiDataset
ββ labels
β ββ id: Set([2, 1])
β ββ name: Set(["Julia", "Python"])
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 30
2 β 9
- Modality 2 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
```
"""
struct LabeledMultiDataset{MD} <: AbstractLabeledMultiDataset
    md::MD                           # wrapped (unlabeled) multimodal dataset
    labeling_variables::Vector{Int}  # column indices (w.r.t. `data(md)`) used as labels

    # Inner constructor: normalizes `labeling_variables` to a `Vector{Int}`
    # (accepting a single index or any iterable of indices) and warns when a
    # label column is also used inside a modality.
    function LabeledMultiDataset{MD}(
        md::MD,
        labeling_variables::Union{Int,AbstractVector},
    ) where {MD<:AbstractMultiDataset}
        labeling_variables = Vector{Int}(vec(collect(labeling_variables)))
        for i in labeling_variables
            if _is_variable_in_modalities(md, i)
                # TODO: consider enforcing this instead of just warning
                @warn "Setting as label a variable used in a modality: this is " *
                    "discouraged and probably will not be allowed in future versions"
            end
        end
        return new{MD}(md, labeling_variables)
    end

    # Convenience constructor: infer the `MD` type parameter from the argument.
    function LabeledMultiDataset(
        md::MD,
        labeling_variables::Union{Int,AbstractVector},
    ) where {MD<:AbstractMultiDataset}
        return LabeledMultiDataset{MD}(md, labeling_variables)
    end

    # TODO
    # function LabeledMultiDataset(
    #     labeling_variables::AbstractVector{L},
    #     dfs::Union{AbstractVector{DF},Tuple{DF}}
    # ) where {DF<:AbstractDataFrame,L}
    #     return LabeledMultiDataset(labeling_variables, MultiDataset(dfs))
    # end
    # # Helper
    # function LabeledMultiDataset(
    #     labeling_variables::AbstractVector{L},
    #     dfs::AbstractDataFrame...
    # ) where {L}
    #     return LabeledMultiDataset(labeling_variables, collect(dfs))
    # end
end
# -------------------------------------------------------------
# LabeledMultiDataset - accessors
# Accessor: the wrapped (unlabeled) multimodal dataset.
unlabeleddataset(lmd::LabeledMultiDataset) = lmd.md
# Accessors delegated to the wrapped dataset (delegation pattern).
grouped_variables(lmd::LabeledMultiDataset) = grouped_variables(unlabeleddataset(lmd))
data(lmd::LabeledMultiDataset) = data(unlabeleddataset(lmd))
# Accessor: indices of the label columns, w.r.t. `data(lmd)`.
labeling_variables(lmd::LabeledMultiDataset) = lmd.labeling_variables
# -------------------------------------------------------------
# LabeledMultiDataset - informations
# Pretty-print a labeled dataset: header line, then labels, modalities and
# spare variables (each section rendered by a project `_prettyprint_*` helper).
function show(io::IO, lmd::LabeledMultiDataset)
    println(io, "β LabeledMultiDataset")
    _prettyprint_labels(io, lmd)
    _prettyprint_modalities(io, lmd)
    _prettyprint_sparevariables(io, lmd)
end
# -------------------------------------------------------------
# LabeledMultiDataset - variables
"""
    sparevariables(lmd)

Return the indices of the variables of `lmd` that belong to no modality,
excluding its labeling variables.
"""
function sparevariables(lmd::LabeledMultiDataset)
    # FIX: previously used `filter!`, mutating the vector returned by the inner
    # `sparevariables` call — a value this function does not own. The
    # non-mutating `filter` returns the same filtered result safely.
    return filter(var -> !(var in labeling_variables(lmd)), sparevariables(unlabeleddataset(lmd)))
end
"""
    dropvariables!(lmd, i)

Remove variable (column) `i` from the wrapped dataset of `lmd`, shifting every
labeling-variable index greater than `i` down by one. Return `lmd`.
"""
function dropvariables!(lmd::LabeledMultiDataset, i::Integer)
    # Drop the column from the wrapped dataset first.
    dropvariables!(unlabeleddataset(lmd), i)
    # Label indices past the removed column must shift left by one.
    lbls = labeling_variables(lmd)
    for idx in eachindex(lbls)
        if lbls[idx] > i
            lbls[idx] -= 1
        end
    end
    return lmd
end
# -------------------------------------------------------------
# LabeledMultiDataset - utils
# Select instances `inds` from a labeled dataset; the same labeling variable
# indices are re-attached (instance selection does not move columns).
function SoleBase.instances(
    lmd::LabeledMultiDataset,
    inds::AbstractVector,
    return_view::Union{Val{true},Val{false}} = Val(false)
)
    LabeledMultiDataset(
        SoleBase.instances(unlabeleddataset(lmd), inds, return_view),
        labeling_variables(lmd)
    )
end
# Vertically concatenate labeled datasets, keeping the labeling variables of
# the first one. NOTE(review): assumes all inputs share the same labeling
# variables — confirm callers guarantee this (cf. `concatdatasets`).
function vcat(lmds::LabeledMultiDataset...)
    LabeledMultiDataset(
        vcat(unlabeleddataset.(lmds)...),
        labeling_variables(first(lmds))
    )
end
"""
    _empty(lmd)

Return a copy of `lmd` with no instances, preserving its labeling variables.
"""
function _empty(lmd::LabeledMultiDataset)
    return LabeledMultiDataset(
        _empty(unlabeleddataset(lmd)),
        # BUG FIX: this previously passed `deepcopy(grouped_variables(lmd))`,
        # but the second argument of `LabeledMultiDataset` is the vector of
        # labeling variable indices (`Vector{Int}`), not the modality grouping
        # (`Vector{Vector{Int}}`), which cannot be converted and would error.
        deepcopy(labeling_variables(lmd)),
    )
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 6088 |
__precompile__()
module MultiData
using DataFrames
using StatsBase
using ScientificTypes
using DataStructures
using Statistics
using Catch22
using CSV
using Random
using Reexport
using SoleBase
using SoleBase: AbstractDataset, slicedataset
@reexport using DataFrames
import Base: eltype, isempty, iterate, map, getindex, length
import Base: firstindex, lastindex, ndims, size, show, summary
import Base: vcat
import Base: isequal, isapprox
import Base: ==, β
import Base: in, issubset, setdiff, setdiff!, union, union!, intersect, intersect!
import Base: β, β, βͺ, β©
import DataFrames: describe
import ScientificTypes: show
import SoleBase: instances, ninstances, concatdatasets
import SoleBase: eachinstance
# -------------------------------------------------------------
# exports
# export types
export AbstractDataset, AbstractMultiDataset
export MultiDataset
export AbstractLabeledMultiDataset
export LabeledMultiDataset
# information gathering
export instance, ninstances, slicedataset, concatdatasets
export modality, nmodalities
export variables, nvariables, dimensionality, sparevariables, hasvariables
export variableindex
export isapproxeq, β
export isapprox
export eachinstance, eachmodality
# filesystem
export datasetinfo, loaddataset, savedataset
# instance manipulation
export pushinstances!, deleteinstances!, keeponlyinstances!
# variable manipulation
export insertvariables!, dropvariables!, keeponlyvariables!, dropsparevariables!
# modality manipulation
export addmodality!, removemodality!, addvariable_tomodality!, removevariable_frommodality!
export insertmodality!, dropmodalities!
# labels manipulation
export nlabelingvariables, label, labels, labeldomain, setaslabeling!, unsetaslabeling!, joinlabels!
# re-export from DataFrames
export describe
# re-export from ScientificTypes
export schema
# -------------------------------------------------------------
# Abbreviations
const DF = DataFrames
# -------------------------------------------------------------
# Abstract types
"""
Abstract supertype for all multimodal datasets.
A concrete multimodal dataset should always provide accessors
[`data`](@ref), to access the underlying tabular structure (e.g., `DataFrame`) and
[`grouped_variables`](@ref), to access the grouping of variables
(a vector of vectors of column indices).
"""
abstract type AbstractMultiDataset <: AbstractDataset end
"""
Abstract supertype for all labeled multimodal datasets (used in supervised learning).
As any multimodal dataset, any concrete labeled multimodal dataset should always provide
the accessors [`data`](@ref), to access the underlying tabular structure (e.g., `DataFrame`) and
[`grouped_variables`](@ref), to access the grouping of variables.
In addition to these, implementations are required for
[`labeling_variables`](@ref), to access the indices of the labeling variables.
See also [`AbstractMultiDataset`](@ref).
"""
abstract type AbstractLabeledMultiDataset <: AbstractMultiDataset end
# -------------------------------------------------------------
# AbstractMultiDataset - accessors
#
# Inspired by the "Delegation pattern" of "Design Patterns and Best Practices with
# Julia" Chap. 5 by Tom KwongHands-On
"""
grouped_variables(amd)::Vector{Vector{Int}}
Return the indices of the variables grouped by modality, of an `AbstractMultiDataset`.
The grouping describes how the different modalities are composed from the underlying
`AbstractDataFrame` structure.
See also [`data`](@ref), [`AbstractMultiDataset`](@ref).
"""
# Fallback for the required accessor: concrete subtypes must implement it.
function grouped_variables(amd::AbstractMultiDataset)::Vector{Vector{Int}}
    # FIX: the closing parenthesis of `error` was misplaced, leaving `* "."`
    # outside the call as dead code and dropping the period from the message.
    return error("`grouped_variables` accessor not implemented for type " *
        string(typeof(amd)) * ".")
end
"""
data(amd)::AbstractDataFrame
Return the structure that underlies an `AbstractMultiDataset`.
See also [`grouped_variables`](@ref), [`AbstractMultiDataset`](@ref).
"""
# Fallback for the required accessor: concrete subtypes must implement it.
function data(amd::AbstractMultiDataset)::AbstractDataFrame
    # FIX: the closing parenthesis of `error` was misplaced, leaving `* "."`
    # outside the call as dead code and dropping the period from the message.
    return error("`data` accessor not implemented for type " *
        string(typeof(amd)) * ".")
end
# Concatenate multimodal datasets instance-wise; all inputs must share the
# same variable grouping.
function concatdatasets(amds::AbstractMultiDataset...)
    # FIX: `$(@show …)` in the interpolation printed to stdout as a side
    # effect while building the failure message; plain interpolation suffices.
    @assert allequal(grouped_variables.(amds)) "Cannot concatenate datasets " *
        "with different variable groupings. " *
        "$(grouped_variables.(amds))"
    Base.vcat(amds...)
end
# -------------------------------------------------------------
# AbstractLabeledMultiDataset - accessors
"""
labeling_variables(almd)::Vector{Int}
Return the indices of the labelling variables, of the `AbstractLabeledMultiDataset`.
with respect to the underlying `AbstractDataFrame` structure (see [`data`](@ref)).
See also [`grouped_variables`](@ref), [`AbstractLabeledMultiDataset`](@ref).
"""
# Fallback for the required accessor: concrete subtypes must implement it.
function labeling_variables(almd::AbstractLabeledMultiDataset)::Vector{Int}
    # Trailing period added for consistency with the sibling accessor stubs.
    return error("`labeling_variables` accessor not implemented for type " *
        string(typeof(almd)) * ".")
end
# Concatenate labeled datasets instance-wise; inputs must share both the
# variable grouping and the labeling variables.
function concatdatasets(almds::AbstractLabeledMultiDataset...)
    # FIX: `$(@show …)` in the interpolations printed to stdout as a side
    # effect while building the failure messages; plain interpolation suffices.
    @assert allequal(grouped_variables.(almds)) "Cannot concatenate datasets " *
        "with different variable grouping. " *
        "$(grouped_variables.(almds))"
    @assert allequal(labeling_variables.(almds)) "Cannot concatenate datasets " *
        "with different labeling variables. " *
        "$(labeling_variables.(almds))"
    Base.vcat(almds...)
end
# Short human-readable description, e.g. "2-modality MultiDataset{DataFrame}".
Base.summary(amd::AbstractMultiDataset) = string(length(amd), "-modality ", typeof(amd))
# BUG FIX: this wrote to `stdout` unconditionally, ignoring the `io` argument.
Base.summary(io::IO, amd::AbstractMultiDataset) = print(io, summary(amd))
include("utils.jl")
include("describe.jl")
include("iterable.jl")
include("comparison.jl")
include("set.jl")
include("variables.jl")
include("instances.jl")
include("modalities.jl")
include("interfaces.jl")
include("MultiDataset.jl")
include("labels.jl")
include("LabeledMultiDataset.jl")
include("filesystem.jl")
include("dimensionality.jl")
export dataframe2dimensional, dimensional2dataframe
export cube2dataframe, dataframe2cube
export get_instance, maxchannelsize
export hasnans, displaystructure
include("dimensional-data.jl")
include("deprecate.jl")
end # module
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 10146 |
# -------------------------------------------------------------
# MultiDataset
"""
MultiDataset(df, grouped_variables)
Create a `MultiDataset` from an `AbstractDataFrame` `df`,
initializing its modalities according to the grouping in `grouped_variables`.
`grouped_variables` is an `AbstractVector` of variable grouping which are `AbstractVector`s
of integers representing the index of the variables selected for that modality.
Note that the order matters for both the modalities and the variables.
```julia-repl
julia> df = DataFrame(
:age => [30, 9],
:name => ["Python", "Julia"],
:stat1 => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]],
:stat2 => [[cos(i) for i in 1:50000], [sin(i) for i in 1:50000]]
)
2Γ4 DataFrame
Row β age name stat1 stat2 β―
β Int64 String Arrayβ¦ Arrayβ¦ β―
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 30 Python [0.841471, 0.909297, 0.14112, -0β¦ [0.540302, -0.416147, -0.989992,β¦ β―
2 β 9 Julia [0.540302, -0.416147, -0.989992,β¦ [0.841471, 0.909297, 0.14112, -0β¦
julia> md = MultiDataset([[2]], df)
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Spare variables
ββ dimensionality: mixed
2Γ3 SubDataFrame
Row β age stat1 stat2
β Int64 Arrayβ¦ Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 30 [0.841471, 0.909297, 0.14112, -0β¦ [0.540302, -0.416147, -0.989992,β¦
2 β 9 [0.540302, -0.416147, -0.989992,β¦ [0.841471, 0.909297, 0.14112, -0β¦
```
MultiDataset(df; group = :none)
Create a `MultiDataset` from an `AbstractDataFrame` `df`,
automatically selecting modalities.
The selection of modalities can be controlled by the `group` argument which can be:
- `:none`: no modality will be created
- `:all` (default): all variables will be grouped by their [`dimensionality`](@ref)
- a list of dimensionalities which will be grouped.
Note: `:all` and `:none` are the only `Symbol`s accepted by `group`.
# TODO: fix passing a vector of Integer to `group`
# TODO: rewrite examples
# Examples
```julia-repl
julia> df = DataFrame(
:age => [30, 9],
:name => ["Python", "Julia"],
:stat1 => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]],
:stat2 => [[cos(i) for i in 1:50000], [sin(i) for i in 1:50000]]
)
2Γ4 DataFrame
Row β age name stat1 stat2 β―
β Int64 String Arrayβ¦ Arrayβ¦ β―
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 30 Python [0.841471, 0.909297, 0.14112, -0β¦ [0.540302, -0.416147, -0.989992,β¦ β―
2 β 9 Julia [0.540302, -0.416147, -0.989992,β¦ [0.841471, 0.909297, 0.14112, -0β¦
julia> md = MultiDataset(df)
β MultiDataset
ββ dimensionalities: ()
- Spare variables
ββ dimensionality: mixed
2Γ4 SubDataFrame
Row β age name stat1 stat2 β―
β Int64 String Arrayβ¦ Arrayβ¦ β―
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 30 Python [0.841471, 0.909297, 0.14112, -0β¦ [0.540302, -0.416147, -0.989992,β¦ β―
2 β 9 Julia [0.540302, -0.416147, -0.989992,β¦ [0.841471, 0.909297, 0.14112, -0β¦
julia> md = MultiDataset(df; group = :all)
β MultiDataset
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age name
β Int64 String
ββββββΌβββββββββββββββ
1 β 30 Python
2 β 9 Julia
- Modality 2 / 2
ββ dimensionality: 1
2Γ2 SubDataFrame
Row β stat1 stat2
β Arrayβ¦ Arrayβ¦
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦ [0.540302, -0.416147, -0.989992,β¦
2 β [0.540302, -0.416147, -0.989992,β¦ [0.841471, 0.909297, 0.14112, -0β¦
julia> md = MultiDataset(df; group = [0])
β MultiDataset
ββ dimensionalities: (0, 1, 1)
- Modality 1 / 3
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age name
β Int64 String
ββββββΌβββββββββββββββ
1 β 30 Python
2 β 9 Julia
- Modality 2 / 3
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
- Modality 3 / 3
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat2
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.540302, -0.416147, -0.989992,β¦
2 β [0.841471, 0.909297, 0.14112, -0β¦
```
"""
struct MultiDataset{DF<:AbstractDataFrame} <: AbstractMultiDataset
    grouped_variables::Vector{Vector{Int}}  # column indices of `data`, grouped by modality
    data::DF                                # underlying tabular data

    # Main inner constructor: normalizes `grouped_variables` so that each group
    # is a `Vector{Int}` of column indices (Symbols resolved via `_name2index`).
    function MultiDataset(
        df::DF,
        grouped_variables::AbstractVector,
    ) where {DF<:AbstractDataFrame}
        grouped_variables = map(group->begin
            # A lone index/Symbol is promoted to a one-element group.
            if !(group isa AbstractVector)
                group = [group]
            end
            group = collect(group)
            # Mixing Symbols and integers within one group is ambiguous: reject.
            if any(var_name->var_name isa Symbol, group) &&
                any(var_name->var_name isa Integer, group)
                return error("Cannot mix different types of " *
                    "column identifiers; please, only use column indices (integers) or " *
                    "Symbols. Encountered: $(group), $(join(unique(typeof.(group)), ", ")).")
            end
            # Resolve Symbols to their column indices.
            group = [var_name isa Symbol ? _name2index(df, var_name) : var_name for var_name in group]
            @assert group isa Vector{<:Integer}
            group
        end, grouped_variables)
        grouped_variables = collect(Vector{Int}.(collect.(grouped_variables)))
        grouped_variables = Vector{Vector{Int}}(grouped_variables)
        return new{DF}(grouped_variables, df)
    end
    # Helper
    # Argument-order convenience: (grouped_variables, df).
    function MultiDataset(
        grouped_variables::AbstractVector,
        df::DF,
    ) where {DF<:AbstractDataFrame}
        return MultiDataset(df, grouped_variables)
    end
    # Automatic grouping of columns by dimensionality: `group` is `:all`,
    # `:none`, or a vector of dimensionalities to group (others become spare).
    function MultiDataset(
        df::AbstractDataFrame;
        group::Union{Symbol,AbstractVector{<:Integer}} = :all
    )
        @assert isa(group, AbstractVector) || group in [:all, :none] "group can be " *
            "`:all`, `:none` or an `AbstractVector` of dimensionalities"
        if group == :none
            @warn "Creating MultiDataset with no modalities"
            return MultiDataset([], df)
        end
        # dimensionality => indices of the columns with that dimensionality.
        dimdict = Dict{Integer,AbstractVector{<:Integer}}()
        spare = AbstractVector{Integer}[]
        for (i, c) in enumerate(eachcol(df))
            dim = dimensionality(DataFrame(:curr => c))
            if isa(group, AbstractVector) && !(dim in group)
                # Dimensionality not requested: column becomes its own group.
                push!(spare, [i])
            elseif haskey(dimdict, dim)
                push!(dimdict[dim], i)
            else
                dimdict[dim] = Integer[i]
            end
        end
        # Modalities ordered by increasing dimensionality, spare columns last.
        desc = sort(collect(zip(keys(dimdict), values(dimdict))), by = x -> x[1])
        return MultiDataset(append!(map(x -> x[2], desc), spare), df)
    end
    # One modality per DataFrame; column names must not clash across modalities.
    function MultiDataset(
        dfs::Union{AbstractVector{DF},Tuple{DF}}
    ) where {DF<:AbstractDataFrame}
        for (i, j) in Iterators.product(1:length(dfs), 1:length(dfs))
            if i == j continue end
            df1 = dfs[i]
            df2 = dfs[j]
            @assert length(
                intersect(names(df1), names(df2))
            ) == 0 "Cannot build MultiDataset with clashing " *
                "variable names across modalities: $(intersect(names(df1), names(df2)))"
        end
        # Concatenate horizontally and record the contiguous index range of
        # each source DataFrame as one modality.
        grouped_variables = []
        i = 1
        for nvars in ncol.(dfs)
            push!(grouped_variables, i:(nvars+i-1))
            i += nvars
        end
        df = hcat(dfs...)
        return MultiDataset(df, grouped_variables)
    end
    # Helper
    MultiDataset(dfs::AbstractDataFrame...) = MultiDataset(collect(dfs))
end
# -------------------------------------------------------------
# MultiDataset - accessors
# Accessors for the concrete `MultiDataset` (see the abstract interface above).
grouped_variables(md::MultiDataset) = md.grouped_variables
data(md::MultiDataset) = md.data
# -------------------------------------------------------------
# MultiDataset - informations
# Pretty-print: header, then each modality, then the spare variables.
function show(io::IO, md::MultiDataset)
    _prettyprint_header(io, md)
    _prettyprint_modalities(io, md)
    _prettyprint_sparevariables(io, md)
end
# -------------------------------------------------------------
# MultiDataset - utils
# Select instances `inds`, returning a new `MultiDataset` with the same
# variable grouping.
function SoleBase.instances(
    md::MultiDataset,
    inds::AbstractVector,
    return_view::Union{Val{true},Val{false}} = Val(false),
)
    # NOTE(review): `return_view` exists for interface compatibility, but only
    # `Val(false)` is supported here — the slice below always copies.
    @assert return_view == Val(false)
    @assert all(i->i<=ninstances(md), inds) "Cannot slice MultiDataset of $(ninstances(md)) instances with indices $(inds)."
    MultiDataset(data(md)[inds,:], grouped_variables(md))
end
import Base: view
# Non-copying view of the underlying DataFrame, same modality grouping.
Base.@propagate_inbounds function view(md::MultiDataset, inds...)
    MultiDataset(view(data(md), inds...), grouped_variables(md))
end
# Single-row view: wrap the index so the result stays a (1-row) table.
Base.@propagate_inbounds function view(md::MultiDataset, inds::Integer, ::Colon)
    MultiDataset(view(data(md), [inds], :), grouped_variables(md))
end
# Vertically concatenate `MultiDataset`s, keeping the grouping of the first.
function vcat(mds::MultiDataset...)
    # BUG FIX: `vcat((data.(mds)...))` built a single Tuple argument instead
    # of splatting the underlying DataFrames into `vcat`
    # (cf. the correct splat in the LabeledMultiDataset `vcat` method).
    MultiDataset(vcat(data.(mds)...), grouped_variables(first(mds)))
end
"""
_empty(md)
Return a copy of a multimodal dataset with no instances.
Note: since the returned AbstractMultiDataset will be empty its columns types will be
`Any`.
"""
# Build a zero-instance copy of `md`; columns keep their names but (as noted
# in the docstring above) become `Any`-typed because they are created empty.
function _empty(md::MultiDataset)
    colnames = Symbol.(names(data(md)))
    emptydf = DataFrame([colname => [] for colname in colnames])
    return MultiDataset(deepcopy(grouped_variables(md)), emptydf)
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 2366 |
# -------------------------------------------------------------
# AbstractMultiDataset - comparison
"""
==(md1, md2)
isequal(md1, md2)
Determine whether two `AbstractMultiDataset`s are equal.
Note: the check is also performed on the instances; this means that if the two datasets only
differ by the order of their instances, this will return `false`.
If the intent is to check if two `AbstractMultiDataset`s have same instances regardless
of the order use [`isapproxeq`](@ref) instead.
If the intent is to check if two `AbstractMultiDataset`s have same variable groupings and
variables use [`isapprox`](@ref) instead.
"""
# Exact equality: raw data and grouping match, or the project-level structural
# checks (`_same_md` / `_same_labeling_variables`) both agree.
function isequal(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    return (data(md1) == data(md2) && grouped_variables(md1) == grouped_variables(md2)) ||
        (_same_md(md1, md2) && _same_labeling_variables(md1, md2))
end
# `==` delegates to `isequal` (instance order matters; see docstring above).
function ==(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    return isequal(md1, md2)
end
"""
β(md1, md2)
isapproxeq(md1, md2)
Determine whether two `AbstractMultiDataset`s have
the same variable groupings, variables and instances.
Note: the order of the instance does not matter.
If the intent is to check if two `AbstractMultiDataset`s have same instances in the
same order use [`isequal`](@ref) instead.
If the intent is to check if two `AbstractMultiDataset`s have same variable groupings and
variables use [`isapprox`](@ref) instead.
TODO review
"""
function isapproxeq(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # NOTE(review): the docstring above says instance order does not matter,
    # but requiring `isequal` makes this at least as strict as ordered
    # equality — semantics need review (docstring itself carries "TODO review").
    return isequal(md1, md2) && _same_instances(md1, md2)
end
# Operator alias for `isapproxeq`.
function β(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    return isapproxeq(md1, md2)
end
"""
β(md1, md2)
isapprox(md1, md2)
Determine whether two `AbstractMultiDataset`s are similar, that is,
if they have same variable groupings
and variables. Note that this means no check over instances is performed.
If the intent is to check if two `AbstractMultiDataset`s have same instances in the same
order use [`isequal`](@ref) instead.
If the intent is to check if two `AbstractMultiDataset`s have same instances regardless
of the order use [`isapproxeq`](@ref) instead.
"""
# Structural similarity: same variable grouping (and hence same variables);
# instances are deliberately not compared (see the docstring above).
function isapprox(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # NOTE: _same_grouped_variables already includes variables checking
    return _same_grouped_variables(md1, md2)
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 207 | export MultiModalDataset
export LabeledMultiModalDataset
# Deprecated names kept as aliases for backward compatibility.
const AbstractMultiModalDataset = AbstractMultiDataset
const MultiModalDataset = MultiDataset
const LabeledMultiModalDataset = LabeledMultiDataset
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 4563 |
# -------------------------------------------------------------
# AbstractMultiDataset - describe
# Map descriptor Symbols to the functions computing them; also includes every
# catch22 time-series feature in addition to the basic summary statistics.
const desc_dict = Dict{Symbol,Function}(
    :mean => mean,
    :min => minimum,
    :max => maximum,
    :median => median,
    # FIX: the quantile entries were written as `(q_1 = x -> …)` /
    # `(q_3 = x -> …)`, assignment expressions that leaked `q_1`/`q_3` as
    # module-level globals; the bare anonymous functions are equivalent values.
    :quantile_1 => (x -> quantile(x, 0.25)),
    :quantile_3 => (x -> quantile(x, 0.75)),
    # allow catch22 desc
    (getnames(catch22) .=> catch22)...
)
# Default descriptors chosen automatically for each modality dimensionality.
const auto_desc_by_dim = Dict{Integer,Vector{Symbol}}(
    1 => [:mean, :min, :max, :quantile_1, :median, :quantile_3]
)
# Apply `descfunction` through `paa` to each requested column of `df`,
# returning one Matrix per column (rows = instances, columns = paa chunks).
function _describeonm(
    df::AbstractDataFrame;
    descfunction::Function,
    cols::AbstractVector{<:Integer} = 1:ncol(df),
    t::AbstractVector{<:NTuple{3,Integer}} = [(1, 0, 0)]
)
    modality_dim = dimensionality(df)
    @assert length(t) == 1 || length(t) == modality_dim "`t` length has to be `1` or the " *
        "dimensionality of the modality ($(modality_dim))"
    if modality_dim > 1 && length(t) == 1
        # if dimensionality is > 1 but only 1 triple is passed use it for all dimensionalities
        # NOTE(review): `fill(t, modality_dim)` nests the vector (a Vector of
        # Vectors) rather than repeating the triple — confirm intended
        # (`fill(t[1], modality_dim)` would match the comment above).
        t = fill(t, modality_dim)
    end
    x = Matrix{AbstractFloat}[]
    for j in cols
        # TODO: not a good habit using abstract type as elements of Arrays
        y = Matrix{AbstractFloat}(undef, nrow(df), t[1][1])
        # TODO: maybe
        # Parallelize over instances; `paa` chunks each cell's series.
        Threads.@threads for (i, paa_result) in collect(enumerate(paa.(df[:,j]; f = descfunction, t = t)))
            y[i,:] = paa_result
        end
        push!(x, y)
    end
    return x
end
# TODO: describeonm should have the same interface as the `describe` function from DataFrames
# describe(df::AbstractDataFrame; cols=:)
# describe(df::AbstractDataFrame, stats::Union{Symbol,Pair}...; cols=:)
# Build a description DataFrame: one row per variable of `df`, one column per
# requested descriptor (validated against `desc_dict`).
function describeonm(
    df::AbstractDataFrame;
    desc::AbstractVector{Symbol} = Symbol[],
    t::AbstractVector{<:NTuple{3,Integer}} = [(1, 0, 0)],
)
    # Reject unknown descriptor names early with the list of valid ones.
    for d in desc
        @assert d in keys(desc_dict) "`$(d)` is not a valid descriptor Symbol; available " *
            "descriptors are $(keys(desc_dict))"
    end
    return DataFrame(
        :Variables => Symbol.(propertynames(df)),
        [d => _describeonm(df; descfunction = desc_dict[d], t) for d in desc]...
    )
end
# TODO: same as above
"""
describe(md; t = fill([(1, 0, 0)], nmodalities(md)), kwargs...)
Return descriptive statistics for an `AbstractMultiDataset` as a `Vector` of new
`DataFrame`s where each row represents a variable and each column a summary statistic.
# Arguments
* `md`: the `AbstractMultiDataset`;
* `t`: is a vector of `nmodalities` elements,
where each element is a vector as long as the dimensionality of
the i-th modality. Each element of the innermost vector is a tuple
of arguments for [`paa`](@ref).
For other see the documentation of [`DataFrames.describe`](@ref) function.
# Examples
TODO: examples
"""
# Describe each modality independently; returns a Vector of DataFrames
# (one per modality), forwarding the matching `t` element to each.
function DF.describe(
    md::AbstractMultiDataset;
    t::AbstractVector{<:AbstractVector{<:NTuple{3,Integer}}} = fill([(1, 0, 0)], nmodalities(md)),
    kwargs...
)
    return [DF.describe(md, i; t = t[i], kwargs...) for i in 1:nmodalities(md)]
end
# TODO: implement this
# function DF.describe(md::MultiDataset, stats::Union{Symbol,Pair}...; cols=:)
#     # TODO: select proper defaults stats based on `dimensionality` of each modality
# end
# Describe a single modality `i`, dispatching on its dimensionality.
function DF.describe(md::AbstractMultiDataset, i::Integer; kwargs...)
    modality_dim = dimensionality(modality(md, i))
    if modality_dim == :mixed || modality_dim == :empty
        # TODO: implement for mixed???
        throw(ErrorException("Description for `:$(modality_dim)` dimensionality modality not implemented"))
    elseif modality_dim == 0
        # Scalar columns: plain DataFrames.describe is adequate.
        return DF.describe(modality(md, i))
    else
        # Dimensional columns: take descriptors from kwargs or pick defaults
        # appropriate for this dimensionality.
        desc = haskey(kwargs, :desc) ? kwargs[:desc] : auto_desc_by_dim[modality_dim]
        return describeonm(modality(md, i); desc = desc, kwargs...)
    end
end
# Summarize a "description DataFrame" (as produced by `describeonm`): each
# selected column is expected to hold per-instance Matrices of chunked
# descriptor values; `functions` (e.g. var, std) are applied chunk-wise.
function _stat_description(
    df::AbstractDataFrame;
    functions::AbstractVector{Function} = [var, std],
    cols::AbstractVector{<:Integer} = collect(2:ncol(df))
)
    for col in eachcol(df)[cols]
        @assert eltype(col) <: AbstractArray "`df` is not a description DataFrame"
    end
    # For one statistic `func`: build `<col>_<func>` => per-instance vectors
    # of `func` applied to each chunk (matrix column) of the description.
    function apply_func_2_col(func::Function)
        return cat(
            [Symbol(names(df)[c] * "_" * string(nameof(func))) =>
                [[func(r[:,chunk]) for chunk in 1:size(r, 2)] for r in df[:,c]] for c in cols]...;
            dims = 1
        )
    end
    # Interleave generated columns so all statistics of a variable are adjacent.
    total_cols = length(functions)*length(cols)
    order = cat([collect(i:length(cols):total_cols) for i in 1:length(cols)]...; dims = 1)
    gen_cols = cat([apply_func_2_col(f) for f in functions]...; dims = 1)
    return DataFrame(:VARIABLE => df[:,1], gen_cols[order]...)
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 9513 | # -------------------------------------------------------------
# Dimensional dataset: a simple dataset structure (basically, a hypercube)
using StatsBase
import Base: eltype
import SoleBase: dimensionality, channelsize
# NaN-detection helpers: `nothing` is treated as non-NaN.
_isnan(n::Number) = isnan(n)
_isnan(n::Nothing) = false
# Return `true` iff the value (or any element of the array) is NaN.
hasnans(n::Number) = _isnan(n)
# Predicate form of `any` short-circuits and avoids allocating the
# intermediate Bool array that `any(_isnan.(a))` materialized.
hasnans(a::AbstractArray) = any(_isnan, a)
############################################################################################
"""
AbstractDimensionalDataset{T<:Number,D} = AbstractVector{<:AbstractArray{T,D}}
A `D`-dimensional dataset is a vector of
(multivariate) `D`-dimensional instances (or samples):
Each instance is an `Array` with size X Γ Y Γ ... Γ nvariables
The dimensionality of the channel is denoted as N = D-1 (e.g. 1 for time series,
2 for images), and its dimensionalities are denoted as X, Y, Z, etc.
Note: It'd be nice to define these with N being the dimensionality of the channel:
e.g. `const AbstractDimensionalDataset{T<:Number,N} = AbstractVector{<:AbstractArray{T,N+1}}`
Unfortunately, this is not currently allowed (see https://github.com/JuliaLang/julia/issues/8322 )
"""
const AbstractDimensionalDataset{T<:Number,D} = AbstractVector{<:AbstractArray{T,D}}
function eachinstance(X::AbstractDimensionalDataset)
X
end
hasnans(d::AbstractDimensionalDataset{<:Union{Nothing,Number}}) = any(hasnans, eachinstance(d))
dimensionality(::Type{<:AbstractDimensionalDataset{T,D}}) where {T<:Number,D} = D-1
dimensionality(d::AbstractDimensionalDataset) = dimensionality(typeof(d))
ninstances(d::AbstractDimensionalDataset{T,D}) where {T<:Number,D} = length(d)
# Verify that every instance carries the same number of variables (the size of
# its last dimension); return `true`, or error with the observed distribution.
function checknvariables(d::AbstractDimensionalDataset{T,D}) where {T<:Number,D}
    counts = map(inst -> size(inst, D), eachinstance(d))
    if allequal(counts)
        return true
    end
    error("Non-uniform nvariables in dimensional dataset:" *
        " $(countmap(counts))")
end
nvariables(d::AbstractDimensionalDataset{T,D}) where {T<:Number,D} = size(first(eachinstance(d)), D)
function instances(d::AbstractDimensionalDataset, inds::AbstractVector, return_view::Union{Val{true},Val{false}} = Val(false))
if return_view == Val(true) @views d[inds] else d[inds] end
end
function concatdatasets(ds::AbstractDimensionalDataset{T}...) where {T<:Number}
vcat(ds...)
end
"""
    displaystructure(d; indent_str = "", include_ninstances = true)

Return a multi-line, human-readable summary of dimensional dataset `d`
(dimensionality, instance/variable counts, channel sizes), with each line
prefixed by `indent_str` and a tree-drawing character.
"""
function displaystructure(d::AbstractDimensionalDataset; indent_str = "", include_ninstances = true)
    # Left label + right value padded so that values line up in a column.
    padattribute(l,r) = string(l) * lpad(r,32+length(string(r))-(length(indent_str)+2+length(l)))
    pieces = []
    push!(pieces, "AbstractDimensionalDataset")
    push!(pieces, "$(padattribute("dimensionality:", dimensionality(d)))")
    if include_ninstances
        push!(pieces, "$(padattribute("# instances:", ninstances(d)))")
    end
    push!(pieces, "$(padattribute("# variables:", nvariables(d)))")
    push!(pieces, "$(padattribute("channelsize countmap:", StatsBase.countmap(map(i_instance->channelsize(d, i_instance), 1:ninstances(d)))))")
    push!(pieces, "$(padattribute("maxchannelsize:", maxchannelsize(d)))")
    push!(pieces, "$(padattribute("size × eltype:", "$(size(d)) × $(eltype(d))"))")
    # Three-argument join: "├ " before middle items, "└ " before the last one.
    return join(pieces, "\n$(indent_str)├ ", "\n$(indent_str)└ ")
end
# TODO remove one of the two. @ferdiu
# Access the `idx`-th instance (as a view-friendly reference, no copy).
instance(d::AbstractDimensionalDataset, idx::Integer) = @views d[idx]
get_instance(args...) = instance(args...)

# Channel size of a single instance of the dataset / the largest one over all instances.
channelsize(d::AbstractDimensionalDataset, i_instance::Integer) = instance_channelsize(d[i_instance])
maxchannelsize(d::AbstractDimensionalDataset) = maximum(idx->channelsize(d, idx), 1:length(d))

# Extract the channel of a single variable from one instance; the variables axis
# is the last one, so the result drops exactly one dimension (N = D-1).
instance_channel(inst::AbstractArray{T,1}, variable_idx::Integer) where T = @views inst[   variable_idx]::T                   # N=0
instance_channel(inst::AbstractArray{T,2}, variable_idx::Integer) where T = @views inst[:, variable_idx]::AbstractArray{T,1}  # N=1
instance_channel(inst::AbstractArray{T,3}, variable_idx::Integer) where T = @views inst[:, :, variable_idx]::AbstractArray{T,2}  # N=2

# Channel size (all dims but the last) and variable count (last dim) of one instance.
instance_channelsize(inst::AbstractArray) = size(inst)[1:end-1]
instance_nvariables(inst::AbstractArray) = size(inst, ndims(inst))
############################################################################################
############################################################################################
############################################################################################
# import Tables: subset
# function Tables.subset(X::AbstractDimensionalDataset, inds; viewhint = nothing)
# slicedataset(X, inds; return_view = (isnothing(viewhint) || viewhint == true))
# end
# using MLJBase
# using MLJModelInterface
# import MLJModelInterface: selectrows, _selectrows
# # From MLJModelInferface.jl/src/data_utils.jl
# function MLJModelInterface._selectrows(X::AbstractDimensionalDataset{T,4}, r) where {T<:Number}
# slicedataset(X, inds; return_view = (isnothing(viewhint) || viewhint == true))
# end
# function MLJModelInterface._selectrows(X::AbstractDimensionalDataset{T,5}, r) where {T<:Number}
# slicedataset(X, inds; return_view = (isnothing(viewhint) || viewhint == true))
# end
# function MLJModelInterface.selectrows(::MLJBase.FI, ::Val{:table}, X::AbstractDimensionalDataset, r)
# r = r isa Integer ? (r:r) : r
# return Tables.subset(X, r)
# end
"""
    _check_dataframe(df)

Validate that every column of `df` holds either `Real` scalars or `AbstractArray`s
whose elements are `Real`; warn when the common element type is not concrete.
"""
function _check_dataframe(df::AbstractDataFrame)
    column_types = eltype.(eachcol(df))

    # Each column must contain scalars or arrays.
    bad_column_types = filter(t -> !(t <: Union{Real,AbstractArray}), column_types)
    @assert isempty(bad_column_types) "Column types not allowed: " *
        "$(join(bad_column_types, ", "))"

    # The elements inside those columns/arrays must be Real.
    bad_element_types = filter(t -> !(t <: Real), eltype.(column_types))
    @assert isempty(bad_element_types) "Column eltypes not allowed: " *
        "$(join(bad_element_types, ", "))"

    common_eltype = Union{eltype.(column_types)...}
    @assert common_eltype <: Real

    if !isconcretetype(common_eltype)
        @warn "Common variable eltype `$(common_eltype)` is not concrete. " *
            "consider converting all values to $(promote_type(eltype.(column_types)...))."
    end
end
# function dimensional2dataframe(X::AbstractDimensionalDataset, colnames = nothing) # colnames = :auto
# Convert a dimensional dataset into a `DataFrame` with one column per variable;
# each cell holds the channel of that variable for one instance.
function dimensional2dataframe(X, colnames = nothing) # colnames = :auto
    MultiData.checknvariables(X)
    columns = map(1:nvariables(X)) do i_variable
        map(instance->MultiData.instance_channel(instance, i_variable), eachinstance(X))
    end
    if isnothing(colnames)
        # Default names: V1, V2, ...
        colnames = ["V$(i_var)" for i_var in 1:length(columns)]
    end
    return DataFrame(columns, colnames)
end
"""
    dataframe2dimensional(df; dry_run = false)

Convert a `DataFrame` whose cells are (arrays of) `Real`s into a dimensional
dataset; return the tuple `(dataset, colnames)`.

Each row becomes one instance: the row's cells are concatenated along a new
trailing (variables) axis.

NOTE(review): `dry_run` is currently accepted but unused (its branch is
commented out below), and `common_eltype`/`n_variables` are only needed by that
dead branch.
"""
function dataframe2dimensional(
    df::AbstractDataFrame;
    dry_run::Bool = false,
)
    MultiData._check_dataframe(df)
    coltypes = eltype.(eachcol(df))
    common_eltype = Union{eltype.(coltypes)...}
    n_variables = ncol(df)
    # One instance per row: stack the row's cells along a new last axis.
    # `ndims(row[1])+1` assumes all cells of the row share the first cell's rank
    # (guaranteed for valid datasets; `_check_dataframe` does not enforce it).
    dataset = [begin
        instance = begin
            # if !dry_run
                cat(collect(row)...; dims=ndims(row[1])+1)
            # else
            #     Array{common_eltype}(undef, __channelsize..., n_variables)
            # end
        end
        instance
    end for row in eachrow(df)]
    return dataset, names(df)
end
# Convert a cube (channel dims... × nvariables × ninstances) into a `DataFrame`
# with one column per variable; each cell is the channel slice of one instance.
function cube2dataframe(X::AbstractArray, colnames = nothing) # colnames = :auto
    vardim = ndims(X) - 1
    varslices = eachslice(X; dims = vardim)
    if isnothing(colnames)
        colnames = ["V$(idx)" for idx in 1:length(varslices)]
    end
    # Each variable slice is then split along its own last axis (the instances).
    return DataFrame(eachslice.(varslices; dims = vardim), colnames)
end
"""
    dataframe2cube(df; dry_run = false)

Convert a `DataFrame` of uniformly-shaped channels into a single `Array` (cube)
of size (channel dims... × nvariables × ninstances); return `(cube, colnames)`.

All cells must have the same `ndims` and `size`, per column and across columns.
With `dry_run = true` the cube is allocated but not filled.
"""
function dataframe2cube(
    df::AbstractDataFrame;
    dry_run::Bool = false,
)
    _check_dataframe(df)
    coltypes = eltype.(eachcol(df))
    common_eltype = Union{eltype.(coltypes)...}
    # _channelndims = (x)->ndims(x) # (hasmethod(ndims, (typeof(x),)) ? ndims(x) : missing)
    # _channelsize = (x)->size(x) # (hasmethod(size, (typeof(x),)) ? size(x) : missing)
    # Per-column check: each variable's cells must all share the same ndims...
    df_ndims = ndims.(df)
    percol_channelndimss = [(colname => unique(df_ndims[:,colname])) for colname in names(df)]
    wrong_percol_channelndimss = filter(((colname, ndimss),)->length((ndimss)) != 1, percol_channelndimss)
    @assert length(wrong_percol_channelndimss) == 0 "All instances should have the same " *
        "ndims for each variable. Got ndims's: $(wrong_percol_channelndimss)"
    # ...and the same size.
    df_size = size.(df)
    percol_channelsizess = [(colname => unique(df_size[:,colname])) for colname in names(df)]
    wrong_percol_channelsizess = filter(((colname, channelsizess),)->length((channelsizess)) != 1, percol_channelsizess)
    @assert length(wrong_percol_channelsizess) == 0 "All instances should have the same " *
        "size for each variable. Got sizes: $(wrong_percol_channelsizess)"
    # Cross-column check: every variable must share a single channel size.
    channelsizes = first.(last.(percol_channelsizess))
    @assert allequal(channelsizes) "All variables should have the same " *
        "channel size. Got: $(SoleBase._groupby(channelsizes, names(df))))"
    __channelsize = first(channelsizes)
    n_variables = ncol(df)
    n_instances = nrow(df)
    cube = Array{common_eltype}(undef, __channelsize..., n_variables, n_instances)
    if !dry_run
        for (i_col, colname) in enumerate(eachcol(df))
            for (i_row, row) in enumerate(colname)
                # 0-dimensional cells are unwrapped to their scalar value.
                if ndims(row) == 0
                    row = first(row)
                end
                # Assign the whole channel: one `(:)` per channel dimension.
                cube[[(:) for i in 1:length(size(row))]...,i_col,i_row] = row
            end
        end
    end
    return cube, names(df)
end
# Round-trip conversions that go through the `DataFrame` representation.
cube2dimensional(X::AbstractArray) = dataframe2dimensional(cube2dataframe(X))
dimensional2cube(X) = dataframe2cube(dimensional2dataframe(X))
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 1499 | import SoleBase: dimensionality
# -------------------------------------------------------------
# AbstractMultiDataset - infos
"""
dimensionality(df)
Return the dimensionality of a dataframe `df`.
If the dataframe has variables of various dimensionalities `:mixed` is returned.
If the dataframe is empty (no instances) `:empty` is returned.
This behavior can be controlled by setting the keyword argument `force`:
- `:no` (default): return `:mixed` in case of mixed dimensionality
- `:max`: return the greatest dimensionality
- `:min`: return the lowest dimensionality
"""
function dimensionality(df::AbstractDataFrame; force::Symbol = :no)::Union{Symbol,Integer}
@assert force in [:no, :max, :min] "`force` can be either :no, :max or :min"
if nrow(df) == 0
return :empty
end
dims = [maximum(x -> isa(x, AbstractArray) ? ndims(x) : 0, [inst for inst in c])
for c in eachcol(df)]
if all(y -> y == dims[1], dims)
return dims[1]
elseif force == :max
return max(dims...)
elseif force == :min
return min(dims...)
else
return :mixed
end
end
# Dimensionality of the `i`-th modality of a multimodal dataset.
function dimensionality(md::AbstractMultiDataset, i::Integer; kwargs...)
    return dimensionality(modality(md, i); kwargs...)
end
# Dimensionality of every modality, as a tuple with one entry per modality.
function dimensionality(md::AbstractMultiDataset; kwargs...)
    return Tuple([dimensionality(modality; kwargs...) for modality in md])
end
# Convenience overload for `DataFrameColumns`: materialize and delegate.
dimensionality(dfc::DF.DataFrameColumns; kwargs...) = dimensionality(DataFrame(dfc); kwargs...)
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 21219 |
# -------------------------------------------------------------
# AbstractMultiDataset - filesystem operations

# Filenames/prefixes that make up the on-disk dataset layout.
const _ds_inst_prefix = "Example_"
const _ds_modality_prefix = "Modality_"
const _ds_metadata = "Metadata.txt"
const _ds_labels = "Labels.csv"

# Human-readable name used in user-facing messages and docstrings.
# `const` so the global is type-stable (it was a plain, untyped global before).
const DATASET_ENC_NAME = "Dataset" # Name for our enconding TODO choose name
# Parse a textual dimension tuple like "(3, 4)", "(5,)" or "3 4" into a Tuple of Ints.
function _parse_dim_tuple(str::AbstractString)
    stripped = strip(str, ['(', ')', '\n', ',', ' '])
    # Values may be comma- or space-separated.
    sep = occursin(",", stripped) ? "," : " "
    parts = split(stripped, sep; keepempty = false)
    return Tuple(parse.(Int64, parts))
end
"""
    _read_dataset_metadata(datasetdir)

Parse the dataset-level `Metadata.txt` of the dataset at `datasetdir` into a
`Dict{String,Any}` (`supervised` as `Bool`, counts as `Int64`, `name` as string).
Unknown keys trigger a warning and are skipped.
"""
function _read_dataset_metadata(datasetdir::AbstractString)
    @assert isfile(joinpath(datasetdir, _ds_metadata)) "Missing $(_ds_metadata) in dataset " *
        "$(datasetdir)"

    dict = Dict{String,Any}()
    # `open(...) do` guarantees the file handle is closed even if parsing throws.
    open(joinpath(datasetdir, _ds_metadata)) do file
        # `limit = 2` keeps values that themselves contain '=' intact.
        for (k, v) in split.(filter(x -> length(x) > 0, strip.(readlines(file))), '='; limit = 2)
            k = strip(k)
            v = strip(v)
            if k == "name"
                dict[k] = v
            elseif k == "supervised"
                dict[k] = parse(Bool, v)
            elseif k == "num_modalities" ||
                    !isnothing(match(r"modality[[:digit:]]+", string(k))) ||
                    k == "num_classes"
                dict[k] = parse(Int64, v)
            else
                @warn "Unknown key-value pair found in " *
                    "$(joinpath(datasetdir, _ds_metadata)): $(k)=$(v)"
            end
        end
    end

    # Sanity check: a supervised dataset should declare at least one class.
    if dict["supervised"] && haskey(dict, "num_classes") && dict["num_classes"] < 1
        @warn "$(DATASET_ENC_NAME) $(dict["name"]) is marked as `supervised` but has `num_classes` = 0"
    end

    return dict
end
"""
    _read_example_metadata(datasetdir, inst_id)

Parse the per-instance `Metadata.txt` of instance `inst_id` into a
`Dict{String,Any}`; `dim*` entries are parsed as dimension `Tuple`s, everything
else is kept as a string.
"""
function _read_example_metadata(datasetdir::AbstractString, inst_id::Integer)
    @assert isfile(joinpath(
        datasetdir,
        "$(_ds_inst_prefix)$(string(inst_id))",
        _ds_metadata
    )) "Missing $(_ds_metadata) in dataset `$(_ds_inst_prefix)$(string(inst_id))` in " *
        "`$(datasetdir)`"

    dict = Dict{String,Any}()
    # `open(...) do` guarantees the file handle is closed even if parsing throws.
    open(joinpath(datasetdir, "$(_ds_inst_prefix)$(string(inst_id))", _ds_metadata)) do file
        # `limit = 2` keeps values that themselves contain '=' intact.
        for (k, v) in split.(filter(x -> length(x) > 0, strip.(readlines(file))), '='; limit = 2)
            k = strip(k)
            v = strip(v)
            if startswith(k, "dim")
                dict[k] = _parse_dim_tuple(v)
            else
                dict[k] = v
            end
        end
    end
    return dict
end
"""
    _read_labels(datasetdir; shufflelabels = Symbol[], rng = Random.GLOBAL_RNG)

Read `Labels.csv` of the dataset at `datasetdir` into a `DataFrame` whose first
column `:id` holds the numeric instance IDs; optionally shuffle the values of
the label columns listed in `shufflelabels` using `rng` (an `AbstractRNG`, or an
`Integer` seed for `MersenneTwister`).
"""
function _read_labels(
    datasetdir::AbstractString;
    shufflelabels::AbstractVector{Symbol} = Symbol[], # TODO: add tests for labels shuffling
    rng::Union{<:AbstractRNG,<:Integer} = Random.GLOBAL_RNG
)
    @assert isfile(joinpath(datasetdir, _ds_labels)) "Missing $(_ds_labels) in dataset " *
        "$(datasetdir)"

    # All cells are read as `String`; IDs are "Example_<n>" and get reduced to <n>.
    df = CSV.read(joinpath(datasetdir, _ds_labels), DataFrame; types = String)
    df[!,:id] = parse.(Int64, replace.(df[!,:id], _ds_inst_prefix => ""))

    rng = isa(rng, Integer) ? MersenneTwister(rng) : rng
    for l in shufflelabels
        # Column 1 is `:id`, which is never a label.
        @assert l in Symbol.(names(df)[2:end]) "`$l` is not a label of the dataset"
        df[!,l] = shuffle(rng, df[:,l])
    end

    return df
end
"""
datasetinfo(datasetpath; onlywithlabels = [], shufflelabels = [], rng = Random.GLOBAL_RNG)
Show dataset size on disk and return a `Tuple` whose first element is a vector of selected
IDs, second element is the labels `DataFrame` (or `nothing`) and third element is the total
size in bytes.
# Arguments
* `onlywithlabels` is used to select which portion of the $(DATASET_ENC_NAME) to load, by specifying
labels and their values to use as filters. See [`loaddataset`](@ref) for more info.
* `shufflelabels` is an `AbstractVector` of names of labels to shuffle (default = [], means
no shuffle).
* `rng` is a random number generator to be used when shuffling (for reproducibility); can be
either a `Integer` (used as seed for `MersenneTwister`) or an `AbstractRNG`.
"""
function datasetinfo(
    datasetpath::AbstractString;
    onlywithlabels::AbstractVector{<:AbstractVector{<:Pair{<:AbstractString,<:AbstractVector{<:Any}}}} =
        AbstractVector{Pair{AbstractString,AbstractVector{Any}}}[],
    kwargs...
)
    @assert isdir(datasetpath) "$(DATASET_ENC_NAME) at path $(datasetpath) does not exist"

    ds_metadata = _read_dataset_metadata(datasetpath)

    if ds_metadata["supervised"] && !isfile(joinpath(datasetpath, _ds_labels))
        @warn "$(DATASET_ENC_NAME) $(ds_metadata["name"]) is marked as `supervised` but has no " *
            "file `$(_ds_labels)`"
    end

    # An instance directory is named "<prefix><id>", e.g. "Example_1".
    function isexdir(name::AbstractString)
        return isdir(joinpath(datasetpath, name)) && startswith(name, _ds_inst_prefix)
    end

    examples_ids = sort!(parse.(Int64, replace.(
        filter(isexdir, readdir(datasetpath)),
        _ds_inst_prefix => ""
    )))

    labels = nothing
    if ds_metadata["supervised"] ||
            (haskey(ds_metadata, "num_classes") && ds_metadata["num_classes"] > 0)
        labels = _read_labels(datasetpath; kwargs...)

        # Cross-check IDs between Labels.csv and the directories on disk; keep
        # only the intersection when they disagree.
        missing_in_labels = setdiff(labels[:,:id], examples_ids)
        there_are_missing = false
        if length(missing_in_labels) > 0
            there_are_missing = true
            @warn "The following examples IDs are present in $(_ds_labels) but there is no " *
                "directory for them: $(missing_in_labels)"
        end
        missing_in_dirs = setdiff(examples_ids, labels[:,:id])
        if length(missing_in_dirs) > 0
            there_are_missing = true
            # NOTE: fixed typo in this user-facing message ("filsystem").
            @warn "The following examples IDs are present on filesystem but are not referenced by " *
                "$(_ds_labels): $(missing_in_dirs)"
        end
        if there_are_missing
            examples_ids = sort!(collect(intersect(examples_ids, labels[:,:id])))
            @warn "Will be considered only instances with IDs: $(examples_ids)"
        end
    end

    if length(onlywithlabels) > 0
        if isnothing(labels)
            @warn "A filter was passed but no $(_ds_labels) was found in this dataset: all " *
                "instances will be used"
        else
            # CHECKS: every filter key must be an existing label column.
            keys_not_found = String[]
            labels_cols = names(labels)[2:end]
            for i in eachindex(onlywithlabels)
                for k in [pair[1] for pair in onlywithlabels[i]]
                    if !(k in labels_cols)
                        push!(keys_not_found, k)
                    end
                end
            end
            if length(keys_not_found) > 0
                # NOTE: fixed typo in this user-facing message ("availabels").
                throw(ErrorException("Key(s) provided as filters not found: " *
                    "$(unique(keys_not_found)); available are $(labels_cols)"))
            end

            # ACTUAL FILTERING: each inner vector of `onlywithlabels` is one
            # filter; within a filter the per-label value lists are combined as
            # a cartesian product, and the resulting ID sets are unioned.
            filtered_ids = Integer[]
            for i in eachindex(onlywithlabels)
                for filters in [Base.product([Base.product((key,), value)
                        for (key, value) in onlywithlabels[i]]...)...]
                    nt = NamedTuple([Symbol(fs[1]) => string(fs[2]) for fs in filters])
                    grouped_by_keys = groupby(labels, collect(keys(nt)))
                    if haskey(grouped_by_keys, nt)
                        push!(filtered_ids, grouped_by_keys[nt][:,1]...)
                    else
                        @warn "No example found for combination of labels $(nt): check " *
                            "if the proper Type was used"
                    end
                end
            end
            examples_ids = sort(collect(intersect(examples_ids, unique(filtered_ids))))
        end
    end

    # Total on-disk size of the selected instances (modality CSVs only).
    totalsize = 0
    for id in examples_ids
        ex_metadata = _read_example_metadata(datasetpath, id)
        # TODO: perform some checks on metadata
        for modality in 1:ds_metadata["num_modalities"]
            totalsize += filesize(joinpath(
                datasetpath,
                "$(_ds_inst_prefix)$(string(id))",
                "$(_ds_modality_prefix)$(modality).csv"
            ))
        end
    end

    # Restrict the labels table to the selected instances.
    if !isnothing(labels)
        labels = labels[findall(id -> id in examples_ids, labels[:,:id]),:]
    end

    return examples_ids, labels, totalsize
end
"""
    _load_instance(datasetpath, inst_id; types = nothing)

Load all modalities of instance `inst_id` from disk; return a `Vector` of
`OrderedDict`s (one per modality) mapping variable names to their unlinearized
channels. `types` is forwarded to `CSV.read` to force column types.
"""
function _load_instance(
    datasetpath::AbstractString,
    inst_id::Integer;
    types::Union{DataType,Nothing} = nothing
)
    inst_metadata = _read_example_metadata(datasetpath, inst_id)
    instancedir = joinpath(datasetpath, "$(_ds_inst_prefix)$(inst_id)")

    type_info = isnothing(types) ? NamedTuple() : (types = types,)

    modality_reg = Regex("^$(_ds_modality_prefix)([[:digit:]]+).csv\$")

    function ismodalityfile(path::AbstractString)
        return isfile(joinpath(instancedir, path)) && !isnothing(match(modality_reg, path))
    end

    # Each CSV cell holds a linearized channel; reshape it back using the dims
    # recorded in the instance metadata.
    function unlinearize_var(p::Pair{Symbol,<:Any}, dims::Tuple)
        return p[1] => unlinearize_data(p[2], dims)
    end
    function unlinearize_modality(ps::AbstractVector{<:Pair{Symbol,<:Any}}, dims::Tuple)
        return [unlinearize_var(p, dims) for p in ps]
    end
    function load_modality(path::AbstractString, dims::Tuple)
        return OrderedDict(unlinearize_modality(
            collect(CSV.read(path, pairs; type_info...)),
            dims
        ))
    end

    modalities = filter(ismodalityfile, readdir(instancedir))
    # BUGFIX: parse the captured indices BEFORE sorting. Sorting them as strings
    # ordered "10" before "2", misaligning modalities (and their dims) for
    # datasets with 10+ modalities.
    modalities_num = sort!([parse(Int, match(modality_reg, f).captures[1]) for f in modalities])

    result = Vector{OrderedDict}(undef, length(modalities_num))
    # TODO: address problem with Threads.@threads
    # (`@threads :static` cannot be used concurrently or nested)
    for (i, f) in collect(enumerate(modalities_num))
        # Key the dims lookup by the modality number itself (robust even if the
        # modality numbering were not contiguous).
        result[i] = load_modality(
            joinpath(instancedir, "$(_ds_modality_prefix)$(f).csv"),
            inst_metadata[string("dim_modality_", f)]
        )
    end
    return result
end
"""
loaddataset(datasetpath; onlywithlabels = [], shufflelabels = [], rng = Random.GLOBAL_RNG)
Create a `MultiDataset` or a `LabeledMultiDataset`
from a $(DATASET_ENC_NAME), based on the
presence of file Labels.csv.
# Arguments
* `datasetpath` is an `AbstractString` that denote the $(DATASET_ENC_NAME)'s position;
* `onlywithlabels` is an AbstractVector{AbstractVector{Pair{AbstractString,AbstractVector{Any}}}}
and it's used to select which portion of the $(DATASET_ENC_NAME) to load, by specifying labels and
their values.
Beginning from the center, each Pair{AbstractString,AbstractVector{Any}} must contain,
as AbstractString the label's name, and, as AbstractVector{Any} the values for that label.
Each Pair in one vector must refer to a different label, so if the $(DATASET_ENC_NAME) has in total
n labels, this vector of Pair can contain at most n elements. That's because the elements
will combine with each other.
Every vector of Pair act as a filter.
Note that the same label can be used in different vector of Pair as they do not combine
with each other.
If `onlywithlabels` is an empty vector (default) the function will load the entire
$(DATASET_ENC_NAME).
* `shufflelabels` is an `AbstractVector` of names of labels to shuffle (default = [], means
no shuffle).
* `rng` is a random number generator to be used when shuffling (for reproducibility); can be
either a Integer (used as seed for `MersenneTwister`) or an `AbstractRNG`.
# Examples
```julia-repl
julia> df_data = DataFrame(
:id => [1, 2, 3, 4, 5],
:age => [30, 9, 30, 40, 9],
:name => ["Python", "Julia", "C", "Java", "R"],
:stat => [deepcopy(ts_sin), deepcopy(ts_cos), deepcopy(ts_sin), deepcopy(ts_cos), deepcopy(ts_sin)]
)
5Γ4 DataFrame
Row β id age name stat
β Int64 Int64 String Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 1 30 Python [0.841471, 0.909297, 0.14112, -0β¦
2 β 2 9 Julia [0.540302, -0.416147, -0.989992,β¦
3 β 3 30 C [0.841471, 0.909297, 0.14112, -0β¦
4 β 4 40 Java [0.540302, -0.416147, -0.989992,β¦
5 β 5 9 R [0.841471, 0.909297, 0.14112, -0β¦
julia> lmd = LabeledMultiDataset(
MultiDataset([[4]], deepcopy(df_data)),
[2,3],
)
β LabeledMultiDataset
ββ labels
β ββ age: Set([9, 30, 40])
β ββ name: Set(["C", "Julia", "Python", "Java", "R"])
ββ dimensionalities: (1,)
- Modality 1 / 1
ββ dimensionality: 1
5Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
3 β [0.841471, 0.909297, 0.14112, -0β¦
4 β [0.540302, -0.416147, -0.989992,β¦
5 β [0.841471, 0.909297, 0.14112, -0β¦
- Spare variables
ββ dimensionality: 0
5Γ1 SubDataFrame
Row β id
β Int64
ββββββΌβββββββ
1 β 1
2 β 2
3 β 3
4 β 4
5 β 5
julia> savedataset("langs", lmd, force = true)
julia> loaddataset("langs", onlywithlabels = [ ["name" => ["Julia"], "age" => ["9"]] ] )
Instances count: 1
Total size: 981670 bytes
β LabeledMultiDataset
ββ labels
β ββ age: Set(["9"])
β ββ name: Set(["Julia"])
ββ dimensionalities: (1,)
- Modality 1 / 1
ββ dimensionality: 1
1Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.540302, -0.416147, -0.989992,β¦
- Spare variables
ββ dimensionality: 0
1Γ1 SubDataFrame
Row β id
β Int64
ββββββΌβββββββ
1 β 2
julia> loaddataset("langs", onlywithlabels = [ ["name" => ["Julia"], "age" => ["30"]] ] )
Instances count: 0
Total size: 0 bytes
ERROR: AssertionError: No instance found
julia> loaddataset("langs", onlywithlabels = [ ["name" => ["Julia"]] , ["age" => ["9"]] ] )
Instances count: 2
Total size: 1963537 bytes
β LabeledMultiDataset
ββ labels
β ββ age: Set(["9"])
β ββ name: Set(["Julia", "R"])
ββ dimensionalities: (1,)
- Modality 1 / 1
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.540302, -0.416147, -0.989992,β¦
2 β [0.841471, 0.909297, 0.14112, -0β¦
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β id
β Int64
ββββββΌβββββββ
1 β 2
2 β 5
julia> loaddataset("langs", onlywithlabels = [ ["name" => ["Julia"]], ["name" => ["C"], "age" => ["30"]] ] )
Instances count: 2
Total size: 1963537 bytes
β LabeledMultiDataset
ββ labels
β ββ age: Set(["9", "30"])
β ββ name: Set(["C", "Julia"])
ββ dimensionalities: (1,)
- Modality 1 / 1
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.540302, -0.416147, -0.989992,β¦
2 β [0.841471, 0.909297, 0.14112, -0β¦
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β id
β Int64
ββββββΌβββββββ
1 β 2
2 β 3
```
"""
function loaddataset(
    datasetpath::AbstractString;
    types::Union{DataType,Nothing} = nothing,
    kwargs...
)
    # Select instances (optionally filtered by labels via `kwargs`).
    selected_ids, labels, datasetsize = datasetinfo(datasetpath; kwargs...)

    @assert length(selected_ids) > 0 "No instance found"

    # Load the first instance to discover the modality/column layout.
    instance_modalities = _load_instance(datasetpath, selected_ids[1]; types = types)
    modalities_cols = [Symbol.(var_name) for var_name in keys.(instance_modalities)]

    df = DataFrame(
        :id => [selected_ids[1]],
        [Symbol(k) => [v] for modality in instance_modalities for (k, v) in modality]...;
        makeunique = true
    )

    # Append the remaining instances row by row, preserving the column order
    # discovered from the first instance.
    for id in selected_ids[2:end]
        curr_row = Any[id]

        for (i, modality) in enumerate(_load_instance(datasetpath, id; types = types))
            for var_name in modalities_cols[i]
                push!(curr_row, modality[var_name])
            end
        end

        push!(df, curr_row)
    end

    # Map each modality's column names back to their positions in `df`.
    grouped_variables = Vector{Integer}[]
    df_names = Symbol.(names(df))
    for modality in modalities_cols
        push!(grouped_variables, [findfirst(x -> x == k, df_names) for k in modality])
    end

    md = MultiDataset(df, grouped_variables)

    if !isnothing(labels)
        # Labels become extra (spare) variables appended at the end; their
        # indices identify them in the resulting LabeledMultiDataset.
        orig_length = nvariables(md)

        for l in names(labels)[2:end]
            insertvariables!(md, Symbol(l), labels[:,l])
        end

        return LabeledMultiDataset(md, collect((orig_length+1):nvariables(md)))
    else
        return md
    end
end
"""
savedataset(datasetpath, md; instance_ids, name, force = false)
Save `md` AbstractMultiDataset on disk at path `datasetpath` in the following format:
datasetpath
ββ Example_1
β ββ Modality_1.csv
β ββ Modality_2.csv
β ββ ...
β ββ Modality_n.csv
β ββ Metadata.txt
ββ Example_2
β ββ Modality_1.csv
β ββ Modality_2.csv
β ββ ...
β ββ Modality_n.csv
β ββ Metadata.txt
ββ ...
ββ Example_n
ββ Metadata.txt
ββ Labels.csv
# Arguments
* `instance_ids` is an `AbstractVector{Integer}` that denote the identifier of the instances,
* `name` is an `AbstractString` and denote the name of the $(DATASET_ENC_NAME), that will be saved in the
Metadata of the $(DATASET_ENC_NAME),
* `force` is a `Bool`, if it's set to `true`, then in case `datasetpath` already exists, it will
be overwritten otherwise the operation will be aborted. (default = `false`)
* `labels_indices` is an `AbstractVector{Integer}` and contains the indices of the labels'
column (allowed only when passing a MultiDataset)
Alternatively to an `AbstractMultiDataset`, a `DataFrame`
can be passed as second argument.
If this is the case a third positional argument is required representing the
`grouped_variables` of the dataset. See [`MultiDataset`](@ref) for syntax of
`grouped_variables`.
"""
# Forward an `AbstractMultiDataset`: save its underlying `DataFrame` together
# with its variable grouping.
function savedataset(datasetpath::AbstractString, md::AbstractMultiDataset; kwargs...)
    return savedataset(datasetpath, data(md), grouped_variables(md); kwargs...)
end

# Forward a `LabeledMultiDataset`: save the unlabeled dataset and pass the
# labeling variables' indices along as `labels_indices`.
function savedataset(datasetpath::AbstractString, lmd::LabeledMultiDataset; kwargs...)
    return savedataset(
        datasetpath,
        unlabeleddataset(lmd);
        labels_indices = labeling_variables(lmd),
        kwargs...,
    )
end
function savedataset(
    datasetpath::AbstractString,
    df::AbstractDataFrame,
    grouped_variables::AbstractVector{<:AbstractVector{<:Integer}} = [collect(1:ncol(df))];
    instance_ids::AbstractVector{<:Integer} = 1:nrow(df),
    labels_indices::AbstractVector{<:Integer} = Int[],
    name::AbstractString = basename(replace(datasetpath, r"/$" => "")),
    force::Bool = false
)
    @assert force || !isdir(datasetpath) "Directory $(datasetpath) already present: set " *
        "`force` to `true` to overwrite existing dataset"

    @assert length(instance_ids) == nrow(df) "Mismatching `length(instance_ids)` " *
        "($(length(instance_ids))) and `nrow(df)` ($(nrow(df)))"

    mkpath(datasetpath)

    # NOTE: maybe this can be done in `savedataset` accepting a labeled modal dataset
    # Build the Labels table (id + one column per label) if labels were selected.
    df_labels = nothing
    if length(labels_indices) > 0
        df_labels = DataFrame(
            :id => [string(_ds_inst_prefix, i) for i in instance_ids],
            [l => df[:,l] for l in Symbol.(names(df)[labels_indices])]...
        )
    end

    # One directory per instance: a CSV per modality plus a Metadata.txt.
    for (i_inst, (id, inst)) in enumerate(zip(instance_ids, eachrow(df)))
        inst_metadata_path = joinpath(datasetpath, string(_ds_inst_prefix, id), _ds_metadata)
        curr_inst_path = mkpath(dirname(inst_metadata_path))
        inst_metadata_file = open(inst_metadata_path, "w+")
        for (i_modality, curr_modality_indices) in enumerate(grouped_variables)
            curr_modality_inst = inst[curr_modality_indices]
            # TODO: maybe assert all instances have same size or fill with missing
            # Record the channel size of this modality for later unlinearization.
            println(inst_metadata_file,
                "dim_modality_", i_modality, "=", size(first(curr_modality_inst))
            )
            # Channels are flattened (linearized) before being written as CSV cells.
            CSV.write(
                joinpath(curr_inst_path, string(_ds_modality_prefix, i_modality, ".csv")),
                DataFrame(
                    [a => linearize_data(curr_modality_inst[a])
                        for a in Symbol.(names(curr_modality_inst))]
                )
            )
        end
        # NOTE: this is not part of the `Data Input Format` specification pdf and it is a
        # duplicated info from Labels.csv
        if !isnothing(df_labels)
            example_labels = select(df_labels, Not("id"))[i_inst,:]
            for col in 1:length(names(example_labels))
                println(inst_metadata_file,
                    names(example_labels)[col], "=",
                    string(select(df_labels, Not("id"))[i_inst, col])
                )
            end
        end
        close(inst_metadata_file)
    end

    # Dataset-level metadata (plus Labels.csv when supervised).
    ds_metadata_file = open(joinpath(datasetpath, _ds_metadata), "w+")

    println(ds_metadata_file, "name=", name)

    if !isnothing(df_labels)
        CSV.write(joinpath(datasetpath, _ds_labels), df_labels)
        println(ds_metadata_file, "supervised=true")
        println(ds_metadata_file, "num_classes=", (ncol(df_labels)-1))
    else
        println(ds_metadata_file, "supervised=false")
    end

    println(ds_metadata_file, "num_modalities=", length(grouped_variables))

    # One "modality<i>=<dimensionality>" line per modality.
    for (i_modality, curr_modality_indices) in enumerate(grouped_variables)
        println(ds_metadata_file, "modality", i_modality, "=", dimensionality(df[:,curr_modality_indices]))
    end

    close(ds_metadata_file)
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 4958 |
# -------------------------------------------------------------
# AbstractMultiDataset - instances manipulation
"""
ninstances(md)
Return the number of instances in a multimodal dataset.
# Examples
```julia-repl
julia> md = MultiDataset([[1],[2]],DataFrame(:age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> mod2 = modality(md, 2)
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> ninstances(md) == ninstances(mod2) == 2
true
```
"""
# Number of instances = number of rows of the (underlying) `DataFrame`.
ninstances(df::AbstractDataFrame) = nrow(df)
ninstances(md::AbstractMultiDataset) = nrow(data(md))
# ninstances(md::AbstractMultiDataset, i::Integer) = nrow(modality(md, i))
"""
pushinstances!(md, instance)
Add an instance to a multimodal dataset, and return the dataset itself.
The instance can be a `DataFrameRow` or an `AbstractVector` but in both cases the number and
type of variables should match those of the dataset.
"""
# A `DataFrameRow` and a plain vector are pushed in exactly the same way, so a
# single `Union` method replaces the two previously duplicated bodies.
function pushinstances!(
    md::AbstractMultiDataset,
    instance::Union{DataFrameRow,AbstractVector}
)
    @assert length(instance) == nvariables(md) "Mismatching number of variables " *
        "between dataset ($(nvariables(md))) and instance ($(length(instance)))"

    push!(data(md), instance)
    return md
end
# Push a whole `DataFrame` row by row, so each row gets the arity check above.
function pushinstances!(md::AbstractMultiDataset, instances::AbstractDataFrame)
    for inst in eachrow(instances)
        pushinstances!(md, inst)
    end

    return md
end
"""
deleteinstances!(md, i)
Remove the `i`-th instance in a multimodal dataset, and return the dataset itself.
deleteinstances!(md, i_instances)
Remove the instances at `i_instances` in a multimodal dataset, and return the dataset itself.
"""
function deleteinstances!(md::AbstractMultiDataset, i_instances::AbstractVector{<:Integer})
    for i in i_instances
        # NOTE: fixed message typo ("no in range").
        @assert 1 ≤ i ≤ ninstances(md) "Index $(i) not in range 1:ninstances " *
            "(1:$(ninstances(md)))"
    end

    # BUGFIX: DataFrames' `deleteat!` requires the row indices to be sorted (and
    # unique); `unique` alone failed for unsorted input such as `[3, 1]`.
    deleteat!(data(md), sort!(unique(i_instances)))
    return md
end
deleteinstances!(md::AbstractMultiDataset, i::Integer) = deleteinstances!(md, [i])
"""
keeponlyinstances!(md, i_instances)
Remove all instances from a multimodal dataset, which index does not appear in `i_instances`.
"""
# Keep only the instances whose index appears in `i_instances`; delete the rest.
function keeponlyinstances!(
    md::AbstractMultiDataset,
    i_instances::AbstractVector{<:Integer}
)
    to_delete = setdiff(collect(1:ninstances(md)), i_instances)
    return deleteinstances!(md, to_delete)
end
"""
instance(md, i)
Return the `i`-th instance in a multimodal dataset.
instance(md, i_modality, i_instance)
Return the `i_instance`-th instance in a multimodal dataset with only variables from the
the `i_modality`-th modality.
instance(md, i_instances)
Return instances at `i_instances` in a multimodal dataset.
instance(md, i_modality, i_instances)
Return i_instances at `i_instances` in a multimodal dataset with only variables from the
the `i_modality`-th modality.
"""
function instance(df::AbstractDataFrame, i::Integer)
    # BUGFIX: the original message interpolated the undefined name `md` here,
    # which raised `UndefVarError` instead of the intended assertion message
    # (also added the missing closing parenthesis).
    @assert 1 ≤ i ≤ ninstances(df) "Index ($i) must be a valid instance number " *
        "(1:$(ninstances(df)))"

    return @view df[i,:]
end
function instance(md::AbstractMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ ninstances(md) "Index ($i) must be a valid instance number " *
        "(1:$(ninstances(md)))"

    return instance(data(md), i)
end
function instance(md::AbstractMultiDataset, i_modality::Integer, i_instance::Integer)
    @assert 1 ≤ i_modality ≤ nmodalities(md) "Index ($i_modality) must be a valid " *
        "modality number (1:$(nmodalities(md)))"

    return instance(modality(md, i_modality), i_instance)
end
function instance(df::AbstractDataFrame, i_instances::AbstractVector{<:Integer})
    for i in i_instances
        # BUGFIX: same `md` → `df` fix as above.
        @assert 1 ≤ i ≤ ninstances(df) "Index ($i) must be a valid instance number " *
            "(1:$(ninstances(df)))"
    end

    return @view df[i_instances,:]
end
function instance(md::AbstractMultiDataset, i_instances::AbstractVector{<:Integer})
    return instance(data(md), i_instances)
end
function instance(
    md::AbstractMultiDataset,
    i_modality::Integer,
    i_instances::AbstractVector{<:Integer}
)
    @assert 1 ≤ i_modality ≤ nmodalities(md) "Index ($i_modality) must be a valid " *
        "modality number (1:$(nmodalities(md)))"

    return instance(modality(md, i_modality), i_instances)
end
# Lazily iterate the instances of a multimodal dataset (one `@view` row per instance).
function eachinstance(md::AbstractMultiDataset)
    df = data(md)
    # NOTE(review): `df` is bound but unused, and the view indexes `md` itself
    # via a 2-argument `getindex` — confirm whether `@view df[i,:]` was intended.
    Iterators.map(i->(@view md[i,:]), 1:ninstances(md))
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 2127 | using ScientificTypes
function ScientificTypes.schema(md::AbstractMultiDataset, i::Integer; kwargs...)
ScientificTypes.schema(modality(md, i); kwargs...)
end
using Tables

# Tables.jl interface: a multimodal dataset is a row-accessible table whose rows
# are its instances.
Tables.istable(X::AbstractMultiDataset) = true
Tables.rowaccess(X::AbstractMultiDataset) = true
function Tables.rows(X::AbstractMultiDataset)
    eachinstance(X)
end
# Row subsetting; `viewhint = nothing` defaults to returning a view (no copy).
function Tables.subset(X::AbstractMultiDataset, inds; viewhint = nothing)
    slicedataset(X, inds; return_view = (isnothing(viewhint) || viewhint == true))
end
function _columntruenames(row::Tuple{AbstractMultiDataset,Integer})
multilogiset, i_row = row
return [(i_mod, i_feature) for i_mod in 1:nmodalities(multilogiset) for i_feature in Tables.columnnames((modality(multilogiset, i_mod), i_row),)]
end
# Tables.jl column access for a `(dataset, row_index)` pair: map the flat
# column index `i` back to a `(modality, feature)` pair, then return the
# feature channel for that row.
function Tables.getcolumn(row::Tuple{AbstractMultiDataset,Integer}, i::Int)
    multilogiset, i_row = row
    (i_mod, i_feature) = _columntruenames(row)[i] # Ugly and not optimal. Perhaps AbstractMultiDataset should have an index attached to speed this up
    m = modality(multilogiset, i_mod)
    # The per-modality getcolumn returns a (features, feature-channels) pair;
    # only the channels are exposed here.
    feats, featchs = Tables.getcolumn((m, i_row), i_feature)
    featchs
end
# Tables.jl column names for a row: flat 1-based indices over all
# (modality, feature) pairs.
function Tables.columnnames(row::Tuple{AbstractMultiDataset,Integer})
    n = length(_columntruenames(row))
    return 1:n
end
using MLJModelInterface: Table
import MLJModelInterface: selectrows, nrows
# Number of rows (instances), as required by the MLJ model interface.
nrows(X::AbstractMultiDataset) = length(Tables.rows(X))
# Select rows `r` (a single integer is widened to a one-element range), as
# required by the MLJ model interface.
function selectrows(X::AbstractMultiDataset, r)
    row_inds = r isa Integer ? (r:r) : r
    return Tables.subset(X, row_inds)
end
# function scitype(X::AbstractMultiDataset)
# Table{
# if featvaltype(X) <: AbstractFloat
# scitype(1.0)
# elseif featvaltype(X) <: Integer
# scitype(1)
# elseif featvaltype(X) <: Bool
# scitype(true)
# else
# @warn "Unexpected featvaltype: $(featvaltype(X)). SoleModels may need adjustments."
# typejoin(scitype(1.0), scitype(1), scitype(true))
# end
# }
# end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 2922 |
# -------------------------------------------------------------
# AbstractMultiDataset - iterable interface
# Indexing with an integer yields the corresponding modality; a vector of
# integers yields a vector of modalities.
getindex(md::AbstractMultiDataset, i::Integer) = modality(md, i)
function getindex(md::AbstractMultiDataset, indices::AbstractVector{<:Integer})
    return map(i -> modality(md, i), indices)
end
# Collection/iteration interface: a multimodal dataset behaves as a 1-based
# collection of its modalities, each a `SubDataFrame` view of the data.
length(md::AbstractMultiDataset) = length(grouped_variables(md))
ndims(md::AbstractMultiDataset) = length(md)
isempty(md::AbstractMultiDataset) = length(md) == 0
firstindex(md::AbstractMultiDataset) = 1
lastindex(md::AbstractMultiDataset) = length(md)
eltype(::Type{AbstractMultiDataset}) = SubDataFrame
eltype(::AbstractMultiDataset) = SubDataFrame
# Iterate over modalities: yields `modality(md, i)` for i = 1..length(md).
Base.@propagate_inbounds function iterate(md::AbstractMultiDataset, i::Integer = 1)
    if i < 1 || i > length(md)
        return nothing
    end
    return (@inbounds modality(md, i), i + 1)
end
# Full slice `md[:, :]` returns an independent copy of the whole dataset.
function getindex(
    md::AbstractMultiDataset,
    ::Colon,
    ::Colon
)
    return deepcopy(md)
end
# Slice on instances and modalities/variables
# Simultaneous slicing on instances (`i`) and modalities (`j`): slice the
# instances first, then restrict the result to the requested modalities.
function getindex(
    md::AbstractMultiDataset,
    i::Union{Integer,AbstractVector{<:Integer},Tuple{<:Integer}},
    j::Union{Integer,AbstractVector{<:Integer},Tuple{<:Integer}},
)
    inst_inds = vec(collect(i))
    mod_inds = vec(collect(j))
    return getindex(getindex(md, inst_inds, :), :, mod_inds)
end
# Slice on modalities/variables
# Slice on modalities only: copy the dataset and keep the modalities at `j`.
function getindex(
    md::AbstractMultiDataset,
    ::Colon,
    j::Union{Integer,AbstractVector{<:Integer},Tuple{<:Integer}},
)
    mod_inds = vec(collect(j))
    return keeponlymodalities!(deepcopy(md), mod_inds)
end
# Slice on instances
# Slice on instances only: return a new dataset (not a view) holding the
# instances at `i`.
function getindex(
    md::AbstractMultiDataset,
    i::Union{Integer,AbstractVector{<:Integer},Tuple{<:Integer}},
    ::Colon
)
    inst_inds = vec(collect(i))
    return slicedataset(md, inst_inds; return_view = false)
end
# In-place column selection (`md[i, !]`) is deliberately unsupported.
function getindex(
    md::AbstractMultiDataset,
    i::Union{Colon,<:Integer,<:AbstractVector{<:Integer},<:Tuple{<:Integer}},
    j::typeof(!),
)
    return error("MultiDataset currently does not allow in-place operations.")
end
# In-place row selection (`md[!, j]`) is deliberately unsupported.
function getindex(
    md::AbstractMultiDataset,
    i::typeof(!),
    j::Union{Colon,<:Integer,<:AbstractVector{<:Integer},<:Tuple{<:Integer}},
)
    return error("MultiDataset currently does not allow in-place operations.")
end
# # TODO: consider adding interfaces to access the underlying AbstractDataFrame
# function getindex(
# md::AbstractMultiDataset,
# i::Union{Integer,AbstractVector{<:Integer},Tuple{<:Integer}},
# j::Union{
# <:Integer,
# <:AbstractVector{<:Integer},
# <:Tuple{<:Integer},
# Symbol,
# <:AbstractVector{Symbol}
# },
# )
# return keeponlyvariables!(deepcopy(md), j)
# end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 5979 |
# -------------------------------------------------------------
# LabeledMultiDataset - utils
"""
nlabelingvariables(lmd)
Return the number of labeling variables of a labeled multimodal dataset.
"""
function nlabelingvariables(lmd::AbstractLabeledMultiDataset)
    lv = labeling_variables(lmd)
    return length(lv)
end
"""
labels(lmd, i_instance)
labels(lmd)
Return the labels of instance at index `i_instance` in a labeled multimodal dataset.
A dictionary of type `labelname => value` is returned.
If only the first argument is passed then the labels for all instances are returned.
"""
function labels(lmd::AbstractLabeledMultiDataset)
    all_names = Symbol.(names(data(lmd)))
    return all_names[labeling_variables(lmd)]
end
function labels(lmd::AbstractLabeledMultiDataset, i_instance::Integer)
    df = data(lmd)
    return Dict{Symbol,Any}(var => df[i_instance, var] for var in labels(lmd))
end
"""
    label(lmd, i_instance, i)

Return the value of the `i`-th labeling variable for instance
at index `i_instance` in a labeled multimodal dataset.
"""
function label(
    lmd::AbstractLabeledMultiDataset,
    i_instance::Integer,
    i::Integer
)
    # Resolve the name of the `i`-th labeling variable, then read its value
    # from the labels dictionary of the requested instance.
    return labels(lmd, i_instance)[
        variables(data(lmd))[labeling_variables(lmd)[i]]
    ]
end
"""
labeldomain(lmd, i)
Return the domain of `i`-th label of a labeled multimodal dataset.
"""
function labeldomain(lmd::AbstractLabeledMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ nlabelingvariables(lmd) "Index ($i) must be a valid label number " *
        "(1:$(nlabelingvariables(lmd)))"
    # A Continuous label yields its (min, max) range; any other label yields
    # the set of its observed values.
    col = data(lmd)[:,labeling_variables(lmd)[i]]
    if eltype(ScientificTypes.scitype(col)) <: Continuous
        return extrema(col)
    else
        return Set(col)
    end
end
"""
setaslabeling!(lmd, i)
setaslabeling!(lmd, var_name)
Set `i`-th variable as label.
The variable name can be passed as second argument instead of its index.
"""
function setaslabeling!(lmd::AbstractLabeledMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ nvariables(lmd) "Index ($i) must be a valid variable number " *
        "(1:$(nvariables(lmd)))"
    @assert i ∉ labeling_variables(lmd) "Variable at index $(i) is already a label."
    # Registering a label only records its column index; the data is untouched.
    push!(labeling_variables(lmd), i)
    return lmd
end
# Name-based variant: resolve `var_name` to its column index and delegate.
#
# Fixed typo in the error message ("LabeldMultiDataset" -> "LabeledMultiDataset"),
# now consistent with the message in `unsetaslabeling!`.
function setaslabeling!(lmd::AbstractLabeledMultiDataset, var_name::Symbol)
    @assert hasvariables(lmd, var_name) "LabeledMultiDataset does not contain " *
        "variable $(var_name)"
    return setaslabeling!(lmd, _name2index(lmd, var_name))
end
"""
unsetaslabeling!(lmd, i)
unsetaslabeling!(lmd, var_name)
Remove `i`-th labeling variable from labels list.
The variable name can be passed as second argument instead of its index.
"""
function unsetaslabeling!(lmd::AbstractLabeledMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ nvariables(lmd) "Index ($i) must be a valid variable number " *
        "(1:$(nvariables(lmd)))"
    @assert i in labeling_variables(lmd) "Variable at index $(i) is not a label."
    # Drop `i` from the labeling indices (its position is located first).
    pos = findfirst(isequal(i), labeling_variables(lmd))
    deleteat!(labeling_variables(lmd), pos)
    return lmd
end
function unsetaslabeling!(lmd::AbstractLabeledMultiDataset, var_name::Symbol)
    @assert hasvariables(lmd, var_name) "LabeledMultiDataset does not contain " *
        "variable $(var_name)"
    idx = _name2index(lmd, var_name)
    return unsetaslabeling!(lmd, idx)
end
"""
joinlabels!(lmd, [lbls...]; delim = "_")
On a labeled multimodal dataset, collapse the labeling variables identified by `lbls`
into a single labeling variable of type `String`, by means of a `join` that uses `delim`
for string delimiter.
If not specified differently this function will join all labels.
`lbls` can be an `Integer` indicating the index of the label, or a `Symbol`
indicating the name of the labeling variable.
# !!! note
# The resulting labels will always be of type `String`.
!!! note
The resulting labeling variable will always be added as last column in the underlying `DataFrame`.
# Examples
```julia-repl
julia> lmd = LabeledMultiDataset(
MultiDataset(
[[2],[4]],
DataFrame(
:id => [1, 2],
:age => [30, 9],
:name => ["Python", "Julia"],
:stat => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]]
)
),
[1, 3],
)
β LabeledMultiDataset
ββ labels
β ββ id: Set([2, 1])
β ββ name: Set(["Julia", "Python"])
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 30
2 β 9
- Modality 2 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
julia> joinlabels!(lmd)
β LabeledMultiDataset
ββ labels
β ββ id_name: Set(["1_Python", "2_Julia"])
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 30
2 β 9
- Modality 2 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
```
"""
function joinlabels!(
    lmd::AbstractLabeledMultiDataset,
    lbls::Symbol... = labels(lmd)...;
    delim::Union{<:AbstractString,<:AbstractChar} = '_'
)
    # First demote each chosen label back to a plain variable, so it can be
    # read and dropped like any other column.
    for l in lbls
        unsetaslabeling!(lmd, l)
    end
    new_col_name = Symbol(join(lbls, delim))
    # Join, row by row, the values of the chosen columns into a single String.
    new_vals = [join(data(lmd)[i,collect(lbls)], delim) for i in 1:ninstances(lmd)]
    # Replace the original columns with the joined one (appended as the last
    # column of the underlying DataFrame) and mark it as the labeling variable.
    dropvariables!(lmd, collect(lbls))
    insertvariables!(lmd, new_col_name, new_vals)
    setaslabeling!(lmd, nvariables(lmd))
    return lmd
end
# Integer-index variant of `joinlabels!`: map the label indices to their names
# and delegate to the `Symbol...` method.
#
# Bug fixes with respect to the previous version:
# * the vararg parameter was named `labels`, shadowing the `labels` function
#   the body needs to call (`labels(lmd)` would have raised a `MethodError`);
# * the selected names must be splatted into the `Symbol...` method — they
#   were previously passed as a single positional `Vector{Symbol}`, which
#   matches no method.
function joinlabels!(
    lmd::AbstractLabeledMultiDataset,
    lbls::Integer...;
    kwargs...
)
    return joinlabels!(lmd, labels(lmd)[[lbls...]]...; kwargs...)
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 30514 |
# -------------------------------------------------------------
# AbstractMultiDataset - modalities
"""
modality(md, i)
Return the `i`-th modality of a multimodal dataset.
modality(md, indices)
Return a `Vector` of modalities at `indices` of a multimodal dataset.
"""
function modality(md::AbstractMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ nmodalities(md) "Index ($i) must be a valid modality number " *
        "(1:$(nmodalities(md)))"
    # A modality is a column-view over the variables grouped under index `i`.
    return @view data(md)[:,grouped_variables(md)[i]]
end
function modality(md::AbstractMultiDataset, indices::AbstractVector{<:Integer})
    return map(i -> modality(md, i), indices)
end
"""
eachmodality(md)
Return a (lazy) iterator of the modalities of a multimodal dataset.
"""
function eachmodality(md::AbstractMultiDataset)
    df = data(md)
    # Lazily map each variable group onto a column-view of the dataframe.
    return Iterators.map(cols -> (@view df[:,cols]), grouped_variables(md))
end
"""
nmodalities(md)
Return the number of modalities of a multimodal dataset.
"""
nmodalities(md::AbstractMultiDataset) = length(grouped_variables(md))  # one modality per variable group
"""
addmodality!(md, indices)
addmodality!(md, index)
addmodality!(md, variable_names)
addmodality!(md, variable_name)
Create a new modality in a multimodal dataset using variables at `indices`
or `index`, and return the dataset itself.
Alternatively to the `indices` and the `index`, the variable name(s) can be used.
Note: to add a new modality with new variables see [`insertmodality!`](@ref).
# Arguments
* `md` is a `MultiDataset`;
* `indices` is an `AbstractVector{Integer}` that indicates which indices of the multimodal
dataset's corresponding dataframe to add to the new modality;
* `index` is an `Integer` that indicates the index of the multimodal dataset's corresponding
dataframe to add to the new modality;
* `variable_names` is an `AbstractVector{Symbol}` that indicates which variables of the
multimodal dataset's corresponding dataframe to add to the new modality;
* `variable_name` is a `Symbol` that indicates the variable of the multimodal dataset's
corresponding dataframe to add to the new modality;
# Examples
```julia-repl
julia> df = DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F'], :height => [180, 175], :weight => [80, 60])
2Γ5 DataFrame
Row β name age sex height weight
β String Int64 Char Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββββββββ
1 β Python 25 M 180 80
2 β Julia 26 F 175 60
julia> md = MultiDataset([[1]], df)
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Spare variables
ββ dimensionality: 0
2Γ4 SubDataFrame
Row β age sex height weight
β Int64 Char Int64 Int64
ββββββΌβββββββββββββββββββββββββββββ
1 β 25 M 180 80
2 β 26 F 175 60
julia> addmodality!(md, [:age, :sex])
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age sex
β Int64 Char
ββββββΌβββββββββββββ
1 β 25 M
2 β 26 F
- Spare variables
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β height weight
β Int64 Int64
ββββββΌββββββββββββββββ
1 β 180 80
2 β 175 60
julia> addmodality!(md, 5)
β MultiDataset
ββ dimensionalities: (0, 0, 0)
- Modality 1 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 3
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age sex
β Int64 Char
ββββββΌβββββββββββββ
1 β 25 M
2 β 26 F
- Modality 3 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β height
β Int64
ββββββΌββββββββ
1 β 180
2 β 175
```
"""
# Group the (already existing) variables at `indices` into a new modality.
function addmodality!(md::AbstractMultiDataset, indices::AbstractVector{<:Integer})
    @assert length(indices) > 0 "Cannot add an empty modality to dataset"
    # Every referenced column must exist in the underlying dataframe.
    nvars = nvariables(md)
    for i in indices
        @assert i in 1:nvars "Index $(i) is out of range 1:nvariables " *
            "(1:$(nvars))"
    end
    push!(grouped_variables(md), indices)
    return md
end
addmodality!(md::AbstractMultiDataset, index::Integer) = addmodality!(md, [index])
# Name-based variants: check that each name exists, then convert to indices.
function addmodality!(md::AbstractMultiDataset, variable_names::AbstractVector{Symbol})
    for var_name in variable_names
        @assert hasvariables(md, var_name) "MultiDataset does not contain " *
            "variable $(var_name)"
    end
    return addmodality!(md, _name2index(md, variable_names))
end
addmodality!(md::AbstractMultiDataset, variable_name::Symbol) = addmodality!(md, [variable_name])
"""
removemodality!(md, indices)
removemodality!(md, index)
Remove `i`-th modality from a multimodal dataset, and return the dataset.
Note: to completely remove a modality and all variables in it use [`dropmodalities!`](@ref)
instead.
# Arguments
* `md` is a `MultiDataset`;
* `index` is an `Integer` that indicates which modality to remove from the multimodal dataset;
* `indices` is an `AbstractVector{Integer}` that indicates the modalities to remove from the
multimodal dataset;
# Examples
```julia-repl
julia> df = DataFrame(:name => ["Python", "Julia"],
:age => [25, 26],
:sex => ['M', 'F'],
:height => [180, 175],
                      :weight => [80, 60]
       )
2Γ5 DataFrame
Row β name age sex height weight
β String Int64 Char Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββββββββ
1 β Python 25 M 180 80
2 β Julia 26 F 175 60
julia> md = MultiDataset([[1, 2],[3],[4],[5]], df)
β MultiDataset
ββ dimensionalities: (0, 0, 0, 0)
- Modality 1 / 4
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 4
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Modality 3 / 4
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β height
β Int64
ββββββΌββββββββ
1 β 180
2 β 175
- Modality 4 / 4
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
julia> removemodality!(md, [3])
β MultiDataset
ββ dimensionalities: (0, 0, 0)
- Modality 1 / 3
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Modality 3 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β height
β Int64
ββββββΌββββββββ
1 β 180
2 β 175
julia> removemodality!(md, [1,2])
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
- Spare variables
ββ dimensionality: 0
2Γ4 SubDataFrame
Row β name age sex height
β String Int64 Char Int64
ββββββΌβββββββββββββββββββββββββββββ
1 β Python 25 M 180
2 β Julia 26 F 175
```
"""
# Delete the `i`-th variable group, i.e., the `i`-th modality; the underlying
# columns are kept (they become spare variables).
function removemodality!(md::AbstractMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ nmodalities(md) "Index $(i) does not correspond to a modality " *
        "(1:$(nmodalities(md)))"
    deleteat!(grouped_variables(md), i)
    return md
end
# Remove multiple modalities at once.
#
# Bug fixes with respect to the previous version:
# * the signature used the invariant `AbstractVector{Integer}`, which does not
#   match concrete vectors such as `Vector{Int}` — so calls like
#   `removemodality!(md, [3])`, as shown in the docstring examples, raised a
#   `MethodError`; it is now `AbstractVector{<:Integer}`;
# * modalities are now removed in decreasing index order: removing in
#   increasing order shifts the indices of the not-yet-removed modalities and
#   deletes the wrong ones (e.g. the docstring example `removemodality!(md,
#   [1, 2])` must leave the third modality, not the second).
function removemodality!(md::AbstractMultiDataset, indices::AbstractVector{<:Integer})
    for i in sort(unique(indices); rev = true)
        removemodality!(md, i)
    end
    return md
end
"""
addvariable_tomodality!(md, i_modality, var_index)
addvariable_tomodality!(md, i_modality, var_indices)
addvariable_tomodality!(md, i_modality, var_name)
addvariable_tomodality!(md, i_modality, var_names)
Add variable at index `var_index` to the modality at index `i_modality` in a
multimodal dataset, and return the dataset.
Alternatively to `var_index` the variable name can be used.
Multiple variables can be inserted into the multimodal dataset at once using `var_indices`
or `var_names`.
Note: The function does not allow you to add a variable to a new modality, but only to add it
to an existing modality. To add a new modality use [`addmodality!`](@ref) instead.
# Arguments
* `md` is a `MultiDataset`;
* `i_modality` is an `Integer` indicating the modality in which the variable(s)
will be added;
* `var_index` is an `Integer` that indicates the index of the variable to add to a specific
modality of the multimodal dataset;
* `var_indices` is an `AbstractVector{Integer}` indicating the indices of the variables
to add to a specific modality of the multimodal dataset;
* `var_name` is a `Symbol` indicating the name of the variable to add to a specific
modality of the multimodal dataset;
* `var_names` is an `AbstractVector{Symbol}` indicating the name of the variables to
add to a specific modality of the multimodal dataset;
# Examples
```julia-repl
julia> df = DataFrame(:name => ["Python", "Julia"],
:age => [25, 26],
:sex => ['M', 'F'],
:height => [180, 175],
                      :weight => [80, 60]
       )
2Γ5 DataFrame
Row β name age sex height weight
β String Int64 Char Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββββββββ
1 β Python 25 M 180 80
2 β Julia 26 F 175 60
julia> md = MultiDataset([[1, 2],[3]], df)
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Spare variables
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β height weight
β Int64 Int64
ββββββΌββββββββββββββββ
1 β 180 80
2 β 175 60
julia> addvariable_tomodality!(md, 1, [4,5])
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ4 SubDataFrame
Row β name age height weight
β String Int64 Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββ
1 β Python 25 180 80
2 β Julia 26 175 60
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> addvariable_tomodality!(md, 2, [:name,:weight])
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ4 SubDataFrame
Row β name age height weight
β String Int64 Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββ
1 β Python 25 180 80
2 β Julia 26 175 60
- Modality 2 / 2
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β sex name weight
β Char String Int64
ββββββΌββββββββββββββββββββββ
1 β M Python 80
2 β F Julia 60
```
"""
# Add the (already existing) variable at `var_index` to modality `i_modality`.
function addvariable_tomodality!(
    md::AbstractMultiDataset, i_modality::Integer, var_index::Integer
)
    @assert 1 ≤ i_modality ≤ nmodalities(md) "Index $(i_modality) does not correspond " *
        "to a modality (1:$(nmodalities(md)))"
    @assert 1 ≤ var_index ≤ nvariables(md) "Index $(var_index) does not correspond " *
        "to a variable (1:$(nvariables(md)))"
    group = grouped_variables(md)[i_modality]
    if var_index in group
        @info "Variable $(var_index) is already part of modality $(i_modality)"
    else
        push!(group, var_index)
    end
    return md
end
function addvariable_tomodality!(
    md::AbstractMultiDataset, i_modality::Integer, var_indices::AbstractVector{<:Integer}
)
    foreach(vi -> addvariable_tomodality!(md, i_modality, vi), var_indices)
    return md
end
# Name-based variants: resolve names to indices and delegate.
function addvariable_tomodality!(
    md::AbstractMultiDataset, i_modality::Integer, var_name::Symbol
)
    @assert hasvariables(md, var_name) "MultiDataset does not contain variable " *
        "$(var_name)"
    return addvariable_tomodality!(md, i_modality, _name2index(md, var_name))
end
function addvariable_tomodality!(
    md::AbstractMultiDataset, i_modality::Integer, var_names::AbstractVector{Symbol}
)
    foreach(vn -> addvariable_tomodality!(md, i_modality, vn), var_names)
    return md
end
"""
removevariable_frommodality!(md, i_modality, var_indices)
removevariable_frommodality!(md, i_modality, var_index)
removevariable_frommodality!(md, i_modality, var_name)
removevariable_frommodality!(md, i_modality, var_names)
Remove variable at index `var_index` from the modality at index `i_modality` in a
multimodal dataset, and return the dataset itself.
Alternatively to `var_index` the variable name can be used.
Multiple variables can be dropped from the multimodal dataset at once,
by passing a `Vector` of
`Symbols` (for names), or a `Vector` of integers (for indices) as a last argument.
Note: when all variables are dropped from a modality, it will be removed.
# Arguments
* `md` is a `MultiDataset`;
* `i_modality` is an `Integer` indicating the modality in which the variable(s)
will be dropped;
* `var_index` is an `Integer` that indicates the index of the variable to drop from a
specific modality of the multimodal dataset;
* `var_indices` is an `AbstractVector{Integer}` indicating the indices of the variables
to drop from a specific modality of the multimodal dataset;
* `var_name` is a `Symbol` indicating the name of the variable to drop from a specific
modality of the multimodal dataset;
* `var_names` is an `AbstractVector{Symbol}` indicating the name of the variables to
drop from a specific modality of the multimodal dataset;
# Examples
```julia-repl
julia> df = DataFrame(:name => ["Python", "Julia"],
:age => [25, 26],
:sex => ['M', 'F'],
:height => [180, 175],
                      :weight => [80, 60]
       )
2Γ5 DataFrame
Row β name age sex height weight
β String Int64 Char Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββββββββ
1 β Python 25 M 180 80
2 β Julia 26 F 175 60
julia> md = MultiDataset([[1,2,4],[2,3,4],[5]], df)
β MultiDataset
ββ dimensionalities: (0, 0, 0)
- Modality 1 / 3
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β name age height
β String Int64 Int64
ββββββΌβββββββββββββββββββββββ
1 β Python 25 180
2 β Julia 26 175
- Modality 2 / 3
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β age sex height
β Int64 Char Int64
ββββββΌβββββββββββββββββββββ
1 β 25 M 180
2 β 26 F 175
- Modality 3 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
julia> removevariable_frommodality!(md, 3, 5)
[ Info: Variable 5 was last variable of modality 3: removing modality
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β name age height
β String Int64 Int64
ββββββΌβββββββββββββββββββββββ
1 β Python 25 180
2 β Julia 26 175
- Modality 2 / 2
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β age sex height
β Int64 Char Int64
ββββββΌβββββββββββββββββββββ
1 β 25 M 180
2 β 26 F 175
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
julia> removevariable_frommodality!(md, 1, :age)
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name height
β String Int64
ββββββΌββββββββββββββββ
1 β Python 180
2 β Julia 175
- Modality 2 / 2
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β age sex height
β Int64 Char Int64
ββββββΌβββββββββββββββββββββ
1 β 25 M 180
2 β 26 F 175
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
julia> removevariable_frommodality!(md, 2, [3,4])
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name height
β String Int64
ββββββΌββββββββββββββββ
1 β Python 180
2 β Julia 175
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
- Spare variables
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β sex weight
β Char Int64
ββββββΌββββββββββββββ
1 β M 80
2 β F 60
julia> removevariable_frommodality!(md, 1, [:name,:height])
[ Info: Variable 4 was last variable of modality 1: removing modality
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
- Spare variables
ββ dimensionality: 0
2Γ4 SubDataFrame
Row β name sex height weight
β String Char Int64 Int64
ββββββΌββββββββββββββββββββββββββββββ
1 β Python M 180 80
2 β Julia F 175 60
```
"""
# Remove the variable at `var_index` from modality `i_modality`.
# If it is the last variable of that modality, the modality itself is removed.
function removevariable_frommodality!(
    md::AbstractMultiDataset,
    i_modality::Integer,
    var_index::Integer;
    silent = false,
)
    @assert 1 ≤ i_modality ≤ nmodalities(md) "Index $(i_modality) does not correspond " *
        "to a modality (1:$(nmodalities(md)))"
    @assert 1 ≤ var_index ≤ nvariables(md) "Index $(var_index) does not correspond " *
        "to a variable (1:$(nvariables(md)))"
    if !(var_index in grouped_variables(md)[i_modality])
        # Nothing to do: the variable does not belong to this modality.
        if !silent
            @info "Variable $(var_index) is not part of modality $(i_modality)"
        end
    elseif nvariables(md, i_modality) == 1
        # Removing the only variable empties the modality, so drop it entirely.
        if !silent
            @info "Variable $(var_index) was last variable of modality $(i_modality): " *
                "removing modality"
        end
        removemodality!(md, i_modality)
    else
        # Delete the variable's entry from the modality's index group.
        deleteat!(
            grouped_variables(md)[i_modality],
            indexin(var_index, grouped_variables(md)[i_modality])[1]
        )
    end
    return md
end
# Batch variant over variable indices.
# NOTE(review): if an intermediate removal deletes modality `i_modality`,
# later iterations act on whichever modality has shifted into that index —
# confirm whether this edge case is intended.
function removevariable_frommodality!(
    md::AbstractMultiDataset,
    i_modality::Integer,
    var_indices::AbstractVector{<:Integer};
    kwargs...
)
    for i in var_indices
        removevariable_frommodality!(md, i_modality, i; kwargs...)
    end
    return md
end
# Name-based variants: resolve names to indices and delegate.
function removevariable_frommodality!(
    md::AbstractMultiDataset,
    i_modality::Integer,
    var_name::Symbol;
    kwargs...
)
    @assert hasvariables(md, var_name) "MultiDataset does not contain variable " *
        "$(var_name)"
    return removevariable_frommodality!(md, i_modality, _name2index(md, var_name); kwargs...)
end
function removevariable_frommodality!(
    md::AbstractMultiDataset,
    i_modality::Integer,
    var_names::AbstractVector{Symbol};
    kwargs...
)
    for var_name in var_names
        removevariable_frommodality!(md, i_modality, var_name; kwargs...)
    end
    return md
end
"""
insertmodality!(md, col, new_modality, existing_variables)
insertmodality!(md, new_modality, existing_variables)
Insert `new_modality` as new modality to multimodal dataset, and return the dataset.
Existing variables can be added to the new modality while adding it to the dataset
by passing
the corresponding indices as `existing_variables`.
If `col` is specified then the variables will be inserted starting at index `col`.
# Arguments
* `md` is a `MultiDataset`;
* `col` is an `Integer` indicating the column in which to insert the columns of
`new_modality`;
* `new_modality` is an `AbstractDataFrame` which will be added to the multimodal dataset as a
sub-dataframe of a new modality;
* `existing_variables` is an `AbstractVector{Integer}` or `AbstractVector{Symbol}`. It
indicates which variables of the multimodal dataset internal dataframe structure
to insert in the new modality.
# Examples
```julia-repl
julia> df = DataFrame(
:name => ["Python", "Julia"],
:stat1 => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]]
)
2Γ2 DataFrame
Row β name stat1
β String Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββββββββββ
1 β Python [0.841471, 0.909297, 0.14112, -0β¦
2 β Julia [0.540302, -0.416147, -0.989992,β¦
julia> md = MultiDataset(df; group = :all)
β MultiDataset
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
julia> insertmodality!(md, DataFrame(:age => [30, 9]))
β MultiDataset
ββ dimensionalities: (0, 1, 0)
- Modality 1 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 3
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
- Modality 3 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 30
2 β 9
julia> md.data
2Γ3 DataFrame
Row β name stat1 age
β String Arrayβ¦ Int64
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β Python [0.841471, 0.909297, 0.14112, -0β¦ 30
2 β Julia [0.540302, -0.416147, -0.989992,β¦ 9
```
or, selecting the column
```julia-repl
julia> df = DataFrame(
:name => ["Python", "Julia"],
:stat1 => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]]
)
2Γ2 DataFrame
Row β name stat1
β String Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββββββββββ
1 β Python [0.841471, 0.909297, 0.14112, -0β¦
2 β Julia [0.540302, -0.416147, -0.989992,β¦
julia> md = MultiDataset(df; group = :all)
β MultiDataset
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
julia> insertmodality!(md, 2, DataFrame(:age => [30, 9]))
β MultiDataset
ββ dimensionalities: (1, 0)
- Modality 1 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 30
2 β 9
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
julia> md.data
2Γ3 DataFrame
Row β name age stat1
β String Int64 Arrayβ¦
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β Python 30 [0.841471, 0.909297, 0.14112, -0β¦
2 β Julia 9 [0.540302, -0.416147, -0.989992,β¦
```
or, adding an existing variable:
```julia-repl
julia> df = DataFrame(
:name => ["Python", "Julia"],
:stat1 => [[sin(i) for i in 1:50000], [cos(i) for i in 1:50000]]
)
2Γ2 DataFrame
Row β name stat1
β String Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββββββββββ
1 β Python [0.841471, 0.909297, 0.14112, -0β¦
2 β Julia [0.540302, -0.416147, -0.989992,β¦
julia> md = MultiDataset([[2]], df)
β MultiDataset
ββ dimensionalities: (1,)
- Modality 1 / 1
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
julia> insertmodality!(md, DataFrame(:age => [30, 9]); existing_variables = [1])
β MultiDataset
ββ dimensionalities: (1, 0)
- Modality 1 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat1
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
- Modality 2 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age name
β Int64 String
ββββββΌβββββββββββββββ
1 β 30 Python
2 β 9 Julia
```
"""
function insertmodality!(
    md::AbstractMultiDataset,
    col::Integer,
    new_modality::AbstractDataFrame,
    existing_variables::AbstractVector{<:Integer} = Integer[]
)
    # Insert the columns of `new_modality` into `md`'s underlying dataframe
    # starting at position `col`, then register them as a new modality.
    # `existing_variables` are indices of columns already present in `md`
    # that will additionally be attached to the newly created modality.
    if col != nvariables(md)+1
        # Mid-dataframe insertion: the new columns will occupy `col:col+n-1`
        # (computed before inserting; `insertvariables!` shifts the indices of
        # the other modalities as needed).
        new_indices = col:col+ncol(new_modality)-1
        for (k, c) in collect(zip(keys(eachcol(new_modality)), collect(eachcol(new_modality))))
            insertvariables!(md, col, k, c)
            col = col + 1
        end
    else
        # Appending at the end of the dataframe: no index shifting required.
        new_indices = (nvariables(md)+1):(nvariables(md)+ncol(new_modality))
        for (k, c) in collect(zip(keys(eachcol(new_modality)), collect(eachcol(new_modality))))
            insertvariables!(md, k, c)
        end
    end
    # Group the freshly inserted columns into a modality of their own.
    addmodality!(md, new_indices)
    # Attach any requested pre-existing variables to the new (last) modality.
    for i in existing_variables
        addvariable_tomodality!(md, nmodalities(md), i)
    end
    return md
end
# Variant accepting `existing_variables` as names: validate them, then
# delegate to the index-based method.
function insertmodality!(
    md::AbstractMultiDataset,
    col::Integer,
    new_modality::AbstractDataFrame,
    existing_variables::AbstractVector{Symbol}
)
    for var_name in existing_variables
        @assert hasvariables(md, var_name) "MultiDataset does not contain " *
            "variable $(var_name)"
    end
    return insertmodality!(md, col, new_modality, _name2index(md, existing_variables))
end
# Default position: append the new modality's columns at the end of the
# underlying dataframe.
function insertmodality!(
    md::AbstractMultiDataset,
    new_modality::AbstractDataFrame,
    existing_variables::AbstractVector{<:Integer} = Integer[]
)
    insertmodality!(md, nvariables(md)+1, new_modality, existing_variables)
end
# Default position, name-based variant: delegate to the (col, Symbol) method,
# which already validates that every named variable exists. (Previously this
# method duplicated the validation loop verbatim.)
function insertmodality!(
    md::AbstractMultiDataset,
    new_modality::AbstractDataFrame,
    existing_variables::AbstractVector{Symbol}
)
    return insertmodality!(md, nvariables(md)+1, new_modality, existing_variables)
end
"""
dropmodalities!(md, indices)
dropmodalities!(md, index)
Remove the `i`-th modality from a multimodal dataset while dropping all variables in it,
and return the dataset itself.
Note: if the dropped variables are contained in other modalities
they will also be removed from
them. This can lead to the removal of additional modalities other than the `i`-th.
If the intention is to remove a modality without dropping the variables use
[`removemodality!`](@ref) instead.
# Arguments
* `md` is a `MultiDataset`;
* `index` is an `Integer` indicating the index of the modality to drop;
* `indices` is an `AbstractVector{Integer}` indicating the indices of the modalities to drop.
# Examples
```julia-repl
julia> df = DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F'], :height => [180, 175], :weight => [80, 60])
2Γ5 DataFrame
Row β name age sex height weight
β String Int64 Char Int64 Int64
ββββββΌβββββββββββββββββββββββββββββββββββββ
1 β Python 25 M 180 80
2 β Julia 26 F 175 60
julia> md = MultiDataset([[1, 2],[3,4],[5],[2,3]], df)
β MultiDataset
ββ dimensionalities: (0, 0, 0, 0)
- Modality 1 / 4
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 4
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β sex height
β Char Int64
ββββββΌββββββββββββββ
1 β M 180
2 β F 175
- Modality 3 / 4
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
- Modality 4 / 4
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age sex
β Int64 Char
ββββββΌβββββββββββββ
1 β 25 M
2 β 26 F
julia> dropmodalities!(md, [2,3])
[ Info: Variable 3 was last variable of modality 2: removing modality
[ Info: Variable 3 was last variable of modality 2: removing modality
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
julia> dropmodalities!(md, 2)
[ Info: Variable 2 was last variable of modality 2: removing modality
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
```
"""
# Drop the `index`-th modality by dropping all of its variables; removing the
# last variable of a modality removes the modality itself (see docstring note).
function dropmodalities!(md::AbstractMultiDataset, index::Integer)
    @assert 1 ≤ index ≤ nmodalities(md) "Index $(index) does not correspond to a modality " *
        "(1:$(nmodalities(md)))"
    return dropvariables!(md, grouped_variables(md)[index]; silent = true)
end
function dropmodalities!(md::AbstractMultiDataset, indices::AbstractVector{<:Integer})
    # Validate every index before mutating anything.
    for i in indices
        @assert 1 ≤ i ≤ nmodalities(md) "Index $(i) does not correspond to a modality " *
            "(1:$(nmodalities(md)))"
    end
    # Union of the variables of all selected modalities, dropped back-to-front
    # (rev = true) so that earlier column indices stay valid while dropping.
    return dropvariables!(md, sort!(
        unique(vcat(grouped_variables(md)[indices]...)); rev = true
    ); silent = true)
end
"""
TODO
"""
function keeponlymodalities!(
md::AbstractMultiDataset,
indices::AbstractVector{<:Integer}
)
return dropmodalities!(md, setdiff(collect(1:nmodalities(md)), indices))
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 1695 |
# -------------------------------------------------------------
# AbstractMultiDataset - set operations
# An instance (row) belongs to a multimodal dataset iff it equals one of the
# rows of the underlying dataframe.
function in(instance::DataFrameRow, md::AbstractMultiDataset)
    return instance in eachrow(data(md))
end
# Membership test for a raw vector of values: the vector is interpreted as a
# full instance (one value per column, in column order).
function in(instance::AbstractVector, md::AbstractMultiDataset)
    if nvariables(md) != length(instance)
        return false
    end
    # Build a one-row DataFrame with md's column names and compare it against
    # the dataset's rows. (Fixed: the original destructured `(i, var_name)`
    # directly from the `Vector{Symbol}` of names, which throws at runtime;
    # `enumerate` is required to obtain index/name pairs.)
    dfr = eachrow(DataFrame([var_name => instance[i]
        for (i, var_name) in enumerate(Symbol.(names(data(md))))]))[1]
    return dfr in eachrow(data(md))
end
# A dataframe is a subset of a multimodal dataset when each of its rows
# appears among the dataset's instances.
function issubset(instances::AbstractDataFrame, md::AbstractMultiDataset)
    return all(row -> row in md, eachrow(instances))
end
# md1 ⊆ md2 iff every instance of md1 appears in md2.
# (Fixed: the previous body evaluated `md1 ⊆ md2 && ...`, which dispatched
# back to this very method and recursed forever — guaranteed StackOverflowError.
# Delegating to the AbstractDataFrame method implements the intended check.)
function issubset(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    return issubset(data(md1), md2)
end
# Placeholder set operations on multimodal datasets.
# (Fixed: `Exception` is an abstract type with no constructor, so
# `throw(Exception("..."))` itself raised a MethodError instead of the
# intended "Not implemented" error; `ErrorException` is the concrete type.)
function setdiff(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # TODO: implement setdiff
    throw(ErrorException("Not implemented"))
end
function setdiff!(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # TODO: implement setdiff!
    throw(ErrorException("Not implemented"))
end
function intersect(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # TODO: implement intersect
    throw(ErrorException("Not implemented"))
end
function intersect!(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # TODO: implement intersect!
    throw(ErrorException("Not implemented"))
end
function union(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # TODO: implement union
    throw(ErrorException("Not implemented"))
end
function union!(md1::AbstractMultiDataset, md2::AbstractMultiDataset)
    # TODO: implement union!
    throw(ErrorException("Not implemented"))
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 11826 |
# Docstring fragment interpolated into the internal utility functions below.
# (Grammar fixed: "prior its call" -> "prior to its call", "an variable" ->
# "a variable"; the admonition body is indented so Documenter renders it.)
const __note_about_utils = "
!!! note
    It is important to consider that this function is intended for internal use only.
    It assumes that any check is performed prior to its call (e.g., check whether the
    index of a variable is valid or not).
"
# -------------------------------------------------------------
# AbstractMultiDataset - utils
"""
_empty(md)
Return a copy of a multimodal dataset with no instances.
Note: since the returned AbstractMultiDataset will be empty its columns types will be
`Any`.
$(__note_about_utils)
"""
# Generic fallback: deep-copy the whole dataset, then strip all instances.
# (Warning message fixed: "extremely not efficent" -> "extremely inefficient".)
function _empty(md::AbstractMultiDataset)
    @warn "This method for `_empty` is extremely inefficient, especially for " *
        "large datasets: consider providing a custom method for _empty(::$(typeof(md)))."
    return _empty!(deepcopy(md))
end
"""
_empty!(md)
Remove all instances from a multimodal dataset.
Note: since the AbstractMultiDataset will be empty its columns types will become of
type `Any`.
$(__note_about_utils)
"""
# Remove every instance from `md` in place.
# (Fixed typo: `nisnstances` -> `ninstances`; the old name is undefined, so
# this method always threw an UndefVarError when called.)
function _empty!(md::AbstractMultiDataset)
    return removeinstances!(md, 1:ninstances(md))
end
"""
_same_variables(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same variables.
$(__note_about_utils)
"""
function _same_variables(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    # Two datasets share the same variables when the name => eltype mappings
    # of their underlying dataframes coincide (column order is irrelevant,
    # since the comparison goes through a Dict).
    _vartypes(md) = Dict{Symbol,DataType}(
        Symbol.(names(data(md))) .=> eltype.(eachcol(data(md)))
    )
    return isequal(_vartypes(md1), _vartypes(md2))
end
"""
_same_dataset(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same underlying
`AbstractDataFrame`, regardless of column order.
Note: the check will be performed against the instances too; if the intent is to just check
the presence of the same variables use [`_same_variables`](@ref) instead.
$(__note_about_utils)
"""
function _same_dataset(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    # Quick rejection: mismatching variable sets or instance counts.
    if !_same_variables(md1, md2) || ninstances(md1) != ninstances(md2)
        return false
    end
    names1 = Symbol.(names(data(md1)))
    names2 = Symbol.(names(data(md2)))
    # Permutation aligning md2's columns with md1's column order
    # (guaranteed to exist thanks to the `_same_variables` check above).
    perm = map(n -> findfirst(isequal(n), names2), names1)
    return data(md1) == data(md2)[:,perm]
end
"""
_same_grouped_variables(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same modalities regardless of
column order.
Note: the check will be performed against the instances too; if the intent is to just check
the presence of the same variables use [`_same_variables`](@ref) instead.
$(__note_about_utils)
"""
function _same_grouped_variables(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    # Same variables (names and eltypes) are a prerequisite.
    if !_same_variables(md1, md2)
        return false
    end
    # Same number of modalities and pairwise equal modality sizes.
    if nmodalities(md1) != nmodalities(md2) ||
        [nvariables(f) for f in md1] != [nvariables(f) for f in md2]
        return false
    end
    # `unmixed_indices[k]` is the position in md2 of md1's k-th variable.
    md1_vars = Symbol.(names(data(md1)))
    md2_vars = Symbol.(names(data(md2)))
    unmixed_indices = [findfirst(x -> isequal(x, name), md2_vars) for name in md1_vars]
    # Each modality must group the same variables once indices are remapped.
    for i in 1:nmodalities(md1)
        if grouped_variables(md1)[i] != Integer[unmixed_indices[j]
                for j in grouped_variables(md2)[i]]
            return false
        end
    end
    # Finally compare the actual data, with md2's columns realigned.
    return data(md1) == data(md2)[:,unmixed_indices]
end
"""
_same_labeling_variables(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same labels regardless of
column order.
Note: the check will be performed against the instances too; if the intent is to just check
the presence of the same variables use [`_same_label_names`](@ref) instead.
$(__note_about_utils)
"""
# Generic multimodal datasets carry no labeling variables: vacuously the same.
function _same_labeling_variables(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    return true
end
function _same_labeling_variables(
    lmd1::AbstractLabeledMultiDataset,
    lmd2::AbstractLabeledMultiDataset
)
    # Label-name sets must coincide before values are compared.
    !_same_label_names(lmd1, lmd2) && return false;
    lmd1_lbls = labels(lmd1)
    lmd2_lbls = labels(lmd2) # TODO fix?
    # Align lmd2's columns to lmd1's label order by name.
    # NOTE(review): `lmd2_lbls` is computed but never used below — confirm
    # whether the lookup should search `lmd2_lbls` instead of all columns.
    unmixed_indices = [findfirst(x -> isequal(x, name), Symbol.(names(data(lmd2))))
        for name in lmd1_lbls]
    if any(isnothing.(unmixed_indices))
        return false
    else
        # Compare the label columns themselves, row by row.
        return data(lmd1)[:,lmd1_lbls] == data(lmd2)[:,unmixed_indices]
    end
end
"""
_same_label_names(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same label names regardless of
column order.
Note: the check will not be performed against the instances; if the intent is to check
whether the two datasets have the same labels use [`_same_labeling_variables`](@ref) instead.
$(__note_about_utils)
"""
# Generic multimodal datasets carry no labels: trivially the same names.
function _same_label_names(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    return true
end
function _same_label_names(
    lmd1::AbstractLabeledMultiDataset,
    lmd2::AbstractLabeledMultiDataset
)
    # Order-insensitive comparison of the two label-name collections.
    return issetequal(labels(lmd1), labels(lmd2))
end
"""
_same_instances(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same instances,
regardless of their order.
$(__note_about_utils)
"""
function _same_instances(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    # Cheap structural checks before comparing actual rows.
    if !_same_variables(md1, md2) || ninstances(md1) != ninstances(md2)
        return false
    end
    # Mutual inclusion of the instance sets (delegates to `issubset` for
    # multimodal datasets).
    return md1 ⊆ md2 && md2 ⊆ md1
end
"""
_same_md(md1, md2)
Determine whether two `AbstractMultiDataset`s have the same underlying
`AbstractDataFrame` and modalities,
regardless of the column order of their `AbstractDataFrames`.
Note: the check will be performed against the instances too; if the intent is to just check
the presence of the same variables use [`_same_variables`](@ref) instead.
$(__note_about_utils)
"""
function _same_md(
    md1::AbstractMultiDataset,
    md2::AbstractMultiDataset
)
    # Same variables and same number of instances are prerequisites.
    if !_same_variables(md1, md2) || ninstances(md1) != ninstances(md2)
        return false
    end
    # Same number of modalities and pairwise equal modality sizes.
    if nmodalities(md1) != nmodalities(md2) ||
        [nvariables(f) for f in md1] != [nvariables(f) for f in md2]
        return false
    end
    # `unmixed_indices[k]` is the position in md2 of md1's k-th variable.
    md1_vars = Symbol.(names(data(md1)))
    md2_vars = Symbol.(names(data(md2)))
    unmixed_indices = [findfirst(x -> isequal(x, name), md2_vars) for name in md1_vars]
    # Underlying data must match once md2's columns are realigned.
    if data(md1) != data(md2)[:,unmixed_indices]
        return false
    end
    # Modalities must group the same variables after index remapping.
    for i in 1:nmodalities(md1)
        if grouped_variables(md1)[i] != Integer[unmixed_indices[j]
                for j in grouped_variables(md2)[i]]
            return false
        end
    end
    return true
end
"""
_name2index(df, variable_name)
Return the index of the variable named `variable_name`.
If the variable does not exist `0` is returned.
_name2index(df, variable_names)
Return the indices of the variables named `variable_names`.
$(__note_about_utils)
"""
# Resolve a variable name to its column index (0 when the name is absent,
# mirroring `DataFrames.columnindex`).
_name2index(df::AbstractDataFrame, variable_name::Symbol) = columnindex(df, variable_name)
_name2index(md::AbstractMultiDataset, variable_name::Symbol) = columnindex(data(md), variable_name)
# Vectorized variants: resolve each name independently.
function _name2index(df::AbstractDataFrame, variable_names::AbstractVector{Symbol})
    return map(name -> _name2index(df, name), variable_names)
end
function _name2index(
    md::AbstractMultiDataset,
    variable_names::AbstractVector{Symbol}
)
    return map(name -> _name2index(md, name), variable_names)
end
"""
_is_variable_in_modalities(md, i)
Check if `i`-th variable is used in any modality or not.
Alternatively to the index the `variable_name` can be passed as second argument.
$(__note_about_utils)
"""
# True iff variable `i` belongs to at least one modality descriptor.
# (Improved: searches each descriptor directly instead of materializing the
# concatenation of all of them via `cat(...; dims = 1)`; this also behaves
# correctly when the dataset has no modalities at all.)
function _is_variable_in_modalities(md::AbstractMultiDataset, i::Integer)
    return any(modality_desc -> i in modality_desc, grouped_variables(md))
end
# Name-based variant (resolves the name first; 0 is never in a descriptor).
function _is_variable_in_modalities(md::AbstractMultiDataset, variable_name::Symbol)
    return _is_variable_in_modalities(md, _name2index(md, variable_name))
end
# Print the dataset's type and the tuple of its modalities' dimensionalities.
function _prettyprint_header(io::IO, md::AbstractMultiDataset)
    println(io, "● $(typeof(md))")
    println(io, "   └─ dimensionalities: $(dimensionality(md))")
end
# Print each modality with its position, dimensionality, and sub-dataframe.
function _prettyprint_modalities(io::IO, md::AbstractMultiDataset)
    for (i, modality) in enumerate(md)
        println(io, "- Modality $(i) / $(nmodalities(md))")
        println(io, "   └─ dimensionality: $(dimensionality(modality))")
        println(io, modality)
    end
end
# Print the variables not belonging to any modality (skipped when none exist).
function _prettyprint_sparevariables(io::IO, md::AbstractMultiDataset)
    spare_vars = sparevariables(md)
    if length(spare_vars) > 0
        spare_df = @view data(md)[:,spare_vars]
        println(io, "- Spare variables")
        println(io, "   └─ dimensionality: $(dimensionality(spare_df))")
        println(io, spare_df)
    end
end
# Render a set-valued domain as "{ e1, e2, ... }" ("{ }" when empty).
function _prettyprint_domain(set::AbstractSet)
    elements = collect(set)
    isempty(elements) && return "{ }"
    return "{ " * join(string.(elements), ", ") * " }"
end
# Render an interval-like domain by its extremes.
_prettyprint_domain(dom::Tuple) = "($(dom[1]) - $(dom[end]))"
# Print the labels block of a labeled dataset: one line per label with its
# domain, or a placeholder line when no label has been selected.
function _prettyprint_labels(io::IO, lmd::AbstractMultiDataset)
    println(io, "   ├─ labels")
    if nlabelingvariables(lmd) > 0
        lbls = labels(lmd)
        # All labels but the last use the "branch" connector...
        for i in 1:(length(lbls)-1)
            println(io, "   │   ├─ $(lbls[i]): " *
                "$(labeldomain(lmd, i))")
        end
        # ...the last one closes the tree.
        println(io, "   │   └─ $(lbls[end]): " *
            "$(labeldomain(lmd, length(lbls)))")
    else
        println(io, "   │   └─ no label selected")
    end
    println(io, "   └─ dimensionalities: $(dimensionality(lmd))")
end
"""
paa(x; f = identity, t = (1, 0, 0))
Piecewise Aggregate Approximation
Apply `f` function to each dimensionality of `x` array divinding it in `t[1]` windows taking
`t[2]` extra points left and `t[3]` extra points right.
Note: first window will always consider `t[2] = 0` and last one will always consider
`t[3] = 0`.
"""
function paa(
x::AbstractArray{T};
f::Function = identity,
t::AbstractVector{<:NTuple{3,Integer}} = [(1, 0, 0)]
) where {T<:Real}
@assert ndims(x) == length(t) "Mismatching dims $(ndims(x)) != $(length(t)): " *
"length(t) has to be equal to ndims(x)"
N = length(x)
n_chunks = t[1][1]
@assert 1 β€ n_chunks && n_chunks β€ N "The number of chunks must be in [1,$(N)]"
@assert 0 β€ t[1][2] β€ floor(N/n_chunks) && 0 β€ t[1][3] β€ floor(N/n_chunks)
z = Array{Float64}(undef, n_chunks)
# TODO Float64? solve this? any better ideas?
Threads.@threads for i in collect(1:n_chunks)
l = ceil(Int, (N*(i-1)/n_chunks) + 1)
h = ceil(Int, N*i/n_chunks)
if i == 1
h = h + t[1][3]
elseif i == n_chunks
l = l - t[1][2]
else
h = h + t[1][3]
l = l - t[1][2]
end
z[i] = f(x[l:h])
end
return z
end
"""
linearize_data(d)
Linearize dimensional object `d`.
"""
linearize_data(d::Any) = d
linearize_data(d::AbstractVector) = d
linearize_data(d::AbstractMatrix) = reshape(m', 1, :)[:]
function linearize_data(d::AbstractArray)
return throw(ErrorExcpetion("Still cannot linearize data of dimensionality > 2"))
end
# TODO: more linearizations
"""
unlinearize_data(d, dims)
Unlinearize a vector `d` to a shape `dims`.
"""
unlinearize_data(d::Any, dims::Tuple{}) = d
function unlinearize_data(d::AbstractVector, dims::Tuple{})
return length(d) β€ 1 ? d[1] : collect(d)
end
function unlinearize_data(d::AbstractVector, dims::NTuple{1,<:Integer})
return collect(d)
end
function unlinearize_data(d::AbstractVector, dims::NTuple{2,<:Integer})
return collect(reshape(d, dims)')
end
function unlinearize_data(d::AbstractVector, dims::NTuple{N,<:Integer}) where {N<:Integer}
# TODO: implement generic way to unlinearize data
throw(ErrorException("Unlinearization of data to $(dims) still not implemented"))
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 25155 |
# -------------------------------------------------------------
# Variables manipulation
"""
nvariables(md)
nvariables(md, i)
Return the number of variables in a multimodal dataset.
If an index `i` is passed as second argument, then the number of variables
of the `i`-th modality is returned.
Alternatively, `nvariables` can be called on a single modality.
# Arguments
* `md` is a `MultiDataset`;
* `i` (optional) is an `Integer` indicating the modality of the multimodal dataset whose
number of variables you want to know.
# Examples
```julia-repl
julia> md = MultiDataset([[1],[2]], DataFrame(:age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> nvariables(md)
2
julia> nvariables(md, 2)
1
julia> mod2 = modality(md, 2)
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> nvariables(mod2)
1
julia> md = MultiDataset([[1, 2],[3, 4, 5]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F'], :height => [180, 175], :weight => [80, 60]))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β sex height weight
β Char Int64 Int64
ββββββΌββββββββββββββββββββββ
1 β M 180 80
2 β F 175 60
julia> nvariables(md)
5
julia> nvariables(md, 2)
3
julia> mod2 = modality(md,2)
2Γ3 SubDataFrame
Row β sex height weight
β Char Int64 Int64
ββββββΌββββββββββββββββββββββ
1 β M 180 80
2 β F 175 60
julia> nvariables(mod2)
3
```
"""
# Number of columns of a plain (sub)dataframe.
nvariables(df::AbstractDataFrame) = ncol(df)
# Total number of columns of the dataset (spare variables included).
nvariables(md::AbstractMultiDataset) = nvariables(data(md))
# Number of variables of the `i`-th modality.
function nvariables(md::AbstractMultiDataset, i::Integer)
    @assert 1 ≤ i ≤ nmodalities(md) "Index ($i) must be a valid modality number " *
        "(1:$(nmodalities(md)))"
    return nvariables(modality(md, i))
end
"""
insertvariables!(md, col, index, values)
insertvariables!(md, index, values)
insertvariables!(md, col, index, value)
insertvariables!(md, index, value)
Insert a variable in a multimodal dataset with a given index.
!!! note
Each inserted variable will be added in as a spare variables.
# Arguments
* `md` is an `AbstractMultiDataset`;
* `col` is an `Integer` indicating in which position to insert the new variable.
If no col is passed, the new variable will be placed
last in the md's underlying dataframe structure;
* `index` is a `Symbol` and denote the name of the variable to insert.
Duplicated variable names will be renamed to avoid conflicts: see `makeunique` argument
for [insertcols!](https://dataframes.juliadata.org/stable/lib/functions/#DataFrames.insertcols!)
in DataFrames documentation;
* `values` is an `AbstractVector` that indicates the values for the newly
inserted variable. The length of `values` should match `ninstances(md)`;
* `value` is a single value for the new variable. If a single `value` is passed as a last
argument this will be copied and used for each instance in the dataset.
# Examples
```julia-repl
julia> md = MultiDataset([[1, 2],[3]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> insertvariables!(md, :weight, [80, 75])
2Γ4 DataFrame
Row β name age sex weight
β String Int64 Char Int64
ββββββΌβββββββββββββββββββββββββββββ
1 β Python 25 M 80
2 β Julia 26 F 75
julia> md
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 75
julia> insertvariables!(md, 2, :height, 180)
2Γ5 DataFrame
Row β name height age sex weight
β String Int64 Int64 Char Int64
ββββββΌβββββββββββββββββββββββββββββββββββββ
1 β Python 180 25 M 80
2 β Julia 180 26 F 75
julia> insertvariables!(md, :hair, ["brown", "blonde"])
2Γ6 DataFrame
Row β name height age sex weight hair
β String Int64 Int64 Char Int64 String
ββββββΌβββββββββββββββββββββββββββββββββββββββββββββ
1 β Python 180 25 M 80 brown
2 β Julia 180 26 F 75 blonde
julia> md
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Spare variables
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β height weight hair
β Int64 Int64 String
ββββββΌββββββββββββββββββββββββ
1 β 180 80 brown
2 β 180 75 blonde
```
"""
function insertvariables!(
    md::AbstractMultiDataset,
    col::Integer,
    index::Symbol,
    values::AbstractVector
)
    # Exactly one value per instance is required.
    @assert length(values) == ninstances(md) "value not specified for each instance " *
        "{length(values) != ninstances(md)}:{$(length(values)) != $(ninstances(md))}"
    if col == nvariables(md)+1
        # Appending at the end: no modality descriptor needs adjusting.
        insertcols!(data(md), col, index => values, makeunique = true)
    else
        # Mid-dataframe insertion: every registered variable index at or
        # past `col` shifts one position to the right.
        insertcols!(data(md), col, index => values, makeunique = true)
        for modality_desc in grouped_variables(md)
            for (pos, var_idx) in enumerate(modality_desc)
                if var_idx >= col
                    modality_desc[pos] = var_idx + 1
                end
            end
        end
    end
    return md
end
# Append `values` as the last column.
function insertvariables!(
    md::AbstractMultiDataset,
    index::Symbol,
    values::AbstractVector
)
    return insertvariables!(md, nvariables(md)+1, index, values)
end
# Replicate a single `value` for every instance (deep-copied so mutable
# values are not shared across rows).
function insertvariables!(
    md::AbstractMultiDataset,
    col::Integer,
    index::Symbol,
    value
)
    return insertvariables!(md, col, index, [deepcopy(value) for _ in 1:ninstances(md)])
end
function insertvariables!(md::AbstractMultiDataset, index::Symbol, value)
    return insertvariables!(md, nvariables(md)+1, index, value)
end
"""
hasvariables(df, variable_name)
hasvariables(md, i_modality, variable_name)
hasvariables(md, variable_name)
hasvariables(df, variable_names)
hasvariables(md, i_modality, variable_names)
hasvariables(md, variable_names)
Check whether a multimodal dataset contains a variable named `variable_name`.
Instead of a single variable name a `Vector` of names can be passed. If this is the case,
this function will return `true` only if `md` contains all the specified variables.
# Arguments
* `df` is an `AbstractDataFrame`, which is one of the two structure in which you want to check
the presence of the variable;
* `md` is an `AbstractMultiDataset`, which is one of the two structure in which you want
to check the presence of the variable;
* `variable_name` is a `Symbol` indicating the variable, whose existence I want to
verify;
* `i_modality` is an `Integer` indicating in which modality to look for the variable.
# Examples
```julia-repl
julia> md = MultiDataset([[1, 2],[3]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> hasvariables(md, :age)
true
julia> hasvariables(md.data, :name)
true
julia> hasvariables(md, :height)
false
julia> hasvariables(md, 1, :sex)
false
julia> hasvariables(md, 2, :sex)
true
```
```julia-repl
julia> md = MultiDataset([[1, 2],[3]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> hasvariables(md, [:sex, :age])
true
julia> hasvariables(md, 1, [:sex])
false
julia> hasvariables(md, 2, [:sex])
true
julia> hasvariables(md.data, [:name, :sex])
true
```
"""
# Single-name variants: a variable exists iff its resolved index is nonzero
# (`_name2index` returns 0 for unknown names).
hasvariables(df::AbstractDataFrame, variable_name::Symbol) = _name2index(df, variable_name) > 0
function hasvariables(
    md::AbstractMultiDataset,
    i_modality::Integer,
    variable_name::Symbol
)
    return _name2index(modality(md, i_modality), variable_name) > 0
end
hasvariables(md::AbstractMultiDataset, variable_name::Symbol) = _name2index(md, variable_name) > 0
# Multi-name variants: true only when every requested name resolves.
function hasvariables(df::AbstractDataFrame, variable_names::AbstractVector{Symbol})
    return all(idx -> idx > 0, _name2index(df, variable_names))
end
function hasvariables(
    md::AbstractMultiDataset,
    i_modality::Integer,
    variable_names::AbstractVector{Symbol}
)
    return all(idx -> idx > 0, _name2index(modality(md, i_modality), variable_names))
end
function hasvariables(
    md::AbstractMultiDataset,
    variable_names::AbstractVector{Symbol}
)
    return all(idx -> idx > 0, _name2index(md, variable_names))
end
"""
variableindex(df, variable_name)
variableindex(md, i_modality, variable_name)
variableindex(md, variable_name)
Return the index of the variable.
When `i_modality` is passed, the function
returns the index of the variable in the sub-dataframe of the
modality identified by `i_modality`.
It returns `0` when the variable is not contained in the modality identified by `i_modality`.
# Arguments
* `df` is an `AbstractDataFrame`;
* `md` is an `AbstractMultiDataset`;
* `variable_name` is a `Symbol` indicating the variable whose index you want to know;
* `i_modality` is an `Integer` indicating of which modality you want to know the index of
the variable.
# Examples
```julia-repl
julia> md = MultiDataset([[1, 2],[3]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> md.data
2Γ3 DataFrame
Row β name age sex
β String Int64 Char
ββββββΌβββββββββββββββββββββ
1 β Python 25 M
2 β Julia 26 F
julia> variableindex(md, :age)
2
julia> variableindex(md, :sex)
3
julia> variableindex(md, 1, :name)
1
julia> variableindex(md, 2, :name)
0
julia> variableindex(md, 2, :sex)
1
julia> variableindex(md.data, :age)
2
```
"""
# Thin public wrappers around the internal `_name2index` (0 when absent).
variableindex(df::AbstractDataFrame, variable_name::Symbol) = _name2index(df, variable_name)
function variableindex(
    md::AbstractMultiDataset,
    i_modality::Integer,
    variable_name::Symbol
)
    # Index relative to the given modality's sub-dataframe.
    return _name2index(modality(md, i_modality), variable_name)
end
variableindex(md::AbstractMultiDataset, variable_name::Symbol) = _name2index(md, variable_name)
"""
sparevariables(md)
Return the indices of all the variables that are not contained in any of the modalities of a
multimodal dataset.
# Arguments
* `md` is a `MultiDataset`, whose spare variables' indices are to be retrieved.
# Examples
```julia-repl
julia> md = MultiDataset([[1],[3]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
julia> md.data
2Γ3 DataFrame
Row β name age sex
β String Int64 Char
ββββββΌβββββββββββββββββββββ
1 β Python 25 M
2 β Julia 26 F
julia> sparevariables(md)
1-element Vector{Int64}:
2
```
"""
function sparevariables(md::AbstractMultiDataset)::AbstractVector{<:Integer}
    # Variables not referenced by any modality descriptor.
    used = unique(cat(grouped_variables(md)..., dims = 1))
    return Int.(setdiff(1:nvariables(md), used))
end
"""
variables(md, i)
Return the names as `Symbol`s of the variables in a multimodal dataset.
When called on a object of type `MultiDataset` a `Dict` is returned which will map the
modality index to an `AbstractVector{Symbol}`.
Note: the order of the variable names is granted to match the order of the variables
in the modality.
If an index `i` is passed as second argument, then the names of the variables
of the `i`-th modality are returned as an `AbstractVector`.
Alternatively, `nvariables` can be called on a single modality.
# Arguments
* `md` is an MultiDataset;
* `i` is an `Integer` indicating from which modality of the multimodal dataset to get the
names of the variables.
# Examples
```julia-repl
julia> md = MultiDataset([[2],[3]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F']))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
julia> variables(md)
Dict{Integer, AbstractVector{Symbol}} with 2 entries:
2 => [:sex]
1 => [:age]
julia> variables(md, 2)
1-element Vector{Symbol}:
:sex
julia> variables(md, 1)
1-element Vector{Symbol}:
:age
julia> mod2 = modality(md, 2)
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
julia> variables(mod2)
1-element Vector{Symbol}:
:sex
```
"""
variables(df::AbstractDataFrame) = Symbol.(names(df))
function variables(md::AbstractMultiDataset, i::Integer)
@assert 1 β€ i β€ nmodalities(md) "Index ($i) must be a valid modality number " *
"(1:$(nmodalities(md)))"
return variables(modality(md, i))
end
# Map each modality index of `md` to the names of its variables.
function variables(md::AbstractMultiDataset)
    return Dict{Integer,AbstractVector{Symbol}}(
        i => variables(md, i) for i in 1:nmodalities(md)
    )
end
"""
dropvariables!(md, i)
dropvariables!(md, variable_name)
dropvariables!(md, indices)
dropvariables!(md, variable_names)
dropvariables!(md, i_modality, indices)
dropvariables!(md, i_modality, variable_names)
Drop the `i`-th variable from a multimodal dataset, and return the dataset itself.
# Arguments
* `md` is a `MultiDataset`;
* `i` is an `Integer` that indicates the index of the variable to drop;
* `variable_name` is a `Symbol` that indicates the variable to drop;
* `indices` is an `AbstractVector{Integer}` that indicates the indices of the variables to
drop;
* `variable_names` is an `AbstractVector{Symbol}` that indicates the variables to drop.
* `i_modality`: index of the modality; if this argument is specified,
`indices` are considered as relative to the `i_modality`-th modality
# Examples
```julia-repl
julia> md = MultiDataset([[1, 2],[3, 4, 5]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F'], :height => [180, 175], :weight => [80, 60]))
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β sex height weight
β Char Int64 Int64
ββββββΌββββββββββββββββββββββ
1 β M 180 80
2 β F 175 60
julia> dropvariables!(md, 4)
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β sex weight
β Char Int64
ββββββΌββββββββββββββ
1 β M 80
2 β F 60
julia> dropvariables!(md, :name)
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 25
2 β 26
- Modality 2 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β sex weight
β Char Int64
ββββββΌββββββββββββββ
1 β M 80
2 β F 60
julia> dropvariables!(md, [1,3])
[ Info: Variable 1 was last variable of modality 1: removing modality
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
```
TODO: To be reviewed
"""
# Drop the dataset-level variable `i`: first detach it from every modality
# (possibly removing modalities that become empty), then delete the column
# itself and re-align the remaining dataset-level indices.
function dropvariables!(md::AbstractMultiDataset, i::Integer; kwargs...)
    @assert 1 ≤ i ≤ nvariables(md) "Variable $(i) is not a valid variable index " *
        "(1:$(nvariables(md)))"
    # Remove variable `i` from every modality referencing it.
    # NOTE: `j` is advanced only when modality `j` does NOT contain `i`,
    # because `removevariable_frommodality!` may delete modality `j` entirely
    # (when `i` was its last variable), shifting the following modalities down.
    j = 1
    while j ≤ nmodalities(md)
        desc = grouped_variables(md)[j]
        if i in desc
            removevariable_frommodality!(md, j, i; kwargs...)
        else
            j += 1
        end
    end
    # Physically remove the column from the underlying DataFrame
    # (`nvariables` still counts the column at this point).
    select!(data(md), setdiff(collect(1:nvariables(md)), i))
    # All stored dataset-level indices greater than `i` shift down by one.
    for (i_modality, desc) in enumerate(grouped_variables(md))
        for (i_var, var) in enumerate(desc)
            if var > i
                grouped_variables(md)[i_modality][i_var] = var - 1
            end
        end
    end
    return md
end
# Drop a variable addressed by name.
function dropvariables!(md::AbstractMultiDataset, variable_name::Symbol; kwargs...)
    @assert hasvariables(md, variable_name) "MultiDataset does not contain " *
        "variable $(variable_name)"
    # Resolve the name to its dataset-level index, then delegate.
    idx = _name2index(md, variable_name)
    return dropvariables!(md, idx; kwargs...)
end
# Drop multiple variables addressed by dataset-level indices.
function dropvariables!(md::AbstractMultiDataset, indices::AbstractVector{<:Integer}; kwargs...)
    for i in indices
        @assert 1 ≤ i ≤ nvariables(md) "Index $(i) does not correspond to a " *
            "variable (1:$(nvariables(md)))"
    end
    # Drop from the highest index downwards so that removing one column does
    # not invalidate the (greater) indices still to be processed.
    # Duplicates are removed first: dropping the same index twice would
    # silently delete an unrelated variable after the first removal shifts
    # the indexing. `sort` (non-mutating) also avoids copying the caller's
    # vector, which the previous `sort!(deepcopy(indices))` did needlessly.
    for i_var in sort(unique(indices); rev = true)
        dropvariables!(md, i_var; kwargs...)
    end
    return md
end
# Drop multiple variables addressed by name.
function dropvariables!(
    md::AbstractMultiDataset,
    variable_names::AbstractVector{Symbol};
    kwargs...
)
    # Validate every requested name before touching the dataset.
    for name in variable_names
        @assert hasvariables(md, name) "MultiDataset does not contain " *
            "variable $(name)"
    end
    return dropvariables!(md, _name2index(md, variable_names); kwargs...)
end
# Drop variables addressed by indices *relative to a modality*.
function dropvariables!(
    md::AbstractMultiDataset,
    i_modality::Integer,
    indices::Union{Integer, AbstractVector{<:Integer}};
    kwargs...
)
    # Normalize a scalar index to a one-element vector.
    idxs = [ indices... ]
    if !(1 <= i_modality <= nmodalities(md))
        throw(DimensionMismatch("Index $(i_modality) does not correspond to a modality"))
    end
    # Translate modality-local indices into dataset-level ones and delegate.
    dataset_indices = grouped_variables(md)[i_modality][idxs]
    return dropvariables!(md, dataset_indices; kwargs...)
end
# Drop variables of a given modality, addressed by name.
function dropvariables!(
    md::AbstractMultiDataset,
    i_modality::Integer,
    variable_names::Union{Symbol, AbstractVector{<:Symbol}};
    kwargs...
)
    # Normalize a single Symbol to a one-element vector.
    names_vec = [ variable_names... ]
    if !(1 <= i_modality <= nmodalities(md))
        throw(DimensionMismatch("Index $(i_modality) does not correspond to a modality"))
    end
    # Every requested name must actually belong to the selected modality.
    if !issubset(names_vec, variables(md, i_modality))
        throw(DomainError(names_vec, "One or more variables in `var_names` are not in variables modality"))
    end
    varidx = _name2index(md, names_vec)
    return dropvariables!(md, varidx; kwargs...)
end
"""
keeponlyvariables!(md, indices)
keeponlyvariables!(md, variable_names)
Drop all variables that do not correspond to the indices in `indices` from a
multimodal dataset.
Note: if the dropped variables are contained in some modality they will also be removed from
them; as a side effect, this can lead to the removal of modalities.
# Arguments
* `md` is a `MultiDataset`;
* `indices` is an `AbstractVector{Integer}` that indicates which indices to keep in the
multimodal dataset;
* `variable_names` is an `AbstractVector{Symbol}` that indicates which variables to keep in
the multimodal dataset.
# Examples
```julia-repl
julia> md = MultiDataset([[1, 2],[3, 4, 5],[5]], DataFrame(:name => ["Python", "Julia"], :age => [25, 26], :sex => ['M', 'F'], :height => [180, 175], :weight => [80, 60]))
β MultiDataset
ββ dimensionalities: (0, 0, 0)
- Modality 1 / 3
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β name age
β String Int64
ββββββΌβββββββββββββββ
1 β Python 25
2 β Julia 26
- Modality 2 / 3
ββ dimensionality: 0
2Γ3 SubDataFrame
Row β sex height weight
β Char Int64 Int64
ββββββΌββββββββββββββββββββββ
1 β M 180 80
2 β F 175 60
- Modality 3 / 3
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β weight
β Int64
ββββββΌββββββββ
1 β 80
2 β 60
julia> keeponlyvariables!(md, [1,3,4])
[ Info: Variable 5 was last variable of modality 3: removing modality
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β sex height
β Char Int64
ββββββΌββββββββββββββ
1 β M 180
2 β F 175
julia> keeponlyvariables!(md, [:name, :sex])
β MultiDataset
ββ dimensionalities: (0, 0)
- Modality 1 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
- Modality 2 / 2
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β sex
β Char
ββββββΌββββββ
1 β M
2 β F
```
TODO: review
"""
# Keep only the variables at the given dataset-level indices.
function keeponlyvariables!(
    md::AbstractMultiDataset,
    indices::AbstractVector{<:Integer};
    kwargs...
)
    # Keeping `indices` is the same as dropping their complement.
    to_drop = setdiff(collect(1:nvariables(md)), indices)
    return dropvariables!(md, to_drop; kwargs...)
end
keeponlyvariables!(md::AbstractMultiDataset, index::Integer) = keeponlyvariables!(md, [index])
# Keep only the variables addressed by name.
function keeponlyvariables!(
    md::AbstractMultiDataset,
    variable_names::AbstractVector{Symbol};
    kwargs...
)
    # Validate every requested name first.
    for name in variable_names
        @assert hasvariables(md, name) "MultiDataset does not contain " *
            "variable $(name)"
    end
    # Resolve names to indices and drop the complement.
    keep = _name2index(md, variable_names)
    return dropvariables!(md, setdiff(collect(1:nvariables(md)), keep); kwargs...)
end
# Variant accepting per-modality groups of names (a vector of name vectors).
# NOTE(review): the loop variable is a `Vector{Symbol}` here, so this forwards
# whole vectors to `hasvariables` and `_name2index` — presumably those accept
# nested collections; TODO confirm against their definitions.
function keeponlyvariables!(
    md::AbstractMultiDataset,
    variable_names::AbstractVector{<:AbstractVector{Symbol}};
    kwargs...
)
    for var_name in variable_names
        @assert hasvariables(md, var_name) "MultiDataset does not contain " *
            "variable $(var_name)"
    end
    # Keep the named variables by dropping the complementary index set.
    return dropvariables!(
        md,
        setdiff(collect(1:nvariables(md)), _name2index(md, variable_names));
        kwargs...)
end
"""
dropsparevariables!(md)
Drop all variables that are not contained in any of the modalities in a multimodal dataset.
# Arguments
* `md` is a `MultiDataset`, that is, the structure from which the spare variables will be
dropped.
# Examples
```julia-repl
julia> md = MultiDataset([[1]], DataFrame(:age => [30, 9], :name => ["Python", "Julia"]))
β MultiDataset
ββ dimensionalities: (0,)
- Modality 1 / 1
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β age
β Int64
ββββββΌβββββββ
1 β 30
2 β 9
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
julia> dropsparevariables!(md)
2Γ1 DataFrame
Row β name
β String
ββββββΌββββββββ
1 β Python
2 β Julia
```
"""
function dropsparevariables!(md::AbstractMultiDataset; kwargs...)
    # Indices of variables not belonging to any modality, highest first
    # (descending order keeps the remaining indices valid while dropping).
    spare = sort!(sparevariables(md), rev = true)
    var_names = Symbol.(names(data(md)))
    # Collect the dropped columns (in ascending index order, hence `reverse`)
    # into a fresh DataFrame *before* removing them from `md`.
    result = DataFrame([(var_names[i] => data(md)[:,i]) for i in reverse(spare)]...)
    for i_var in spare
        dropvariables!(md, i_var; kwargs...)
    end
    # Return the removed columns, not `md`.
    return result
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 2187 | lmd = LabeledMultiDataset(
MultiDataset([[1], [3]], deepcopy(df_langs)),
[2],
)
# --- LabeledMultiDataset: accessors, slicing, labels, manipulation ---
@test isa(lmd, LabeledMultiDataset)
@test isa(modality(lmd, 1), SubDataFrame)
@test isa(modality(lmd, 2), SubDataFrame)
@test modality(lmd, [1,2]) == [modality(lmd, 1), modality(lmd, 2)]
@test isa(first(eachmodality(lmd)), SubDataFrame)
@test length(eachmodality(lmd)) == nmodalities(lmd)
@test nmodalities(lmd) == 2
@test nvariables(lmd) == 3
@test nvariables(lmd, 1) == 1
@test nvariables(lmd, 2) == 1
@test ninstances(lmd) == length(eachinstance(lmd)) == 2
# instance slicing: empty selector must error; colon / scalar / vector are fine
@test_throws ErrorException slicedataset(lmd, [])
@test_nowarn slicedataset(lmd, :)
@test_nowarn slicedataset(lmd, 1)
@test_nowarn slicedataset(lmd, [1])
@test ninstances(slicedataset(lmd, :)) == 2
@test ninstances(slicedataset(lmd, 1)) == 1
@test ninstances(slicedataset(lmd, [1])) == 1
@test_nowarn concatdatasets(lmd, lmd, lmd)
@test_nowarn vcat(lmd, lmd, lmd)
@test dimensionality(lmd) == (0, 1)
@test dimensionality(lmd, 1) == 0
@test dimensionality(lmd, 2) == 1
# labels
@test nlabelingvariables(lmd) == 1
@test labels(lmd) == [Symbol(names(df_langs)[2])]
@test labels(lmd, 1) == Dict(Symbol(names(df_langs)[2]) => df_langs[1, 2])
@test labels(lmd, 2) == Dict(Symbol(names(df_langs)[2]) => df_langs[2, 2])
@test labeldomain(lmd, 1) == Set(df_langs[:,2])
# remove label
unsetaslabeling!(lmd, 2)
@test nlabelingvariables(lmd) == 0
setaslabeling!(lmd, 2)
@test nlabelingvariables(lmd) == 1
# label
@test label(lmd, 1, 1) == "Python"
@test label(lmd, 2, 1) == "Julia"
# joinlabels!
lmd = LabeledMultiDataset(
    MultiDataset([[1], [4]], deepcopy(df_data)),
    [2, 3],
)
joinlabels!(lmd)
@test labels(lmd) == [Symbol(join([:age, :name], '_'))]
@test label(lmd, 1, 1) == string(30, '_', "Python")
@test label(lmd, 2, 1) == string(9, '_', "Julia")
# dropvariables!
lmd = LabeledMultiDataset(
    MultiDataset([[2], [4]], deepcopy(df_data)),
    [3],
)
@test nmodalities(lmd) == 2
@test nvariables(lmd) == 4
dropvariables!(lmd, 2)
# dropping variable 2 shifts the labeling-variable index from 3 down to 2,
# and removes modality 1 (variable 2 was its only variable)
@test MultiData.labeling_variables(lmd) == [2]
@test nvariables(lmd) == 3
@test nmodalities(lmd) == 1
@test nlabelingvariables(lmd) == 1
@test labels(lmd) == [Symbol(names(df_data)[3])]
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 8475 |
# --- MultiDataset: construction (by subdataframes / indices / names) ---
a = MultiDataset([deepcopy(df_langs), DataFrame(:id => [1, 2])])
b = MultiDataset([[2,3,4], [1]], df_data)
c = MultiDataset([[:age,:name,:stat], [:id]], df_data)
@test b == c
d = MultiDataset([[:age,:name,:stat], :id], df_data)
@test c == d
# mixing names and integers in one group is rejected
@test_throws ErrorException MultiDataset([[:age,:name,4], :id], df_data)
@test MultiData.data(a) != MultiData.data(b)
@test collect(eachmodality(a)) == collect(eachmodality(b))
md = MultiDataset([[1],[2]], deepcopy(df))
original_md = deepcopy(md)
@test isa(md, MultiDataset)
@test isa(first(eachmodality(md)), SubDataFrame)
@test length(eachmodality(md)) == nmodalities(md)
@test modality(md, [1,2]) == [modality(md, 1), modality(md, 2)]
@test isa(modality(md, 1), SubDataFrame)
@test isa(modality(md, 2), SubDataFrame)
@test nmodalities(md) == 2
@test nvariables(md) == 2
@test nvariables(md, 1) == 1
@test nvariables(md, 2) == 1
@test ninstances(md) == length(eachinstance(md)) == 3
@test_throws ErrorException slicedataset(md, [])
@test_nowarn slicedataset(md, :)
@test_nowarn slicedataset(md, 1)
@test_nowarn slicedataset(md, [1])
@test ninstances(slicedataset(md, :)) == 3
@test ninstances(slicedataset(md, 1)) == 1
@test ninstances(slicedataset(md, [1])) == 1
@test_nowarn concatdatasets(md, md, md)
@test_nowarn vcat(md, md, md)
@test dimensionality(md) == (0, 1)
@test dimensionality(md, 1) == 0
@test dimensionality(md, 2) == 1
# # test auto selection of modalities
# auto_md = MultiDataset(deepcopy(df))
# @test nmodalities(auto_md) == 0
# @test length(sparevariables(auto_md)) == nvariables(auto_md)
auto_md_all = MultiDataset(deepcopy(df); group = :all)
@test auto_md_all == md
@test !(:mixed in dimensionality(auto_md_all))
lang_md1 = MultiDataset(df_langs; group = :all)
@test nmodalities(lang_md1) == 2
@test !(:mixed in dimensionality(lang_md1))
lang_md2 = MultiDataset(df_langs; group = [1])
@test nmodalities(lang_md2) == 3
dims_md2 = dimensionality(lang_md2)
@test length(filter(x -> isequal(x, 0), dims_md2)) == 2
@test length(filter(x -> isequal(x, 1), dims_md2)) == 1
@test !(:mixed in dimensionality(lang_md2))
# test equality between mixed-columns datasets
md1_sim = MultiDataset([[1,2]], DataFrame(:b => [3,4], :a => [1,2]))
md2_sim = MultiDataset([[2,1]], DataFrame(:a => [1,2], :b => [3,4]))
@test md1_sim ≈ md2_sim
@test md1_sim == md2_sim
# addmodality!
@test addmodality!(md, [1, 2]) == md # test return
@test nmodalities(md) == 3
@test nvariables(md) == 2
@test nvariables(md, 3) == 2
@test dimensionality(md) == (0, 1, :mixed)
@test dimensionality(md, 3) == :mixed
@test dimensionality(md, 3; force = :min) == 0
@test dimensionality(md, 3; force = :max) == 1
# removemodality!
@test removemodality!(md, 3) == md # test return
@test nmodalities(md) == 2
@test nvariables(md) == 2
@test_throws Exception nvariables(md, 3) == 2
# sparevariables
@test length(sparevariables(md)) == 0
removemodality!(md, 2)
@test length(sparevariables(md)) == 1
addmodality!(md, [2])
@test length(sparevariables(md)) == 0
# pushinstances!
new_inst = DataFrame(:sex => ["F"], :h => [deepcopy(ts_cos)])[1,:]
@test pushinstances!(md, new_inst) == md # test return
@test ninstances(md) == 4
pushinstances!(md, ["M", deepcopy(ts_cos)])
@test ninstances(md) == 5
# deleteinstances!
@test deleteinstances!(md, ninstances(md)) == md # test return
@test ninstances(md) == 4
deleteinstances!(md, ninstances(md))
@test ninstances(md) == 3
# keeponlyinstances!
pushinstances!(md, ["F", deepcopy(ts_cos)])
pushinstances!(md, ["F", deepcopy(ts_cos)])
pushinstances!(md, ["F", deepcopy(ts_cos)])
@test keeponlyinstances!(md, [1, 2, 3]) == md # test return
@test ninstances(md) == 3
for i in 1:ninstances(md)
    @test instance(md, i) == instance(original_md, i)
end
# modality manipulation
@test addvariable_tomodality!(md, 1, 2) === md # test return
@test nvariables(md, 1) == 2
@test dimensionality(md, 1) == :mixed
@test removevariable_frommodality!(md, 1, 2) === md # test return
@test nvariables(md, 1) == 1
@test dimensionality(md, 1) == 0
# variables manipulation
@test insertmodality!(md, deepcopy(ages)) == md # test return
@test nmodalities(md) == 3
@test nvariables(md, 3) == 1
@test dropmodalities!(md, 3) == md # test return
@test nmodalities(md) == 2
insertmodality!(md, deepcopy(ages), [1])
@test nmodalities(md) == 3
@test nvariables(md, 3) == 2
@test dimensionality(md, 3) == 0
# getindex smoke tests: all row/column selector combinations
@test_nowarn md[:,:]
@test_nowarn md[1,:]
@test_nowarn md[:,1]
@test_nowarn md[:,1:2]
@test_nowarn md[[1,2],:]
@test_nowarn md[1,1]
@test_nowarn md[[1,2],[1,2]]
@test_nowarn md[1,[1,2]]
@test_nowarn md[[1,2],1]
# drop "inner" modality and multiple modalities in one operation
insertmodality!(md, DataFrame(:t2 => [deepcopy(ts_sin), deepcopy(ts_cos), deepcopy(ts_sin)]))
@test nmodalities(md) == 4
@test nvariables(md) == 4
@test nvariables(md, nmodalities(md)) == 1
# dropping the modality 3 should result in dropping the first too
# because the variable at index 1 is shared between them and will be
# dropped but modality 1 has just the variable at index 1 in it, this
# should result in dropping that modality too
dropmodalities!(md, 3)
@test nmodalities(md) == 2
@test nvariables(md) == 2
@test nvariables(md, nmodalities(md)) == 1
dropmodalities!(md, 2)
@test nmodalities(md) == 1
@test nvariables(md) == 1
# RESET
md = deepcopy(original_md)
# dropsparevariables!
removemodality!(md, 2)
@test dropsparevariables!(md) == DataFrame(names(df)[2] => df[:,2])
# keeponlyvariables!
md_var_manipulation = MultiDataset([[1], [2], [3, 4]],
    DataFrame(
        :age => [30, 9],
        :name => ["Python", "Julia"],
        :stat1 => [deepcopy(ts_sin), deepcopy(ts_cos)],
        :stat2 => [deepcopy(ts_cos), deepcopy(ts_sin)]
    )
)
md_var_manipulation_original = deepcopy(md_var_manipulation)
@test keeponlyvariables!(md_var_manipulation, [1, 3]) == md_var_manipulation
@test md_var_manipulation == MultiDataset([[1], [2]],
    DataFrame(
        :age => [30, 9],
        :stat1 => [deepcopy(ts_sin), deepcopy(ts_cos)]
    )
)
# addressing variables by name
md1 = MultiDataset([[1],[2]],
    DataFrame(
        :age => [30, 9],
        :name => ["Python", "Julia"],
    )
)
md_var_names_original = deepcopy(md1)
md2 = deepcopy(md1)
@test hasvariables(md1, :age) == true
@test hasvariables(md1, :name) == true
@test hasvariables(md1, :missing_variable) == false
@test hasvariables(md1, [:age, :name]) == true
@test hasvariables(md1, [:age, :missing_variable]) == false
@test hasvariables(md1, 1, :age) == true
@test hasvariables(md1, 1, :name) == false
@test hasvariables(md1, 1, [:age, :name]) == false
@test hasvariables(md1, 2, :name) == true
@test hasvariables(md1, 2, [:name]) == true
# variableindex returns 0 when the variable is absent
@test variableindex(md1, :age) == 1
@test variableindex(md1, :missing_variable) == 0
@test variableindex(md1, 1, :age) == 1
@test variableindex(md1, 2, :age) == 0
@test variableindex(md1, 2, :name) == 1
# addressing variables by name - insertmodality!
md1 = deepcopy(md_var_names_original)
md2 = deepcopy(md_var_names_original)
@test addmodality!(md1, [1]) == addmodality!(md2, [:age])
# addressing variables by name - addvariable_tomodality!
md1 = deepcopy(md_var_names_original)
md2 = deepcopy(md_var_names_original)
@test addvariable_tomodality!(md1, 2, 1) == addvariable_tomodality!(md2, 2, :age)
# addressing variables by name - removevariable_frommodality!
@test removevariable_frommodality!(md1, 2, 1) ==
    removevariable_frommodality!(md2, 2, :age)
# addressing variables by name - dropvariables!
md1 = deepcopy(md_var_names_original)
md2 = deepcopy(md_var_names_original)
@test dropvariables!(md1, 1) ==
    dropvariables!(md2, :age)
@test md1 == md2
# addressing variables by name - insertmodality!
md1 = deepcopy(md_var_names_original)
md2 = deepcopy(md_var_names_original)
@test insertmodality!(
    md1,
    DataFrame(:stat1 => [deepcopy(ts_sin), deepcopy(ts_cos)]),
    [1]
) == insertmodality!(
    md2,
    DataFrame(:stat1 => [deepcopy(ts_sin), deepcopy(ts_cos)]),
    [:age]
)
# addressing variables by name - dropvariables!
@test dropvariables!(md1, [1, 2]) ==
    dropvariables!(md2, [:age, :name])
@test md1 == md2
@test nmodalities(md1) == nmodalities(md2) == 1
@test nvariables(md1) == nvariables(md2) == 1
@test nvariables(md1, 1) == nvariables(md2, 1) == 1
# addressing variables by name - keeponlyvariables!
md1 = deepcopy(md_var_names_original)
md2 = deepcopy(md_var_names_original)
@test keeponlyvariables!(md1, [1]) == keeponlyvariables!(md2, [:age])
@test md1 == md2
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 1267 | _ninstances = 4
# dataframe2cube: all variables must share channel sizes across instances;
# mismatched shapes (shouldfail == true) must raise an AssertionError.
for ((channel_size1, channel_size2), shouldfail) in [
    ((),()) => false,
    ((1),(1)) => false,
    ((1,2),(1,2)) => false,
    ((1,2),(1,)) => true,
    ((1,),()) => true,
    ((1,2),()) => true,
]
    local df = DataFrame(
        x=[rand(channel_size1...) for i_instance in 1:_ninstances],
        y=[rand(channel_size2...) for i_instance in 1:_ninstances]
    )
    if shouldfail
        @test_throws AssertionError MultiData.dataframe2cube(df)
    else
        cube, varnames = @test_nowarn MultiData.dataframe2cube(df)
    end
end
# dimensional2dataframe / dataframe2dimensional round trips: each stored
# instance must have the original array's shape with the sliced axis dropped.
begin
    local df = MultiData.dimensional2dataframe(eachslice(rand(3,4); dims=2), ["a", "b", "c"])
    @test first(unique(size.(dataframe2dimensional(df)[1]))) == (3,)
end
begin
    local df = MultiData.dimensional2dataframe(eachslice(rand(1,3,4); dims=3), ["a", "b", "c"])
    @test first(unique(size.(dataframe2dimensional(df)[1]))) == (1,3)
end
begin
    local df = MultiData.dimensional2dataframe(eachslice(rand(2,3,4); dims=3), ["a", "b", "c"])
    @test first(unique(size.(dataframe2dimensional(df)[1]))) == (2,3)
end
begin
    local df = MultiData.dimensional2dataframe(eachslice(rand(2,2,3,4); dims=4), ["a", "b", "c"])
    @test first(unique(size.(dataframe2dimensional(df)[1]))) == (2,2,3)
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 3806 | lmd = LabeledMultiDataset(
MultiDataset([[2], [4]], deepcopy(df_data)),
[3],
)
# --- Filesystem round trip: save a labeled dataset and verify the on-disk
# layout (Labels.csv, Metadata.txt, per-instance directories), then reload ---
path = relpath(joinpath(testing_savedataset))
savedataset(path, lmd, force = true)
# Labels.csv
@test isfile(joinpath(path, _ds_labels))
@test length(split(readline(joinpath(path, _ds_labels)), ","))-1 == 1
df_labels = CSV.read(joinpath(path, _ds_labels), DataFrame; types = String)
# strip the "Example_" prefix to recover the numeric instance ids
df_labels[!,:id] = parse.(Int64, replace.(df_labels[!,:id], _ds_inst_prefix => ""))
@test df_labels == lmd.md.data[:,sparevariables(lmd.md)]
# Dataset Metadata.txt
@test isfile(joinpath(path, _ds_metadata))
@test "supervised=true" in readlines(joinpath(path, _ds_metadata))
@test length(
    filter(
        x -> startswith(x, _ds_modality_prefix),
        readdir(joinpath(path, _ds_inst_prefix * "1"))
    )) == 2
@test parse.(Int64,
    split(filter(
        (row) -> startswith(row, "num_modalities"),
        readlines(joinpath(path, _ds_metadata))
    )[1], "=")[2]
) == 2
@test length(
    filter(
        row -> startswith(row, "modality"),
        readlines(joinpath(path, _ds_metadata))
    )) == 2
modalities = filter(
    row -> startswith(row, "modality"),
    readlines(joinpath(path, _ds_metadata))
)
# each "modality<i>=<dim>" row must match the modality's dimensionality
@test all([parse.(Int64, split(string(modality), "=")[2]) ==
    dimensionality(lmd[i_modality]) for (i_modality, modality) in enumerate(modalities)])
@test parse(Int64, split(
    filter(
        row -> startswith(row, "num_classes"),
        readlines(joinpath(path, _ds_metadata))
    )[1], "=")[2]) == 1
@test length(
    filter(
        x -> startswith(x, _ds_inst_prefix),
        readdir(joinpath(path))
    )) == 2
# instances Metadata.txt
@test all([isfile(joinpath(path, _ds_inst_prefix * string(i), _ds_metadata))
    for i in 1:nrow(lmd[1])])
for i_inst in 1:ninstances(lmd)
    dim_modality_rows = filter(
        row -> startswith(row, "dim_modality"),
        readlines(joinpath(path, string(_ds_inst_prefix, i_inst), _ds_metadata))
    )
    # for each modality check the proper dimensionality was saved
    for (i_modality, dim_modality) in enumerate(dim_modality_rows)
        @test strip(split(dim_modality, "=")[2]) == string(
            size(first(first(lmd[i_modality])))
        )
    end
end
# each instance's metadata must record its label value exactly once
@test length([filter(
    row -> occursin(string(labels), row),
    readlines(joinpath(path, string(_ds_inst_prefix, modality), _ds_metadata))
) for labels in labels(lmd) for modality in 1:nmodalities(lmd)]) == 2
@test [filter(
    row -> occursin(string(labels), row),
    readlines(joinpath(path, string(_ds_inst_prefix, modality), _ds_metadata))
) for labels in labels(lmd) for modality in 1:nmodalities(lmd)] == [
    ["name=Python"],
    ["name=Julia"]
]
# Example
@test all([isdir(joinpath(path, string(_ds_inst_prefix, instance)))
    for instance in 1:nrow(lmd[1])])
@test all([isfile(joinpath(
    path,
    string(_ds_inst_prefix, instance),
    string(_ds_modality_prefix, i_modality, ".csv")
)) for i_modality in 1:length(lmd) for instance in 1:nrow(lmd[1])])
saved_lmd = loaddataset(path)
@test saved_lmd == lmd
# load MD (a dataset without Labels.csv isa an MD)
rm(joinpath(path, _ds_labels))
ds_metadata_lines = readlines(joinpath(path, _ds_metadata))
rm(joinpath(path, _ds_metadata))
# rewrite the metadata as unsupervised, dropping the num_classes line
file = open(joinpath(path, _ds_metadata), "w+")
for line in ds_metadata_lines
    if occursin("supervised", line)
        println(file, "supervised=false")
    elseif occursin("num_classes", line)
        # intentionally skipped: num_classes must not appear in an MD
    else
        println(file, line)
    end
end
close(file)
md = loaddataset(path)
@test md isa MultiDataset
# saving an MD should not generate a Labels.csv
savedataset(path, md, force = true)
@test !isfile(joinpath(path, _ds_labels))
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | code | 1210 | using MultiData
using Test
using CSV
# Temporary directory used by the filesystem (save/load) tests.
const testing_savedataset = mktempdir(prefix = "saved_dataset")
# On-disk layout constants, re-used from the package internals.
const _ds_inst_prefix = MultiData._ds_inst_prefix
const _ds_modality_prefix = MultiData._ds_modality_prefix
const _ds_metadata = MultiData._ds_metadata
const _ds_labels = MultiData._ds_labels
# Long dummy time series used as 1-dimensional variables in the fixtures.
const ts_sin = [sin(i) for i in 1:50000]
const ts_cos = [cos(i) for i in 1:50000]
# Shared dataset fixtures consumed by the included test files.
const df = DataFrame(
    :sex => ["F", "F", "M"],
    :h => [deepcopy(ts_sin), deepcopy(ts_cos), deepcopy(ts_sin)]
)
const df_langs = DataFrame(
    :age => [30, 9],
    :name => ["Python", "Julia"],
    :stat => [deepcopy(ts_sin), deepcopy(ts_cos)]
)
const df_data = DataFrame(
    :id => [1, 2],
    :age => [30, 9],
    :name => ["Python", "Julia"],
    :stat => [deepcopy(ts_sin), deepcopy(ts_cos)]
)
const ages = DataFrame(:age => [35, 38, 37])
@testset "MultiData.jl" begin
    @testset "MultiDataset" begin
        include("MultiDataset.jl")
    end
    @testset "LabeledMultiDataset" begin
        include("LabeledMultiDataset.jl")
    end
    @testset "Filesystem operations" begin
        include("filesystem.jl")
    end
    @testset "Dimensional data" begin
        include("dimensional-data.jl")
    end
end
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 2126 | <div align="center"><a href="https://github.com/aclai-lab/Sole.jl"><img src="logo.png" alt="" title="This package is part of Sole.jl" width="200"></a></div>
# MultiData.jl β Multimodal datasets
[](https://aclai-lab.github.io/MultiData.jl)
[](https://aclai-lab.github.io/MultiData.jl/dev)
[](https://cirrus-ci.com/github/aclai-lab/MultiData.jl)
[](https://codecov.io/gh/aclai-lab/MultiData.jl)
<!-- [](https://mybinder.org/v2/gh/aclai-lab/MultiData.jl/HEAD?labpath=pluto-demo.jl) -->
<!-- [](https://aclai-lab.github.io/MultiData.jl/dev) -->
## In a nutshell
*MultiData* provides a **machine learning oriented** data layer on top of DataFrames.jl for:
- Instantiating and manipulating [*multimodal*](https://en.wikipedia.org/wiki/Multimodal_learning) datasets for (un)supervised machine learning;
- Describing datasets via basic statistical measures;
- Saving to/loading from *npy/npz* format, as well as a custom CSV-based format (with interesting features such as *lazy loading* of datasets);
- Performing basic data processing operations (e.g., windowing, moving average, etc.).
<!-- - Dealing with [*(non-)tabular* data](https://en.wikipedia.org/wiki/Unstructured_data) (e.g., graphs, images, time-series, etc.); -->
<!--
If you are used to dealing with unstructured/multimodal data, but cannot find the right
tools in Julia, you will find
[*SoleFeatures.jl*](https://github.com/aclai-lab/SoleFeatures.jl/) useful!
-->
## About
The package is developed by the [ACLAI Lab](https://aclai.unife.it/en/) @ University of
Ferrara.
*MultiData.jl* was originally built for representing multimodal datasets in
[*Sole.jl*](https://github.com/aclai-lab/Sole.jl), an open-source framework for
*symbolic machine learning*.
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 2476 | ```@meta
CurrentModule = MultiData
```
# [Datasets](@id man-datasets)
```@contents
Pages = ["datasets.md"]
```
A machine learning dataset are a collection of instances (or samples),
each one described by a number of variables.
In the case of *tabular* data, a dataset looks like
a database table, where every column is a variable,
and each row corresponds to a given instance. However, a dataset can also be *non-tabular*;
for example, each instance can consist of a multivariate time-series, or
an image.
When data is composed of different
[modalities](https://en.wikipedia.org/wiki/Multimodal_learning),
combining their statistical properties is non-trivial, since they may be quite different
in nature from one another.
The abstract representation of a multimodal dataset provided by this package is the
[`AbstractMultiDataset`](@ref).
```@docs
AbstractMultiDataset
grouped_variables
data
dimensionality
```
## [Unlabeled Datasets](@id man-unlabeled-datasets)
In *unlabeled datasets*
there is no labeling variable, and all of the variables (also called *feature variables*,
or *features*) have equal role in the representation.
These datasets are used in
[unsupervised learning](https://en.wikipedia.org/wiki/Unsupervised_learning) contexts,
for discovering internal correlation patterns between the features.
Multimodal *unlabeled* datasets can be instantiated with [`MultiDataset`](@ref).
```@autodocs
Modules = [MultiData]
Pages = ["src/MultiDataset.jl"]
```
## [Labeled Datasets](@id man-supervised-datasets)
In *labeled datasets*, one or more variables are considered to have special semantics
with respect to the other variables;
each of these *labeling variables* (or *target variables*) can be thought as assigning
a label to each instance, which is typically a categorical value (*classification label*)
or a numerical value (*regression label*).
[Supervised learning](https://en.wikipedia.org/wiki/Unsupervised_learning) methods
can be applied on these datasets
for modeling the target variables as a function of the feature variables.
As an extension of the [`AbstractMultiDataset`](@ref),
[`AbstractLabeledMultiDataset`](@ref) has an interface that can be implemented to
represent multimodal labeled datasets.
```@docs
AbstractLabeledMultiDataset
labeling_variables
dataset
```
Multimodal *labeled* datasets can be instantiated with [`LabeledMultiDataset`](@ref).
```@autodocs
Modules = [MultiData]
Pages = ["LabeledMultiDataset.jl", "labels.jl"]
```
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 1925 | ```@meta
CurrentModule = MultiData
```
# [Description](@id man-description)
Just like `DataFrame`s, `MultiDataset`s can be described using the method
[`describe`](@ref):
```julia-repl
julia> ts_cos = [cos(i) for i in 1:50000];
julia> ts_sin = [sin(i) for i in 1:50000];
julia> df_data = DataFrame(
:id => [1, 2],
:age => [30, 9],
:name => ["Python", "Julia"],
:stat => [deepcopy(ts_sin), deepcopy(ts_cos)]
);
julia> md = MultiDataset([[2,3], [4]], df_data);
julia> description = describe(md)
2-element Vector{DataFrame}:
2Γ7 DataFrame
Row β variable mean min median max nmissing eltype
β Symbol Unionβ¦ Any Unionβ¦ Any Int64 DataType
ββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β age 19.5 9 19.5 30 0 Int64
2 β name Julia Python 0 String
1Γ7 DataFrame
Row β Variables mean min β―
β Symbol Arrayβ¦ Arrayβ¦ β―
ββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β stat AbstractFloat[8.63372e-6; -2.848β¦ AbstractFloat[-1.0; -1.0 β―
5 columns omitted
```
the `describe` implementation for `MultiDataset`s will try to find the best
_statistical measures_ that can be used to the type of data the modality contains.
In the example the 2nd modality, which contains variables (just one in the example) of data
of type `Vector{Float64}`, was described by applying the well known 22 features from
the package [Catch22.jl](https://github.com/brendanjohnharris/Catch22.jl) plus `maximum`,
`minimum` and `mean` as the vectors were time series.
```@docs
describe
```
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 185 | ```@meta
CurrentModule = MultiData
```
# [Filesystem](@id man-filesystem)
```@contents
Pages = ["filesystem.md"]
```
```@autodocs
Modules = [MultiData]
Pages = ["filesystem.jl"]
```
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 3505 | ```@meta
CurrentModule = MultiData
```
# MultiData
The aim of this package is to provide a simple and comfortable interface for managing
multimodal data.
It is built
on top of
[DataFrames.jl](https://github.com/JuliaData/DataFrames.jl/)
with Machine learning applications in mind.
```@contents
```
## Installation
Currently this package is still not registered, so you need to run the following
commands in a Julia REPL to install it:
```julia
import Pkg
Pkg.add("MultiData")
```
To install the development version, run:

```julia
import Pkg
Pkg.add(url="https://github.com/aclai-lab/MultiData.jl", rev="dev")
```
## Usage
To instantiate a multimodal dataset, use the [`MultiDataset`](@ref)
constructor by providing: *a)* a
`DataFrame` containing all variables from different modalities, and
*b)* a
`Vector{Vector{Union{Symbol,String,Int64}}}` object representing a
grouping of some of the variables (identified by column index or name)
into different modalities.
```julia-repl
julia> using MultiData
julia> ts_cos = [cos(i) for i in 1:50000];
julia> ts_sin = [sin(i) for i in 1:50000];
julia> df_data = DataFrame(
:id => [1, 2],
:age => [30, 9],
:name => ["Python", "Julia"],
:stat => [deepcopy(ts_sin), deepcopy(ts_cos)]
)
2Γ4 DataFrame
Row β id age name stat
β Int64 Int64 String Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
1 β 1 30 Python [0.841471, 0.909297, 0.14112, -0β¦
2 β 2 9 Julia [0.540302, -0.416147, -0.989992,β¦
julia> grouped_variables = [[2,3], [4]]; # group 2nd and 3rd variables in the first modality
# the 4th variable in the second modality and
# leave the first variable as a "spare variable"
julia> md = MultiDataset(df_data, grouped_variables)
β MultiDataset
ββ dimensionalities: (0, 1)
- Modality 1 / 2
ββ dimensionality: 0
2Γ2 SubDataFrame
Row β age name
β Int64 String
ββββββΌβββββββββββββββ
1 β 30 Python
2 β 9 Julia
- Modality 2 / 2
ββ dimensionality: 1
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
- Spare variables
ββ dimensionality: 0
2Γ1 SubDataFrame
Row β id
β Int64
ββββββΌβββββββ
1 β 1
2 β 2
```
Now `md` holds a `MultiDataset` and all of its modalities can be
conveniently iterated as elements of a `Vector`:
```julia-repl
julia> for (i, f) in enumerate(md)
println("Modality: ", i)
println(f)
println()
end
Modality: 1
2Γ2 SubDataFrame
Row β age name
β Int64 String
ββββββΌβββββββββββββββ
1 β 30 Python
2 β 9 Julia
Modality: 2
2Γ1 SubDataFrame
Row β stat
β Arrayβ¦
ββββββΌβββββββββββββββββββββββββββββββββββ
1 β [0.841471, 0.909297, 0.14112, -0β¦
2 β [0.540302, -0.416147, -0.989992,β¦
```
Note that each element of a `MultiDataset` is a `SubDataFrame`:
```julia-repl
julia> eltype(md)
SubDataFrame
```
!!! note "Spare variables"
Spare variables will never be seen when accessing a `MultiDataset` through its
iterator interface. To access them see [`sparevariables`](@ref).
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 428 | ```@meta
CurrentModule = MultiData
```
# [Manipulation](@id man-manipulation)
```@contents
Pages = ["manipulation.md"]
```
## [Modalities](@id man-modalities)
```@autodocs
Modules = [MultiData]
Pages = ["modalities.jl"]
```
## [Variables](@id man-variables)
```@autodocs
Modules = [MultiData]
Pages = ["variables.jl"]
```
## [Instances](@id man-instances)
```@autodocs
Modules = [MultiData]
Pages = ["instances.jl"]
```
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 0.1.2 | c1697bb6dc6f2b4fbaf8df3a3f23bf06a900acad | docs | 154 | ```@meta
CurrentModule = MultiData
```
# [Utils](@id man-utils)
```@contents
Pages = ["utils.md"]
```
```@docs
paa
linearize_data
unlinearize_data
```
| MultiData | https://github.com/aclai-lab/MultiData.jl.git |
|
[
"MIT"
] | 1.4.0 | 427421c277f82c5749002c1b23cb1aac91beb0a8 | code | 1481 | module LogarithmicNumbersForwardDiffExt
using ForwardDiff: ForwardDiff, Dual, partials
using LogarithmicNumbers: LogarithmicNumbers, AnyLogarithmic, Logarithmic, ULogarithmic
## Promotion rules

# Promote a logarithmic number with a dual number: the result is a dual whose value
# type is the promotion of the logarithmic type with the dual's value type `V`.
# `promote_type` is used rather than calling `promote_rule` directly, so that rules
# defined in either argument order are honoured (promote_rule is only required to be
# defined in one direction; promote_type tries both).
function Base.promote_rule(::Type{Logarithmic{R}}, ::Type{Dual{T,V,N}}) where {R<:Real,T,V,N}
    return Dual{T,promote_type(Logarithmic{R}, V),N}
end
function Base.promote_rule(::Type{ULogarithmic{R}}, ::Type{Dual{T,V,N}}) where {R<:Real,T,V,N}
    return Dual{T,promote_type(ULogarithmic{R}, V),N}
end
## Constructors

# Construct a logarithmic number from a dual log-space value, propagating the
# derivative through the construction. Since d/dx exp(x) = exp(x), the value and the
# derivative factor are the same logarithmic number.
# Based on the unary_definition macro in ForwardDiff.jl (https://github.com/JuliaDiff/ForwardDiff.jl/blob/6a6443b754b0fcfb4d671c9a3d01776df801f498/src/dual.jl#L230-L244)
function Base.exp(::Type{ULogarithmic{R}}, d::Dual{T,V,N}) where {R<:Real,T,V,N}
    x = ForwardDiff.value(d)
    val = exp(ULogarithmic{R}, x)
    deriv = exp(ULogarithmic{R}, x) # derivative of x -> exp(ULogarithmic, x) is itself
    return ForwardDiff.dual_definition_retval(Val{T}(), val, deriv, partials(d))
end
function Base.exp(::Type{Logarithmic{R}}, d::Dual{T,V,N}) where {R<:Real,T,V,N}
    x = ForwardDiff.value(d)
    val = exp(Logarithmic{R}, x)
    deriv = exp(Logarithmic{R}, x) # derivative of x -> exp(Logarithmic, x) is itself
    return ForwardDiff.dual_definition_retval(Val{T}(), val, deriv, partials(d))
end
# Unparametrised forms: the base type defaults to the dual's value type `V`.
function Base.exp(::Type{ULogarithmic}, d::Dual{T,V,N}) where {T,V,N}
    return exp(ULogarithmic{V}, d)
end
function Base.exp(::Type{Logarithmic}, d::Dual{T,V,N}) where {T,V,N}
    return exp(Logarithmic{V}, d)
end
# TODO: do we need more constructors?
end
| LogarithmicNumbers | https://github.com/cjdoris/LogarithmicNumbers.jl.git |
|
[
"MIT"
] | 1.4.0 | 427421c277f82c5749002c1b23cb1aac91beb0a8 | code | 15470 | """
A [logarithmic number system](https://en.wikipedia.org/wiki/Logarithmic_number_system).
Provides the signed [`Logarithmic`](@ref) and unsigned [`ULogarithmic`](@ref) types, which represent real numbers and positive real numbers respectively.
"""
module LogarithmicNumbers
using Random
export ULogarithmic, ULogFloat16, ULogFloat32, ULogFloat64
export Logarithmic, LogFloat16, LogFloat32, LogFloat64
### Types

"""
    ULogarithmic(x)

Represents the positive real number `x` by storing its logarithm.

!!! tip
    If you know `logx=log(x)` then use [`exp(ULogarithmic, logx)`](@ref exp(::Type{ULogarithmic},::Real)) instead.
"""
struct ULogarithmic{T} <: Real
    # The stored logarithm: this value represents exp(log).
    log::T
    # The inner constructor is spelled as `exp` so that `exp(ULogarithmic{T}, x)`
    # builds the number exp(x) directly from log-space, without any exp/log calls.
    Base.exp(::Type{ULogarithmic{T}}, x::T) where {T<:Real} = new{T}(x)
end

"""
    Logarithmic(x)

Represents the real number `x` by storing its absolute value as a [`ULogarithmic`](@ref) and its sign bit.

!!! tip
    If you know `logx=log(abs(x))` then use [`exp(Logarithmic, logx)`](@ref exp(::Type{Logarithmic},::Real)) instead.
"""
struct Logarithmic{T} <: Real
    # Magnitude |x| stored logarithmically.
    abs::ULogarithmic{T}
    # true when the represented number is negative (IEEE-style sign bit, so zero
    # may be either +0 or -0).
    signbit::Bool
    Logarithmic{T}(abs::ULogarithmic{T}, signbit::Bool=false) where {T<:Real} = new{T}(abs, signbit)
end

# Convenience aliases for the common floating-point base types.
const ULogFloat16 = ULogarithmic{Float16}
const ULogFloat32 = ULogarithmic{Float32}
const ULogFloat64 = ULogarithmic{Float64}
const LogFloat16 = Logarithmic{Float16}
const LogFloat32 = Logarithmic{Float32}
const LogFloat64 = Logarithmic{Float64}

# Union of both logarithmic types, used by methods shared between them.
const AnyLogarithmic{T} = Union{ULogarithmic{T}, Logarithmic{T}}
### Constructors

# `exp(T, x)` constructs the logarithmic number representing exp(x) directly from the
# log-space value `x`, avoiding overflow/underflow from actually exponentiating.
function Base.exp(::Type{ULogarithmic{T}}, x::Real) where {T<:Real}
    exp(ULogarithmic{T}, convert(T, x))
end
function Base.exp(::Type{ULogarithmic}, x::T) where {T<:Real}
    exp(ULogarithmic{T}, x)
end
function Base.exp(::Type{Logarithmic{T}}, x::Real) where {T<:Real}
    Logarithmic{T}(exp(ULogarithmic{T}, x))
end
function Base.exp(::Type{Logarithmic}, x::T) where {T<:Real}
    exp(Logarithmic{T}, x)
end

# Internal shorthands for constructing a ULogarithmic from a log-space value.
uexp(x) = exp(ULogarithmic, x)
uexp(T,x) = exp(ULogarithmic{T}, x)

# Constructors from ordinary reals: take the log and store it. Note that
# `ULogarithmic(x)` throws a `DomainError` (from `log`) when `x < 0`.
function ULogarithmic{T}(x::Real) where {T<:Real}
    exp(ULogarithmic{T}, log(x))
end
function ULogarithmic{T}(x::ULogarithmic{T}) where {T<:Real}
    x
end
function ULogarithmic(x::Real)
    exp(ULogarithmic, log(x))
end
function ULogarithmic(x::ULogarithmic)
    x
end
# Signed constructors store the magnitude logarithmically together with the sign bit.
function Logarithmic{T}(x::Real) where {T<:Real}
    Logarithmic{T}(ULogarithmic{T}(abs(x)), signbit(x))
end
function Logarithmic{T}(x::Logarithmic{T}) where {T<:Real}
    x
end
function Logarithmic{T}(abs::ULogarithmic, signbit::Bool=false) where {T<:Real}
    Logarithmic{T}(ULogarithmic{T}(abs), signbit)
end
function Logarithmic(x::Real)
    Logarithmic(ULogarithmic(abs(x)), signbit(x))
end
function Logarithmic(abs::ULogarithmic{T}, signbit::Bool=false) where {T<:Real}
    Logarithmic{T}(abs, signbit)
end
function Logarithmic(x::Logarithmic)
    x
end
### float / big / signed / unsigned

# Convert to an ordinary floating-point number.
Base.float(x::AnyLogarithmic) = AbstractFloat(x)

# Exponentiate the stored logarithm to recover the represented value.
(::Type{T})(x::ULogarithmic) where {T<:AbstractFloat} = T(exp(float(x.log)))

function (::Type{T})(x::Logarithmic) where {T<:AbstractFloat}
    mag = float(x.abs)
    return x.signbit ? T(-mag) : T(mag)
end

# Widen the stored logarithm to BigFloat precision.
Base.big(x::ULogarithmic{T}) where {T} = uexp(big(x.log))
Base.big(x::Logarithmic) = Logarithmic(big(x.abs), x.signbit)

# An unsigned logarithmic number is already unsigned.
Base.unsigned(x::ULogarithmic) = x

# Dropping the sign is only valid for non-negative values (a negative zero is fine).
function Base.unsigned(x::Logarithmic)
    x.signbit && !iszero(x) && throw(DomainError(x))
    return x.abs
end

Base.signed(x::ULogarithmic) = Logarithmic(x)
Base.signed(x::Logarithmic) = x
### Type functions

# The ordinary float type corresponding to a logarithmic type.
Base.float(::Type{T}) where {T<:AnyLogarithmic} = typeof(float(one(T)))

# Lift widen/big/unsigned/signed from the base type to the wrapper type.
Base.widen(::Type{ULogarithmic{T}}) where {T} = ULogarithmic{widen(T)}
Base.widen(::Type{Logarithmic{T}}) where {T} = Logarithmic{widen(T)}

Base.big(::Type{ULogarithmic{T}}) where {T} = ULogarithmic{big(T)}
Base.big(::Type{Logarithmic{T}}) where {T} = Logarithmic{big(T)}

Base.unsigned(::Type{ULogarithmic{T}}) where {T} = ULogarithmic{T}
Base.unsigned(::Type{Logarithmic{T}}) where {T} = ULogarithmic{T}

Base.signed(::Type{ULogarithmic{T}}) where {T} = Logarithmic{T}
Base.signed(::Type{Logarithmic{T}}) where {T} = Logarithmic{T}
### Special values

# Zero is represented as exp(-Inf).
Base.zero(::Type{ULogarithmic{T}}) where {T} = uexp(T, -Inf)
Base.zero(::Type{ULogarithmic}) = uexp(-Inf)
Base.zero(::Type{Logarithmic{T}}) where {T} = Logarithmic(zero(ULogarithmic{T}))
Base.zero(::Type{Logarithmic}) = Logarithmic(zero(ULogarithmic))

# One is represented as exp(0).
Base.one(::Type{ULogarithmic{T}}) where {T} = uexp(T, zero(T))
Base.one(::Type{ULogarithmic}) = uexp(0.0)
Base.one(::Type{Logarithmic{T}}) where {T} = Logarithmic(one(ULogarithmic{T}))
Base.one(::Type{Logarithmic}) = Logarithmic(one(ULogarithmic))

# Extremes: unsigned values range over [exp(typemin(T)), exp(typemax(T))]; the
# signed typemin is the negation of the largest representable magnitude.
Base.typemin(::Type{ULogarithmic{T}}) where {T} = uexp(typemin(T))
Base.typemin(::Type{Logarithmic{T}}) where {T} = Logarithmic{T}(typemax(ULogarithmic{T}), true)
Base.typemax(::Type{ULogarithmic{T}}) where {T} = uexp(typemax(T))
Base.typemax(::Type{Logarithmic{T}}) where {T} = Logarithmic{T}(typemax(ULogarithmic{T}))
### Predicates

# x == 0 iff the stored log is -Inf.
Base.iszero(x::ULogarithmic) = isinf(x.log) && signbit(x.log)
Base.iszero(x::Logarithmic) = iszero(x.abs)

# x == 1 iff the stored log is zero (and, for signed numbers, the sign bit is clear).
Base.isone(x::ULogarithmic) = iszero(x.log)
Base.isone(x::Logarithmic) = isone(x.abs) && !x.signbit

# x == Inf iff the stored log is +Inf.
Base.isinf(x::ULogarithmic) = isinf(x.log) && !signbit(x.log)
Base.isinf(x::Logarithmic) = isinf(x.abs)

# Finite values have a finite log, except zero, whose log is -Inf.
Base.isfinite(x::ULogarithmic) = isfinite(x.log) || signbit(x.log)
Base.isfinite(x::Logarithmic) = isfinite(x.abs)

# NaN propagates through the stored log.
Base.isnan(x::ULogarithmic) = isnan(x.log)
Base.isnan(x::Logarithmic) = isnan(x.abs)
### Ordering

# sign returns NaN, zero or ±one, all of the same type as x.
Base.sign(x::ULogarithmic) = isnan(x) ? x : iszero(x) ? zero(x) : one(x)
Base.sign(x::Logarithmic) = isnan(x) ? x : iszero(x) ? zero(x) : x.signbit ? -one(x) : one(x)

# Unsigned numbers never carry a sign bit.
Base.signbit(x::ULogarithmic) = false
Base.signbit(x::Logarithmic) = x.signbit

Base.abs(x::ULogarithmic) = x
Base.abs(x::Logarithmic) = x.abs

# Equality compares stored logarithms; for signed numbers, +0 == -0.
Base.:(==)(x::ULogarithmic, y::ULogarithmic) = x.log == y.log
Base.:(==)(x::Logarithmic, y::Logarithmic) =
    (iszero(x) && iszero(y)) || (x.abs == y.abs && x.signbit == y.signbit)

# isequal distinguishes +0 from -0 and treats NaNs as equal, via isequal on the parts.
Base.isequal(x::ULogarithmic, y::ULogarithmic) = isequal(x.log, y.log)
Base.isequal(x::Logarithmic, y::Logarithmic) =
    isequal(x.abs, y.abs) && isequal(x.signbit, y.signbit)

# Approximate equality is delegated to the signed type.
Base.isapprox(x::ULogarithmic, y::ULogarithmic; kwargs...) =
    isapprox(Logarithmic(x), Logarithmic(y); kwargs...)
# Strict comparison delegates to the stored logarithms.
function Base.:(<)(x::ULogarithmic, y::ULogarithmic)
    x.log < y.log
end
# Signed `<` follows IEEE-style semantics: NaN compares false with everything and
# +0 / -0 compare equal (neither is less than the other).
function Base.:(<)(x::Logarithmic, y::Logarithmic)
    if isnan(x) || isnan(y)
        false
    elseif x.signbit
        if y.signbit
            # both negative: the larger magnitude is the smaller number
            y.abs < x.abs
        else
            # negative < non-negative, unless both are (signed) zeros
            !iszero(x) || !iszero(y)
        end
    else
        if y.signbit
            # a non-negative number is never less than a negative one
            false
        else
            x.abs < y.abs
        end
    end
end
function Base.:(≤)(x::ULogarithmic, y::ULogarithmic)
    x.log ≤ y.log
end
function Base.:(≤)(x::Logarithmic, y::Logarithmic)
    if isnan(x) || isnan(y)
        false
    elseif x.signbit
        if y.signbit
            y.abs ≤ x.abs
        else
            # negative ≤ non-negative always holds (covers -0 ≤ +0)
            true
        end
    else
        if y.signbit
            # non-negative ≤ negative only when both are zeros
            iszero(x) && iszero(y)
        else
            x.abs ≤ y.abs
        end
    end
end
function Base.cmp(x::ULogarithmic, y::ULogarithmic)
    cmp(x.log, y.log)
end
# isless defines a total order (used e.g. for sorting): unlike `<` it orders -0
# before +0, and NaN handling follows `isless` on the underlying values.
function Base.isless(x::ULogarithmic, y::ULogarithmic)
    isless(x.log, y.log)
end
function Base.isless(x::Logarithmic, y::Logarithmic)
    if x.signbit
        if y.signbit
            isless(y.abs, x.abs)
        else
            true
        end
    else
        if y.signbit
            false
        else
            isless(x.abs, y.abs)
        end
    end
end
# nextfloat/prevfloat step the *stored logarithm*, i.e. they move to the adjacent
# representable logarithmic number.
function Base.nextfloat(x::ULogarithmic)
    uexp(nextfloat(x.log))
end
# For signed numbers, moving towards +Inf shrinks the magnitude of a negative value.
function Base.nextfloat(x::Logarithmic)
    if x.signbit && !iszero(x)
        Logarithmic(prevfloat(x.abs), true)
    else
        Logarithmic(nextfloat(x.abs))
    end
end
function Base.prevfloat(x::ULogarithmic)
    uexp(prevfloat(x.log))
end
# Moving towards -Inf from a non-positive number grows the (negative) magnitude; from
# either signed zero it lands on the smallest-magnitude negative number.
function Base.prevfloat(x::Logarithmic)
    if x.signbit || iszero(x)
        Logarithmic(nextfloat(x.abs), true)
    else
        Logarithmic(prevfloat(x.abs))
    end
end
### Promotion

# Fallback: Union{} signals "no rule" to promote_type.
_promote_rule(::Type, ::Type) = Union{}
# Pairwise rules between the two wrapper types: the signed type wins (i > j picks
# Logarithmic, which appears second in the list), and the base types are promoted
# component-wise.
for (i, A) in enumerate([ULogarithmic, Logarithmic])
    for (j, B) in enumerate([ULogarithmic, Logarithmic])
        C = i > j ? A : B
        @eval begin
            _promote_rule(::Type{$A}, ::Type{$B}) = $C
            _promote_rule(::Type{$A}, ::Type{$B{T}}) where {T} = $C{T}
            _promote_rule(::Type{$A{S}}, ::Type{$B}) where {S} = $C{S}
            _promote_rule(::Type{$A{S}}, ::Type{$B{T}}) where {S,T} = $C{promote_type(S,T)}
        end
    end
end
function Base.promote_rule(::Type{T}, ::Type{R}) where {T<:AnyLogarithmic, R<:AnyLogarithmic}
    _promote_rule(T, R)
end
# Promotion with an ordinary Real: promote to ULogarithmic when R is provably
# non-negative (typemin(R) ≥ 0), otherwise to Logarithmic. @generated so the
# try/catch probing runs once per type pair, not on every promotion.
@generated function Base.promote_rule(::Type{T}, ::Type{R}) where {T<:AnyLogarithmic, R<:Real}
    # TODO: Think about this some more. Always return ULogarithmic? Always Logarithmic?
    isunsigned = try
        typemin(R) ≥ 0
    catch
        false
    end
    L = isunsigned ? ULogarithmic : Logarithmic
    R2 = try
        typeof(L(one(R)))
    catch
        Union{}
    end
    promote_type(T, R2)
end
# override the default promote_rule(BigFloat, <:Real) -> BigFloat
# which contradicts promote_rule(Logarithmic{BigFloat}, BigFloat) -> Logarithmic{BigFloat}
# and causes a stack overflow
Base.promote_rule(::Type{BigFloat}, ::Type{T}) where {T<:AnyLogarithmic} = promote_rule(ULogarithmic{BigFloat}, T)
### Arithmetic

Base.:(+)(x::AnyLogarithmic) = x
# Negating an unsigned number yields a signed one.
Base.:(-)(x::ULogarithmic) = Logarithmic(x, true)
Base.:(-)(x::Logarithmic) = Logarithmic(x.abs, !x.signbit)

# Addition in log-space ("logaddexp"): log(e^a + e^b) = max(a,b) + log1p(e^(a-b)).
# The equal-logs branch avoids Inf - Inf = NaN when both logs are ±Inf; subtracting
# zeros there keeps the result type consistent with the other branches.
function Base.:(+)(x::T, y::T) where {T<:ULogarithmic}
    if x.log == y.log
        uexp(x.log + log1p(exp(zero(y.log) - zero(x.log))))
    elseif x.log ≥ y.log
        uexp(x.log + log1p(exp(y.log - x.log)))
    else
        uexp(y.log + log1p(exp(x.log - y.log)))
    end
end
# Signed addition: equal signs add magnitudes; opposite signs subtract the smaller
# magnitude from the larger and keep the sign of the larger operand.
function Base.:(+)(x::T, y::T) where {T<:Logarithmic}
    if x.signbit == y.signbit
        Logarithmic(x.abs + y.abs, x.signbit)
    elseif x.abs ≥ y.abs
        Logarithmic(x.abs - y.abs, x.signbit)
    else
        Logarithmic(y.abs - x.abs, y.signbit)
    end
end
# Subtraction in log-space: log(e^a - e^b) = a + log(1 - e^(b-a)); only defined for
# a ≥ b since an unsigned result must be non-negative.
function Base.:(-)(x::T, y::T) where {T<:ULogarithmic}
    if x.log < y.log
        throw(DomainError((x, y), "difference is negative"))
    else
        d = y.log - x.log
        if isnan(d) && iszero(x) && iszero(y)
            # 0 - 0: (-Inf) - (-Inf) is NaN, but the answer is zero
            d = zero(y.log) - zero(x.log)
        end
        if d < -1
            c = log1p(-exp(d))
        else
            # accurate when d is small
            # e.g. exp(1e-100) - exp(-1e-100) ≈ exp(-229.56536)
            c = log(-expm1(d))
        end
        uexp(x.log + c)
    end
end
function Base.:(-)(x::T, y::T) where {T<:Logarithmic}
    if x.signbit == y.signbit
        if x.abs ≥ y.abs
            Logarithmic(x.abs - y.abs, x.signbit)
        else
            Logarithmic(y.abs - x.abs, !y.signbit)
        end
    else
        Logarithmic(x.abs + y.abs, x.signbit)
    end
end
# Multiplication/division are addition/subtraction of logs; sign bits xor.
function Base.:(*)(x::T, y::T) where {T<:ULogarithmic}
    uexp(x.log + y.log)
end
function Base.:(*)(x::T, y::T) where {T<:Logarithmic}
    Logarithmic(x.abs * y.abs, x.signbit ⊻ y.signbit)
end
function Base.:(/)(x::T, y::T) where {T<:ULogarithmic}
    uexp(x.log - y.log)
end
function Base.:(/)(x::T, y::T) where {T<:Logarithmic}
    Logarithmic(x.abs / y.abs, x.signbit ⊻ y.signbit)
end

# Powers scale the log: (e^a)^n = e^(a*n). The Rational/Integer methods mirror the
# Real one, presumably to avoid dispatch ambiguities with Base's `^` — TODO confirm.
Base.:(^)(x::ULogarithmic, n::Real) = _pow(x, n)
Base.:(^)(x::ULogarithmic, n::Rational) = _pow(x, n)
Base.:(^)(x::ULogarithmic, n::Integer) = _pow(x, n)
function _pow(x::ULogarithmic, n::Real)
    if n == 0
        # x^0 == 1 even for x == 0 or Inf: avoids ±Inf * 0 = NaN in the log
        uexp(zero(x.log) * n)
    else
        uexp(x.log * n)
    end
end
Base.:(^)(x::Logarithmic, n::Real) = _pow(x, n)
Base.:(^)(x::Logarithmic, n::Rational) = _pow(x, n)
Base.:(^)(x::Logarithmic, n::Integer) = _pow(x, n)
# Integer powers of negative numbers are defined: the sign survives odd exponents.
function _pow(x::Logarithmic, n::Integer)
    Logarithmic(x.abs^n, x.signbit & isodd(n))
end
# Non-integer powers of (non-zero) negative numbers are a domain error.
function _pow(x::Logarithmic, n::Real)
    x.signbit && !iszero(x) && throw(DomainError(x))
    Logarithmic(x.abs^n)
end
# 1/e^a = e^(-a).
function Base.inv(x::ULogarithmic)
    uexp(-x.log)
end
function Base.inv(x::Logarithmic)
    Logarithmic(inv(x.abs), x.signbit)
end
# log is just a field access: the logarithm of the represented number is stored.
function Base.log(x::ULogarithmic)
    x.log
end
function Base.log(x::Logarithmic)
    x.signbit && !iszero(x) && throw(DomainError(x))
    log(x.abs)
end
# log2/log10 via change of base: log_b(x) = log(x) / log(b).
function Base.log2(x::AnyLogarithmic)
    logx = log(x)
    log2 = log(oftype(logx, 2))
    logx / log2
end
function Base.log10(x::AnyLogarithmic)
    logx = log(x)
    log10 = log(oftype(logx, 10))
    logx / log10
end
function Base.log1p(x::AnyLogarithmic)
    log(one(x) + x)
end
# exp re-exponentiates: the new stored log is exp(old log).
function Base.exp(x::ULogarithmic)
    uexp(exp(x.log))
end
function Base.exp(x::Logarithmic)
    x.signbit ? inv(exp(x.abs)) : exp(x.abs)
end
# Roots divide the stored log: (e^a)^(1/k) = e^(a/k).
function Base.sqrt(x::ULogarithmic)
    uexp(x.log / oftype(x.log, 2))
end
function Base.sqrt(x::Logarithmic)
    if !x.signbit || iszero(x)
        Logarithmic(sqrt(x.abs))
    else
        throw(DomainError(x))
    end
end
function Base.cbrt(x::ULogarithmic)
    uexp(x.log / oftype(x.log, 3))
end
# The cube root of a negative number is the negated cube root of its magnitude.
function Base.cbrt(x::Logarithmic)
    Logarithmic(cbrt(x.abs), x.signbit)
end
if hasproperty(Base, :fourthroot)
    # fourthroot was introduced in julia 1.10
    function Base.fourthroot(x::ULogarithmic)
        uexp(x.log / oftype(x.log, 4))
    end
    function Base.fourthroot(x::Logarithmic)
        if !x.signbit || iszero(x)
            Logarithmic(fourthroot(x.abs))
        else
            throw(DomainError(x))
        end
    end
end
### Hash

# Type-specific salt so logarithmic numbers do not hash like their raw log values.
const _HASH = hash(ULogarithmic)

function Base.hash(x::ULogarithmic, h::UInt)
    hash(x.log, hash(_HASH, h))
end
function Base.hash(x::Logarithmic, h::UInt)
    # hash the same as ULogarithmic when signbit==false
    # TODO: hash special values (-Inf, -1, 0, 1, Inf) the same as Float64?
    hash(x.abs, x.signbit ? hash(_HASH, h) : h)
end
### Random

# Uniform sampling constructed directly in log-space: if E ~ Exponential(1) then
# exp(-E) is uniformly distributed on (0, 1], so no exp/log is ever evaluated.
function Base.rand(rng::AbstractRNG, ::Random.SamplerType{E}) where {T<:AbstractFloat, E<:AnyLogarithmic{T}}
    exp(E, -randexp(rng, T))
end
# Fallback for unparametrised types: sample at Float64 precision.
function Base.rand(rng::AbstractRNG, ::Random.SamplerType{E}) where {E<:AnyLogarithmic}
    exp(E, -randexp(rng))
end
### IO

# Displayed as `exp(log)` for unsigned numbers, and with an explicit `+`/`-` prefix
# for signed ones (e.g. `-exp(2.5)`).
function Base.show(io::IO, x::ULogarithmic)
    print(io, "exp(")
    show(io, x.log)
    print(io, ")")
end
function Base.show(io::IO, x::Logarithmic)
    print(io, x.signbit ? "-" : "+")
    show(io, x.abs)
end

# Binary serialisation: write the stored log (plus the sign flag for signed numbers);
# `read` is the exact inverse.
function Base.write(io::IO, x::ULogarithmic)
    write(io, x.log)
end
function Base.write(io::IO, x::Logarithmic)
    write(io, x.abs, x.signbit)
end
function Base.read(io::IO, ::Type{ULogarithmic{T}}) where {T}
    uexp(T, read(io, T))
end
function Base.read(io::IO, ::Type{Logarithmic{T}}) where {T}
    abs = read(io, ULogarithmic{T})
    signbit = read(io, Bool)
    Logarithmic{T}(abs, signbit)
end
end
| LogarithmicNumbers | https://github.com/cjdoris/LogarithmicNumbers.jl.git |
|
[
"MIT"
] | 1.4.0 | 427421c277f82c5749002c1b23cb1aac91beb0a8 | code | 495 | using ForwardDiff: derivative, gradient
using LogarithmicNumbers
using Test
# `f` computes log(exp(x) * x) with an ordinary float exp; the g/h variants route
# the exponential through a (U)Logarithmic, which stores the value in log-space.
f(x) = log(exp(x) * x)
g1(x) = log(exp(ULogarithmic, x) * x)
g2(x) = log(exp(ULogFloat64, x) * x)
h1(x) = log(exp(Logarithmic, x) * x)
h2(x) = log(exp(LogFloat64, x) * x)
# At x = 1000, d/dx log(exp(x) * x) = 1 + 1/x.
x = 1000
d = 1 + inv(x)
# The naive float version yields NaN at x = 1000, while every logarithmic variant —
# whether the logarithmic type enters via the input or via the exp constructor —
# recovers the correct derivative.
@test isnan(derivative(f, x))
@test derivative(f, LogFloat64(x)) ≈ d
@test derivative(f, ULogFloat64(x)) ≈ d
@test derivative(g1, x) ≈ d
@test derivative(g2, x) ≈ d
@test derivative(h1, x) ≈ d
@test derivative(h2, x) ≈ d
| LogarithmicNumbers | https://github.com/cjdoris/LogarithmicNumbers.jl.git |
|
[
"MIT"
] | 1.4.0 | 427421c277f82c5749002c1b23cb1aac91beb0a8 | code | 20645 | using LogarithmicNumbers, Test, Aqua
# Loose equality used throughout the tests: values match when they agree within an
# absolute tolerance of 1e-3, or when both are NaN. On failure the offending pair
# is printed to aid debugging, and false is returned.
function _approx(x, y)
    matches = isapprox(x, y, atol=1e-3) || (isnan(x) && isnan(y))
    if !matches
        @show x y
    end
    return matches
end
# use these to check for type stability
# Each wrapper forwards to the same-named Base function through `Test.@inferred`, so
# any call whose return type is not concretely inferable raises an error.
_exp(args...) = @inferred exp(args...)
_log(args...) = @inferred log(args...)
_add(args...) = @inferred +(args...)
_sub(args...) = @inferred -(args...)
_mul(args...) = @inferred *(args...)
_div(args...) = @inferred /(args...)
_pow(args...) = @inferred ^(args...)
_sqrt(args...) = @inferred sqrt(args...)
_cbrt(args...) = @inferred cbrt(args...)
_fourthroot(args...) = @inferred fourthroot(args...)
_float(args...) = @inferred float(args...)
_inv(args...) = @inferred inv(args...)
_prod(args...) = @inferred prod(args...)
_sum(args...) = @inferred sum(args...)
# sample values
# A mix of integers and floats covering negatives, zeros, ±Inf and NaN, used to
# exercise every branch of the arithmetic and comparison code.
vals = Any[-Inf, -20, -20.0, -2, -2.0, -1, -1.0, -0.5, 0, 0.0, 0.5, 1, 1.0, 2, 2.0, 20, 20.0, Inf, NaN]
# sample vectors
# Restricted subsets of `vals` (finite-only, nonzero-only, non-negative, integer).
vecs = (
    Float64[x for x in vals if x isa Float64],
    Float64[x for x in vals if x isa Float64 && !isinf(x)],
    Float64[x for x in vals if x isa Float64 && !iszero(x)],
    Float64[x for x in vals if x isa Float64 && !isinf(x) && x ≥ 0],
    Int[x for x in vals if x isa Int],
    Int[x for x in vals if x isa Int && x ≥ 0],
)
# Types under test: `atypes` are the unparametrised wrappers; `atypes2` additionally
# includes the concrete Float32-based aliases.
atypes = (ULogarithmic, Logarithmic)
atypes2 = (ULogarithmic, ULogFloat32, Logarithmic, LogFloat32)
@testset verbose=true "LogarithmicNumbers" begin
@testset verbose=true "Aqua" begin
Aqua.test_all(LogarithmicNumbers)
end
@testset "types" begin
@test @isdefined ULogarithmic
@test @isdefined Logarithmic
@test @isdefined ULogFloat16
@test @isdefined ULogFloat32
@test @isdefined ULogFloat64
@test @isdefined LogFloat16
@test @isdefined LogFloat32
@test @isdefined LogFloat64
end
@testset "exp" begin
for A in atypes2, x in vals
y = _exp(A, x)
if A in (ULogarithmic, Logarithmic)
@test y isa A{typeof(x)}
else
@test y isa A
end
end
end
@testset "construct" begin
for A in atypes2, x in vals
if A <: ULogarithmic && x < 0
@test_throws DomainError @inferred A(x)
else
y = @inferred A(x)
@test y isa A
if A <: ULogarithmic
@test _approx(y.log, log(x))
else
@test _approx(y.abs.log, log(abs(x)))
@test _approx(y.signbit, signbit(x))
end
end
end
@test ULogFloat64(ULogFloat64(0)) === ULogFloat64(0)
@test ULogFloat64(ULogFloat32(0)) === ULogFloat64(0)
@test ULogarithmic(ULogFloat32(0)) === ULogFloat32(0)
@test LogFloat64(LogFloat64(0)) === LogFloat64(0)
@test LogFloat64(LogFloat32(0)) === LogFloat64(0)
@test Logarithmic(LogFloat32(0)) === LogFloat32(0)
@test LogFloat64(ULogarithmic(0)) == LogFloat64(0)
@test Logarithmic(ULogarithmic(0)) === Logarithmic(0)
end
@testset "float" begin
for A in atypes2, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test _float(y) isa AbstractFloat
@test _approx(_float(y), x)
@test @inferred(AbstractFloat(y)) isa AbstractFloat
@test _approx(@inferred(AbstractFloat(y)), x)
@test @inferred(Float64(y)) isa Float64
@test _approx(@inferred(Float64(y)), x)
@test @inferred(Float32(y)) isa Float32
@test _approx(@inferred(Float32(y)), x)
end
end
@testset "promote" begin
for A1 in atypes, A2 in atypes, T1 in (nothing, Int16, Int32, Float32, Float64, BigFloat), T2 in (nothing, Int16, Float32, BigFloat)
A3 = A1 <: Logarithmic || A2 <: Logarithmic ? Logarithmic : ULogarithmic
T3 = T1 === nothing ? T2 === nothing ? nothing : T2 : T2 === nothing ? T1 : promote_type(T1, T2)
B1 = T1 === nothing ? A1 : A1{T1}
B2 = T2 === nothing ? A2 : A2{T2}
B3 = T3 === nothing ? A3 : A3{T3}
@test promote_type(B1, B2) == B3
end
for A1 in atypes, T1 in (nothing, Int16, Int32, Float32, Float64, BigFloat), T2 in (Int16, Float32, BigFloat)
T3 = float(T2)
B1 = T1 === nothing ? A1 : A1{T1}
B3 = Logarithmic{T1 === nothing ? T3 : promote_type(T1, T3)}
@test promote_type(B1, T2) == B3
end
end
@testset "big" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
z = @inferred big(y)
B = A <: ULogarithmic ? ULogarithmic{BigFloat} : Logarithmic{BigFloat}
@test z isa B
@test _approx(_float(z), x)
end
end
@testset "signed" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test signed(y) isa Logarithmic
if x < 0
@test_throws DomainError @inferred(unsigned(y))
else
@test @inferred(unsigned(y)) isa ULogarithmic
end
end
end
@testset "type functions" begin
@testset "float" begin
@test float(ULogFloat64) == Float64
@test float(ULogFloat32) == Float32
@test float(LogFloat64) == Float64
@test float(LogFloat32) == Float32
end
@testset "widen" begin
@test widen(ULogFloat64) == ULogarithmic{BigFloat}
@test widen(ULogFloat32) == ULogFloat64
@test widen(LogFloat64) == Logarithmic{BigFloat}
@test widen(LogFloat32) == LogFloat64
end
@testset "big" begin
@test big(ULogFloat64) == ULogarithmic{BigFloat}
@test big(ULogFloat32) == ULogarithmic{BigFloat}
@test big(LogFloat64) == Logarithmic{BigFloat}
@test big(LogFloat32) == Logarithmic{BigFloat}
end
@testset "unsigned" begin
@test unsigned(ULogFloat64) == ULogFloat64
@test unsigned(ULogFloat32) == ULogFloat32
@test unsigned(LogFloat64) == ULogFloat64
@test unsigned(LogFloat32) == ULogFloat32
end
@testset "signed" begin
@test signed(ULogFloat64) == LogFloat64
@test signed(ULogFloat32) == LogFloat32
@test signed(LogFloat64) == LogFloat64
@test signed(LogFloat32) == LogFloat32
end
end
@testset "special values" begin
@testset "zero" begin
@test zero(ULogarithmic) === exp(ULogarithmic, -Inf)
@test zero(ULogFloat64) === exp(ULogFloat64, -Inf)
@test zero(ULogFloat32) === exp(ULogFloat32, -Inf)
@test zero(Logarithmic) === Logarithmic(zero(ULogarithmic))
@test zero(LogFloat64) === LogFloat64(zero(ULogFloat64))
@test zero(LogFloat32) === LogFloat32(zero(ULogFloat32))
end
@testset "one" begin
@test one(ULogarithmic) === exp(ULogarithmic, 0.0)
@test one(ULogFloat64) === exp(ULogFloat64, 0.0)
@test one(ULogFloat32) === exp(ULogFloat32, 0.0)
@test one(Logarithmic) === Logarithmic(one(ULogarithmic))
@test one(LogFloat64) === LogFloat64(one(ULogFloat64))
@test one(LogFloat32) === LogFloat32(one(ULogFloat32))
end
@testset "typemin" begin
@test typemin(ULogFloat64) === ULogFloat64(0.0)
@test typemin(ULogFloat32) === ULogFloat32(0.0)
@test typemin(LogFloat64) === LogFloat64(-Inf)
@test typemin(LogFloat32) === LogFloat32(-Inf)
end
@testset "typemax" begin
@test typemax(ULogFloat64) === ULogFloat64(Inf)
@test typemax(ULogFloat32) === ULogFloat32(Inf)
@test typemax(LogFloat64) === LogFloat64(Inf)
@test typemax(LogFloat32) === LogFloat32(Inf)
end
end
@testset "predicates" begin
@testset "iszero" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test @inferred(iszero(y)) == iszero(x)
end
end
@testset "isone" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test @inferred(isone(y)) == isone(x)
end
end
@testset "isinf" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test @inferred(isinf(y)) == isinf(x)
end
end
@testset "isfinite" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test @inferred(isfinite(y)) == isfinite(x)
end
end
@testset "isnan" begin
for A in atypes, x in (vals..., NaN, -NaN)
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test @inferred(isnan(y)) == isnan(x)
end
end
end
@testset "ordering" begin
@testset "sign" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test _float(@inferred(sign(y))) === float(sign(x))
end
end
@testset "signbit" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test @inferred(signbit(y)) == signbit(x)
end
end
@testset "abs" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test _approx(_float(@inferred(abs(y))), abs(x))
end
end
@testset "==" begin
for A in atypes, x1 in vals, x2 in vals
A <: ULogarithmic && (x1 < 0 || x2 < 0) && continue
y1 = A(x1)
y2 = A(x2)
@test @inferred(y1 == y2) == (x1 == x2)
end
end
@testset "isequal" begin
for A in atypes, x1 in vals, x2 in vals
A <: ULogarithmic && (x1 < 0 || x2 < 0) && continue
y1 = A(x1)
y2 = A(x2)
@test @inferred(isequal(y1, y2)) == isequal(x1, x2)
end
end
@testset "isapprox" begin
for A in atypes, x1 in vals
A <: ULogarithmic && x1 < 0 && continue
x2 = x1 + 1e-12
y1 = A(x1)
y2 = A(x2)
@test @inferred(isapprox(y1, y2; atol=1e-11)) == isapprox(x1,x2; atol=1e-11)
end
end
@testset "<" begin
for A in atypes, x1 in vals, x2 in vals
A <: ULogarithmic && (x1 < 0 || x2 < 0) && continue
y1 = A(x1)
y2 = A(x2)
@test @inferred(y1 < y2) == (x1 < x2)
end
end
@testset "β€" begin
for A in atypes, x1 in vals, x2 in vals
A <: ULogarithmic && (x1 < 0 || x2 < 0) && continue
y1 = A(x1)
y2 = A(x2)
@test @inferred(y1 β€ y2) == (x1 β€ x2)
end
end
@testset "cmp" begin
for A in atypes, x1 in vals, x2 in vals
A <: ULogarithmic && (x1 < 0 || x2 < 0) && continue
y1 = A(x1)
y2 = A(x2)
@test @inferred(cmp(y1, y2)) == cmp(x1, x2)
end
end
@testset "isless" begin
for A in atypes, x1 in vals, x2 in vals
A <: ULogarithmic && (x1 < 0 || x2 < 0) && continue
y1 = A(x1)
y2 = A(x2)
@test @inferred(isless(y1, y2)) == isless(x1, x2)
end
end
@testset "nextfloat" begin
for A in atypes, x in vals
x isa AbstractFloat || continue
@test @inferred(nextfloat(_exp(A, x))) === _exp(A, nextfloat(x))
end
end
@testset "prevfloat" begin
for A in atypes, x in vals
x isa AbstractFloat || continue
if A <: Logarithmic && x == -Inf
@test @inferred(prevfloat(_exp(A, x))) === -_exp(A, nextfloat(-Inf))
else
@test @inferred(prevfloat(_exp(A, x))) === _exp(A, prevfloat(x))
end
end
end
end
@testset "arithmetic" begin
@testset "pos" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
@test _approx(_float(+A(x)), float(x))
end
end
@testset "neg" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
@test _approx(_float(-A(x)), float(-x))
end
end
@testset "add" begin
for A in atypes, x in vals, y in vals
A <: ULogarithmic && (x < 0 || y < 0) && continue
@test _approx(_float(_add(A(x), A(y))), x+y)
end
# check accuracy
# log(exp(1000) + exp(1001)) = 1000 + log(1 + exp(1))
@test _approx(log(exp(Logarithmic, 1000) + exp(Logarithmic, 1001)), 1001.313261)
end
@testset "sum" begin
for A in atypes, xs in vecs
A <: ULogarithmic && any(x<0 for x in xs) && continue
if eltype(xs) <: AbstractFloat
@test _approx(_float(_sum(map(x->A(x),xs))), sum(xs))
else
# sum is not type-stable because typeof(xs[1]+xs[2]) != typeof(xs[1]).
# hence this test is the same as above but without the stability check.
# we don't promise type stability unless the base type is a float, so
# this isn't a broken test.
@test _approx(_float(sum(map(x->A(x),xs))), sum(xs))
end
end
end
# Subtraction: an unsigned ULogarithmic cannot represent x - y < 0, so that
# combination must throw; signed Logarithmic handles both signs.
@testset "sub" begin
for A in atypes, x in vals, y in vals
A <: ULogarithmic && (x < 0 || y < 0) && continue
if A <: ULogarithmic && x < y
@test_throws DomainError _float(_sub(A(x), A(y)))
else
@test _approx(_float(_sub(A(x), A(y))), x-y)
end
end
# check accuracy
# log(exp(1001) - exp(1000)) == 1000 + log(exp(1) - 1)
@test _approx(log(exp(Logarithmic, 1001) - exp(Logarithmic, 1000)), 1000.541324)
# log(exp(x) - exp(-x)) == x + log(1 - exp(-2x)) == x + log(-expm1(-2x))
@test _approx(log(exp(Logarithmic, 1e-100) - exp(Logarithmic, -1e-100)), -229.565362)
end
@testset "mul" begin
for A in atypes, x in vals, y in vals
A <: ULogarithmic && (x < 0 || y < 0) && continue
@test _approx(_float(_mul(A(x), A(y))), x * y)
end
end
@testset "prod" begin
for A in atypes, xs in vecs
A <: ULogarithmic && any(x<0 for x in xs) && continue
@test _approx(_float(_prod(map(x->A(x), xs))), prod(xs))
end
end
@testset "div" begin
for A in atypes, x in vals, y in vals
A <: ULogarithmic && (x < 0 || y < 0) && continue
@test _approx(_float(_div(A(x), A(y))), x / y)
end
end
# Powers are only tested for non-negative bases (fractional exponents of
# negative reals are undefined over the reals).
@testset "pow" begin
for A in atypes, x in vals, n in (-2,-1,0,1,2,-1.1,0.0,2.3)
x < 0 && continue
@test _approx(_float(_pow(A(x), n)), float(x)^n)
end
end
# Roots of negative values: ULogarithmic can't even be constructed from a
# negative, so it is skipped; other types must throw DomainError.
@testset "sqrt" begin
for A in atypes, x in vals
if x < 0
A <: ULogarithmic && continue
@test_throws DomainError _sqrt(A(x))
else
@test _approx(_float(_sqrt(A(x))), sqrt(float(x)))
end
end
end
# cbrt is defined for negative reals, so no throw branch here.
@testset "cbrt" begin
for A in atypes, x in vals
x < 0 && A <: ULogarithmic && continue
@test _approx(_float(_cbrt(A(x))), cbrt(float(x)))
end
end
# fourthroot only exists in newer Base versions, hence the feature check.
@testset "fourthroot" begin
if hasproperty(Base, :fourthroot)
for A in atypes, x in vals
if x < 0
A <: ULogarithmic && continue
@test_throws DomainError _fourthroot(A(x))
else
@test _approx(_float(_fourthroot(A(x))), fourthroot(float(x)))
end
end
end
end
@testset "inv" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
@test _approx(_float(_inv(A(x))), inv(x))
end
end
# log is exact on these types: log(exp(A, x)) must round-trip to x exactly.
@testset "log" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
@test _log(_exp(A, x)) === x
if x < 0
@test_throws DomainError _log(A(x))
else
@test _approx(_log(A(x)), log(x))
end
end
@test _approx(log(exp(Logarithmic, 1000)), 1000)
end
@testset "log2" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
if x < 0
@test_throws DomainError log2(A(x))
else
@test _approx(log2(A(x)), log2(x))
end
end
# log2(exp(x)) = x / log(2)
@test _approx(log2(exp(Logarithmic, 1000)), 1442.695040)
end
@testset "log10" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
if x < 0
@test_throws DomainError log10(A(x))
else
@test _approx(log10(A(x)), log10(x))
end
end
# log10(exp(x)) = x / log(10)
@test _approx(log10(exp(Logarithmic, 1000)), 434.294481)
end
# log1p's domain is x >= -1, hence the different throw condition.
@testset "log1p" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
if x < -1
@test_throws DomainError log1p(A(x))
else
@test _approx(log1p(A(x)), log1p(x))
end
end
@test _approx(log1p(exp(Logarithmic, 1000)), 1000.0)
end
@testset "exp" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
@test _approx(_float(exp(A(x))), exp(x))
end
end
end
# hash must agree between the signed and unsigned representations of the same
# magnitude, while distinguishing sign.
@testset "hash" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
@test hash(y) isa UInt
end
for x in vals
x < 0 && continue
y1 = ULogarithmic(x)
y2 = Logarithmic(x)
@test hash(y1) == hash(y2)
@test hash(-y1) == hash(-y2)
@test hash(y1) != hash(-y1)
end
end
# rand on these types draws from the unit interval.
@testset "random" begin
for A in atypes2
xs = rand(A, 1000)
@test all(x isa A for x in xs)
@test all(0 β€ x β€ 1 for x in xs)
end
end
@testset "IO" begin
# Signed Logarithmic always prints an explicit sign; ULogarithmic does not.
@testset "show" begin
@test repr(_exp(ULogarithmic, -12)) == "exp(-12)"
@test repr(_exp(Logarithmic, -34)) == "+exp(-34)"
@test repr(-_exp(Logarithmic, -45)) == "-exp(-45)"
end
# Binary write/read must round-trip to an identical (===) value.
@testset "read / write" begin
for A in atypes, x in vals
A <: ULogarithmic && x < 0 && continue
y = A(x)
io = IOBuffer()
write(io, y)
seekstart(io)
z = read(io, typeof(y))
@test y === z
end
end
end
# ForwardDiff integration is only supported on Julia 1.9+ (package extensions).
@testset "ForwardDiff" begin
if VERSION >= v"1.9"
include("forwarddiff.jl")
end
end
end
| LogarithmicNumbers | https://github.com/cjdoris/LogarithmicNumbers.jl.git |
|
[
"MIT"
] | 1.4.0 | 427421c277f82c5749002c1b23cb1aac91beb0a8 | docs | 3724 | # LogarithmicNumbers.jl
[](https://www.repostatus.org/#active)
[](https://github.com/cjdoris/LogarithmicNumbers.jl/actions?query=workflow%3ATests)
[](https://codecov.io/gh/cjdoris/LogarithmicNumbers.jl)
A [**logarithmic number system**](https://en.wikipedia.org/wiki/Logarithmic_number_system)
for Julia.
Provides the signed `Logarithmic` and unsigned `ULogarithmic` types for representing real
numbers on a logarithmic scale.
This is useful when numbers are too big or small to fit accurately into a `Float64` and you
only really care about magnitude.
For example, it can be useful to represent probabilities in this form, and you don't need to
worry about getting zero when multiplying many of them together.
## Installation
```
pkg> add LogarithmicNumbers
```
## Example
```julia
julia> using LogarithmicNumbers
julia> ULogarithmic(2.7)
exp(0.9932517730102834)
julia> float(ans)
2.7
julia> x = exp(ULogarithmic, 1000) - exp(ULogarithmic, 998)
exp(999.8545865421312)
julia> float(x) # overflows
Inf
julia> log(x)
999.8545865421312
```
## Documentation
### Exported types
* `ULogarithmic{T}` represents a non-negative real number by its logarithm of type `T`.
* `Logarithmic{T}` represents a real number by its absolute value as a `ULogarithmic{T}` and
a sign bit.
* `LogFloat64` is an alias for `Logarithmic{Float64}`. There are also `ULogFloat16`,
`ULogFloat32`, `ULogFloat64`, `LogFloat16`, and `LogFloat32`.
### Constructors
* `ULogarithmic(x)` and `Logarithmic(x)` represent the number `x`.
* `exp(ULogarithmic, logx)` represents `exp(logx)`, and `logx` can be huge. Use this when
you already know the logarithm `logx` of your number `x`.
### Functions in Base
* **Arithmetic:** `+`, `-`, `*`, `/`, `^`, `inv`, `prod`, `sum`, `sqrt`, `cbrt`, `fourthroot`.
* **Ordering:** `==`, `<`, `β€`, `cmp`, `isless`, `isequal`, `sign`, `signbit`, `abs`.
* **Logarithm:** `log`, `log2`, `log10`, `log1p`. These are returned as the base (non-logarithmic) type.
* **Conversion:** `float`, `unsigned`, `signed`, `widen`, `big`. These also operate on types.
* **Special values:** `zero`, `one`, `typemin`, `typemax`.
* **Predicates:** `iszero`, `isone`, `isinf`, `isfinite`, `isnan`.
* **IO:** `show`, `write`, `read`.
* **Random:** `rand(ULogarithmic)` is a random number in the unit interval.
* **Misc:** `nextfloat`, `prevfloat`, `hash`.
* **Note:** Any functions not mentioned here might be inaccurate.
### Interoperability with other packages
It is natural to use this package in conjunction with other packages which return
logarithms. The general pattern is that you can use `exp(ULogarithmic, logfunc(args...))`
instead of `func(args...)` to get the answer as a logarithmic number. Here are some
possibilities for `func`:
- [StatsFuns.jl](https://github.com/JuliaStats/StatsFuns.jl):
`normpdf`, `normcdf`, `normccdf`, plus equivalents for other distributions.
- [Distributions.jl](https://github.com/JuliaStats/Distributions.jl):
`pdf`, `cdf`, `ccdf`.
- [SpecialFunctions.jl](https://github.com/JuliaMath/SpecialFunctions.jl):
`gamma`, `factorial`, `beta`, `erfc`, `erfcx`.
#### ForwardDiff.jl
On Julia 1.9+, if you load [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl), you should be able to compute
- derivatives of functions involving `exp(Logarithmic, x)`
- derivatives of functions evaluated at `Logarithmic(x)`
| LogarithmicNumbers | https://github.com/cjdoris/LogarithmicNumbers.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 96 | using Pkg
# Bootstrap script: install common development tools into the default
# environment, then activate this project and install its dependencies.
Pkg.add(["Revise", "TestEnv", "JuliaFormatter"])
Pkg.activate(".")
Pkg.instantiate()
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 583 | using BenchmarkTools
using Random: seed!, default_rng
using ForwardDiff, Zygote
using TaylorSeries: Taylor1
using TaylorDiff
# Fixed seed so the randomly-generated benchmark inputs are reproducible.
rng = default_rng()
seed!(rng, 19260817)
using Logging
# Silence @warn and below so benchmark output stays clean.
Logging.disable_logging(Logging.Warn)
include("groups/scalar.jl")
include("groups/mlp.jl")
include("groups/taylor_expansion.jl")
include("groups/pinn.jl")
# Build the individual groups and assemble the top-level SUITE consumed by
# PkgBenchmark.
scalar = create_benchmark_scalar_function(sin, 0.1)
mlp = create_benchmark_mlp((2, 16), [2.0, 3.0], [1.0, 1.0])
const SUITE = BenchmarkGroup("scalar" => scalar,
"mlp" => mlp,
"taylor_expansion" => taylor_expansion,
"pinn" => pinn)
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1189 | using Pkg
Pkg.instantiate()
using TaylorDiff
using BenchmarkTools, PkgBenchmark
using BenchmarkTools: Trial, TrialEstimate, Parameters
import JSON: lower, json
using Dates
using HTTP: put
# Generic struct -> Dict conversion used when JSON-serializing benchmark records.
dict(x) = Dict(name => lower(getfield(x, name)) for name in fieldnames(typeof(x)))
# JSON lowering rules for PkgBenchmark / BenchmarkTools result types.
lower(results::BenchmarkResults) = dict(results)
function lower(group::BenchmarkGroup)
Dict(:tags => group.tags,
:data => [Dict(lower(value)..., "name" => key) for (key, value) in group.data])
end
# A trial is reported via its minimum estimate (least noisy summary).
lower(trial::Trial) = lower(minimum(trial))
lower(estimate::TrialEstimate) = dict(estimate)
lower(parameters::Parameters) = dict(parameters)
# Read the environment variable `name` and strip surrounding whitespace.
function getenv(name::String)
    return String(strip(ENV[name]))
end
# Assemble the report payload; commit/branch/tag metadata comes from Buildkite
# environment variables when running in CI, placeholders otherwise.
body = Dict("name" => "TaylorDiff.jl", "datetime" => now())
if "BUILDKITE" in keys(ENV)
body["commit"] = getenv("BUILDKITE_COMMIT")
body["branch"] = getenv("BUILDKITE_BRANCH")
body["tag"] = getenv("BUILDKITE_TAG")
else
body["commit"] = "abcdef123456"
body["branch"] = "dummy"
end
# Run the benchmark suite and upload the JSON-encoded results to the dashboard.
(; benchmarkgroup, benchmarkconfig) = benchmarkpkg(TaylorDiff)
body["config"] = benchmarkconfig
body["result"] = lower(benchmarkgroup)[:data]
put("https://benchmark-data.tansongchen.workers.dev"; body = json(body))
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1108 | function create_benchmark_mlp(mlp_conf::Tuple{Int, Int}, x::Vector{T},
l::Vector{T}) where {T <: Number}
# Benchmarks directional derivatives of a random one-hidden-layer MLP at input
# `x` along direction `l`: nested ForwardDiff calls (orders 1-7) against a
# single TaylorDiff higher-order pass.
input, hidden = mlp_conf
Wβ, Wβ, bβ, bβ = rand(hidden, input), rand(1, hidden), rand(hidden), rand(1)
Ο = exp
mlp(x) = first(Wβ * Ο.(Wβ * x + bβ) + bβ)
# f_k is the k-th derivative of t -> mlp(x + t*l), built by repeated nesting.
f1 = z -> ForwardDiff.derivative(t -> mlp(x + t * l), z)
f2 = x -> ForwardDiff.derivative(f1, x)
f3 = x -> ForwardDiff.derivative(f2, x)
f4 = x -> ForwardDiff.derivative(f3, x)
f5 = x -> ForwardDiff.derivative(f4, x)
f6 = x -> ForwardDiff.derivative(f5, x)
f7 = x -> ForwardDiff.derivative(f6, x)
functions = Function[f1, f2, f3, f4, f5, f6, f7]
forwarddiff, taylordiff = BenchmarkGroup(), BenchmarkGroup()
for (index, func) in enumerate(functions)
forwarddiff[index] = @benchmarkable $func(0)
end
# TaylorDiff computes an order-k derivative with N = k + 1 coefficients.
Ns = [Val{order + 1}() for order in 1:7]
for (index, N) in enumerate(Ns)
taylordiff[index] = @benchmarkable derivative($mlp, $x, $l, $N)
end
return BenchmarkGroup(["vector"],
"forwarddiff" => forwarddiff,
"taylordiff" => taylordiff)
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1310 | using Lux, Zygote
const input = 2
const hidden = 16
model = Chain(Dense(input => hidden, Lux.relu),
Dense(hidden => hidden, Lux.relu),
Dense(hidden => 1),
first)
ps, st = Lux.setup(rng, model)
# Trial solution: the x1(1-x1)x2(1-x2) factor vanishes on the boundary of the
# unit square, enforcing zero Dirichlet boundary conditions by construction.
trial(model, x) = x[1] * (1 - x[1]) * x[2] * (1 - x[2]) * model(x, ps, st)[1]
x = rand(Float32, input)
trial(model, x)
function loss_by_finitediff(model, x)
# Five-point central second-difference stencil with step Ξ΅ for the PDE
# residual, plus the source term sin(pi*x1)sin(pi*x2).
Ξ΅ = cbrt(eps(Float32))
Ξ΅β = [Ξ΅, 0]
Ξ΅β = [0, Ξ΅]
error = (trial(model, x + Ξ΅β) + trial(model, x - Ξ΅β) + trial(model, x + Ξ΅β) +
trial(model, x - Ξ΅β) - 4 * trial(model, x)) /
Ξ΅^2 + sin(Ο * x[1]) * sin(Ο * x[2])
abs2(error)
end
function loss_by_taylordiff(model, x)
f(x) = trial(model, x)
# Second directional derivatives along each axis: order 2 means Val(3)
# (N = order + 1 coefficients).
error = derivative(f, x, Float32[1, 0], Val(3)) +
derivative(f, x, Float32[0, 1], Val(3)) +
sin(Ο * x[1]) * sin(Ο * x[2])
abs2(error)
end
# Benchmark both the loss itself (primal) and its Zygote gradient.
pinn_t = BenchmarkGroup("primal" => (@benchmarkable loss_by_taylordiff($model, $x)),
"gradient" => (@benchmarkable gradient(loss_by_taylordiff, $model,
$x)))
pinn_f = BenchmarkGroup("primal" => (@benchmarkable loss_by_finitediff($model, $x)),
"gradient" => (@benchmarkable gradient($loss_by_finitediff, $model,
$x)))
pinn = BenchmarkGroup(["vector", "physical"], "taylordiff" => pinn_t,
"finitediff" => pinn_f)
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1041 | function create_benchmark_scalar_function(f::F, x::T) where {F, T <: Number}
# f_k is the k-th derivative of the scalar function f, built by nesting
# ForwardDiff.derivative k times (orders 1-9).
f1 = x -> ForwardDiff.derivative(f, x)
f2 = x -> ForwardDiff.derivative(f1, x)
f3 = x -> ForwardDiff.derivative(f2, x)
f4 = x -> ForwardDiff.derivative(f3, x)
f5 = x -> ForwardDiff.derivative(f4, x)
f6 = x -> ForwardDiff.derivative(f5, x)
f7 = x -> ForwardDiff.derivative(f6, x)
f8 = x -> ForwardDiff.derivative(f7, x)
f9 = x -> ForwardDiff.derivative(f8, x)
functions = Function[f1, f2, f3, f4, f5, f6, f7, f8, f9]
# $(Ref(x))[] keeps the input opaque to the benchmark's constant folding.
forwarddiff_group = BenchmarkGroup([index => @benchmarkable $func($(Ref(x))[])
for (index, func) in enumerate(functions)]...)
taylordiff_group = BenchmarkGroup()
# TaylorDiff computes an order-k derivative with N = k + 1 coefficients.
Ns = [Val{order + 1}() for order in 1:9]
for (index, N) in enumerate(Ns)
taylordiff_group[index] = @benchmarkable derivative($f, $x, one($x), $N)
end
return BenchmarkGroup(["scalar"],
"forwarddiff" => forwarddiff_group,
"taylordiff" => taylordiff_group)
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 576 | function my_calculation(t, p, Ξ±, s)
# Change of variables from t to the evaluation point x.
x = 1.0 / (1.0 - s * (t + 1) / (t - 1))
# Weighted sum of powers of x: sum_i p[i] * x^alpha[i].
rez = zero(x)
for i in eachindex(p)
rez += p[i] * x^Ξ±[i]
end
# Final prefactor sqrt(2) / (1 - t) applied to the accumulated sum.
return rez * sqrt(2) / (1 - t)
end
# Random problem of size N, expanded to Taylor order m.
N, m = 100, 20
p, Ξ±, s = rand(N), rand(N), rand()
# Normalize the weights to sum to one.
p ./= sum(p)
# Degree-m expansion points: a TaylorSeries polynomial and a TaylorDiff scalar
# (a TaylorScalar needs m + 1 coefficients to represent order m).
t_ts = Taylor1(eltype(p), m)
t_td = TaylorScalar{eltype(p), m + 1}(0.0, 1.0)
taylor_expansion = BenchmarkGroup(["scalar", "very-high-order"],
"taylorseries" => (@benchmarkable my_calculation($t_ts,
$p, $Ξ±,
$s)),
"taylordiff" => (@benchmarkable my_calculation($t_td, $p,
$Ξ±, $s)))
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 710 | using TaylorDiff
using Documenter
# Make `using TaylorDiff` implicit in all doctests.
DocMeta.setdocmeta!(TaylorDiff, :DocTestSetup, :(using TaylorDiff); recursive = true)
# Build the HTML documentation site.
makedocs(;
modules = [TaylorDiff],
authors = "Songchen Tan <[email protected]> and contributors",
repo = "https://github.com/JuliaDiff/TaylorDiff.jl/blob/{commit}{path}#{line}",
sitename = "TaylorDiff.jl",
format = Documenter.HTML(;
# Pretty URLs only on CI; local builds keep plain file links.
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://juliadiff.org/TaylorDiff.jl",
edit_link = "main",
assets = String[]),
pages = [
"Home" => "index.md",
"API" => "api.md"
])
# Deploy generated docs; dev docs track the `main` branch.
deploydocs(;
repo = "github.com/JuliaDiff/TaylorDiff.jl",
devbranch = "main")
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1425 | # The Jacobi- and Hessian-free Halley method for solving nonlinear equations
using TaylorDiff
using LinearAlgebra
using LinearSolve
function newton(f, x0, p; tol = 1e-10, maxiter = 100)
# Jacobian-free Newton iteration: each step solves J * u = -f(x) with GMRES,
# where the action J * u is evaluated matrix-free as the first directional
# derivative of f at x in direction u.
x = x0
for i in 1:maxiter
fx = f(x, p)
error = norm(fx)
println("Iteration $i: x = $x, f(x) = $fx, error = $error")
if error < tol
return x
end
# Matrix-free JVP used as the linear operator for the Krylov solver.
get_derivative = (v, u, a, b) -> v .= derivative(x -> f(x, p), x, u, 1)
operator = FunctionOperator(get_derivative, similar(x), similar(x))
problem = LinearProblem(operator, -fx)
sol = solve(problem, KrylovJL_GMRES())
x += sol.u
end
# Not converged within maxiter; return the last iterate.
return x
end
function halley(f, x0, p; tol = 1e-10, maxiter = 100)
# Jacobian- and Hessian-free Halley iteration: computes the Newton direction
# `a`, then a correction `b` from the second directional derivative H[a, a],
# both via matrix-free GMRES solves with the same operator.
x = x0
for i in 1:maxiter
fx = f(x, p)
error = norm(fx)
println("Iteration $i: x = $x, f(x) = $fx, error = $error")
if error < tol
return x
end
# Matrix-free JVP used as the linear operator for both solves.
get_derivative = (v, u, a, b) -> v .= derivative(x -> f(x, p), x, u, 1)
operator = FunctionOperator(get_derivative, similar(x), similar(x))
problem = LinearProblem(operator, -fx)
a = solve(problem, KrylovJL_GMRES()).u
# Second directional derivative of f along the Newton step a.
Haa = derivative(x -> f(x, p), x, a, 2)
problem2 = LinearProblem(operator, Haa)
b = solve(problem2, KrylovJL_GMRES()).u
# Componentwise Halley update.
x += (a .* a) ./ (a .+ b ./ 2)
end
# Not converged within maxiter; return the last iterate.
return x
end
# Componentwise residual for the square-root problem: zero exactly when
# each component of x squares to the corresponding component of p.
function f(x, p)
    return x .^ 2 - p
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 3316 | using TaylorDiff
using TaylorSeries
using TaylorIntegration: jetcoeffs!
using BenchmarkTools
"""
No magic, just a type-stable way to generate a new object since TaylorScalar is immutable
"""
function update_coefficient(x::TaylorScalar{T, N}, index::Integer, value::T) where {T, N}
return TaylorScalar(ntuple(i -> (i == index ? value : x.value[i]), Val{N}()))
end
"""
Computes the taylor integration of order N - 1, i.e. N = order + 1
eqsdiff: RHS
t: constructed by TaylorScalar{T, N}(t0, 1), which means unit perturbation
x0: initial value
"""
function jetcoeffs_taylordiff(eqsdiff::Function, t::TaylorScalar{T, N}, x0::U,
params) where
{T <: Real, U <: Number, N}
x = TaylorScalar{U, N}(x0) # x.values[1] is defined, others are 0
for index in 1:(N - 1) # computes x.values[index + 1]
f = eqsdiff(x, params, t)
df = TaylorDiff.extract_derivative(f, index)
x = update_coefficient(x, index + 1, df)
end
x
end
"""
Computes the taylor integration of order N - 1, i.e. N = order + 1
eqsdiff!: RHS, in non-allocation form
t: constructed by TaylorScalar{T, N}(t0, 1), which means unit perturbation
x0: initial value
"""
function jetcoeffs_array_taylordiff(
eqsdiff!::Function, t::TaylorScalar{T, N}, x0::AbstractArray{U, D},
params) where
{T <: Real, U <: Number, N, D}
x = map(TaylorScalar{U, N}, x0) # x.values[1] is defined, others are 0
f = similar(x)
for index in 1:(N - 1) # computes x.values[index + 1]
eqsdiff!(f, x, params, t)
df = TaylorDiff.extract_derivative.(f, index)
x = update_coefficient.(x, index + 1, df)
end
x
end
"""
In TaylorDiff.jl, the polynomial coefficients are just the n-th order derivatives,
not normalized by n!. So to compare with TaylorSeries.jl, one need to normalize
"""
function normalize_taylordiff_coeffs(t::TaylorScalar)
return [x / factorial(i - 1) for (i, x) in enumerate(t.value)]
end
function scalar_test()
# Scalar ODE x' = x^2: benchmark TaylorIntegration's jetcoeffs! against the
# TaylorDiff-based implementation above, then check both agree to order 6.
rhs(x, p, t) = x * x
x0 = 0.1
t0 = 0.0
order = 6
N = 7 # N = order + 1
# TaylorIntegration test
t = t0 + Taylor1(typeof(t0), order)
x = Taylor1(x0, order)
@btime jetcoeffs!($rhs, $t, $x, nothing)
# TaylorDiff test
td = TaylorScalar{typeof(t0), N}(t0, one(t0))
@btime jetcoeffs_taylordiff($rhs, $td, $x0, nothing)
result = jetcoeffs_taylordiff(rhs, td, x0, nothing)
normalized = normalize_taylordiff_coeffs(result)
# The two approaches must agree after factorial normalization.
@assert x.coeffs β normalized
end
function array_test()
# Lorenz system with the classic parameters (10, 28, 8/3): benchmark the
# in-place array variants and check agreement component by component.
function lorenz(du, u, p, t)
du[1] = 10.0(u[2] - u[1])
du[2] = u[1] * (28.0 - u[3]) - u[2]
du[3] = u[1] * u[2] - (8 / 3) * u[3]
return nothing
end
u0 = [1.0; 0.0; 0.0]
t0 = 0.0
order = 6
N = 7
# TaylorIntegration test
t = t0 + Taylor1(typeof(t0), order)
u = [Taylor1(x, order) for x in u0]
du = similar(u)
uaux = similar(u)
@btime jetcoeffs!($lorenz, $t, $u, $du, $uaux, nothing)
# TaylorDiff test
td = TaylorScalar{typeof(t0), N}(t0, one(t0))
@btime jetcoeffs_array_taylordiff($lorenz, $td, $u0, nothing)
result = jetcoeffs_array_taylordiff(lorenz, td, u0, nothing)
normalized = normalize_taylordiff_coeffs.(result)
# Compare each state component's coefficient vector after normalization.
for i in eachindex(u)
@assert u[i].coeffs β normalized[i]
end
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1684 | using ADTypes
using DifferentiationInterface
using ModelingToolkit, DifferentialEquations
using TaylorDiff, ForwardDiff
using Enzyme, Zygote, ReverseDiff
using SciMLSensitivity
# Simple linear ODE dx1/dt = a * x1 with x1(0) = 1 and a = 2, built
# symbolically with ModelingToolkit and solved on t in [0, 1].
@parameters a
@variables t x1(t)
D = Differential(t)
states = [x1]
parameters = [a]
@named pre_model = ODESystem([D(x1) ~ a * x1], t, states, parameters)
model = structural_simplify(pre_model)
ic = Dict(x1 => 1.0)
p_true = Dict(a => 2.0)
problem = ODEProblem{true, SciMLBase.FullSpecialize}(model, ic, [0.0, 1.0], p_true)
# Reference (baseline) solution at tight tolerances.
soln = ModelingToolkit.solve(problem, Tsit5(), abstol = 1e-12, reltol = 1e-12)
display(soln(0.5, idxs = [x1]))
"""
    different_time(new_ic, new_params, new_t)

Re-solve the ODE with initial conditions `new_ic`, parameters `new_params` and
time span `[0, new_t]`, and return the solution evaluated at `new_t` for `x1`.
"""
function different_time(new_ic, new_params, new_t)
    newprob = remake(problem, u0 = new_ic, tspan = [0.0, new_t], p = new_params)
    # Promote the state eltype to match new_t so dual/Taylor perturbation
    # numbers propagate through the integration.
    newprob = remake(newprob, u0 = typeof(new_t).(newprob.u0))
    new_soln = ModelingToolkit.solve(newprob, Tsit5(), abstol = 1e-12, reltol = 1e-12)
    # BUG FIX: evaluate the freshly computed solution. Previously this returned
    # the global `soln`, so `new_soln` was computed and then discarded and the
    # result never depended on `new_ic`/`new_params`/`new_t`'s perturbations.
    return new_soln(new_t, idxs = [x1])
end
# Scalar wrapper: vary only the evaluation time, keeping the baseline initial
# condition `ic` and parameters `p_true` fixed.
just_t(new_t) = first(different_time(ic, p_true, new_t))
# Sanity checks of the re-solve path and the TaylorDiff derivative; the other
# AD backends below are kept commented out for manual experimentation.
display(different_time(ic, p_true, 2e-5))
display(just_t(0.5))
#display(ForwardDiff.derivative(just_t,1.0))
display(TaylorDiff.derivative(just_t, 1.0, 1)) #isnan error
#display(value_and_gradient(just_t, AutoForwardDiff(), 1.0))
#display(value_and_gradient(just_t, AutoReverseDiff(), 1.0))
#display(value_and_gradient(just_t, AutoEnzyme(Enzyme.Reverse), 1.0))
#display(value_and_gradient(just_t, AutoEnzyme(Enzyme.Forward), 1.0))
#display(value_and_gradient(just_t, AutoZygote(), 1.0))
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 423 | module TaylorDiffNNlibExt
using TaylorDiff
import NNlib: oftf
import NNlib: sigmoid_fast, tanh_fast, rrelu, leakyrelu
# NNlib's "fast" activation variants are overridden for TaylorScalar inputs
# with mathematically exact forms, which TaylorDiff can differentiate.
@inline sigmoid_fast(t::TaylorScalar) = one(t) / (one(t) + exp(-t))
@inline tanh_fast(t::TaylorScalar) = tanh(t)
@inline function rrelu(t::TaylorScalar{T, N},
l = oftf(t, 1 / 8),
u = oftf(t, 1 / 3)) where {T, N}
# Random slope a drawn uniformly from [l, u), then applied via leakyrelu.
a = (u - l) * rand(float(T)) + l
return leakyrelu(t, a)
end
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 174 | module TaylorDiffSFExt
using TaylorDiff, SpecialFunctions
# Generate TaylorScalar methods for the error-function family via TaylorDiff's
# unary-function code generator.
for func in (erf, erfc, erfcinv, erfcx, erfi)
TaylorDiff.define_unary_function(func, TaylorDiffSFExt)
end
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 768 | module TaylorDiffZygoteExt
using TaylorDiff
import Zygote: @adjoint, Numeric, _dual_safearg, ZygoteRuleConfig
using ChainRulesCore: @opt_out
# Zygote can't infer this constructor function
# defining rrule for this doesn't seem to work for Zygote
# so need to use @adjoint
# The adjoint of re-truncating a TaylorScalar to N coefficients truncates the
# cotangent back to M coefficients.
@adjoint TaylorScalar{T, N}(t::TaylorScalar{T, M}) where {T, N, M} = TaylorScalar{T, N}(t),
xΜ -> (TaylorScalar{T, M}(xΜ),)
# Zygote will try to use ForwardDiff to compute broadcast functions
# However, TaylorScalar is not dual safe, so we opt out of this
_dual_safearg(::Numeric{<:TaylorScalar}) = false
# Zygote has a rule for literal power, need to opt out of this
@opt_out rrule(
::ZygoteRuleConfig, ::typeof(Base.literal_pow), ::typeof(^), x::TaylorScalar, ::Val{p}
) where {p}
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 161 | module TaylorDiff
# Core TaylorScalar type and promotion rules.
include("scalar.jl")
# Arithmetic and elementary-function recurrences on TaylorScalar.
include("primitive.jl")
include("utils.jl")
# Generated methods for additional unary Base functions.
include("codegen.jl")
# User-facing derivative / derivatives APIs.
include("derivative.jl")
# ChainRules integration (custom rrules and opt-outs).
include("chainrules.jl")
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 3194 | import ChainRulesCore: rrule, RuleConfig, ProjectTo, backing, @opt_out
using Base.Broadcast: broadcasted
# Dot product of the coefficient tuples of two TaylorScalars.
function contract(a::TaylorScalar{T, N}, b::TaylorScalar{S, N}) where {T, S, N}
mapreduce(*, +, value(a), value(b))
end
# Constructing a TaylorScalar from a coefficient tuple: the pullback simply
# unwraps the cotangent's coefficient tuple.
function rrule(::Type{TaylorScalar{T, N}}, v::NTuple{N, T}) where {N, T}
taylor_scalar_pullback(tΜ) = NoTangent(), value(tΜ)
return TaylorScalar(v), taylor_scalar_pullback
end
function rrule(::typeof(value), t::TaylorScalar{T, N}) where {N, T}
value_pullback(vΜ::NTuple{N, T}) = NoTangent(), TaylorScalar(vΜ)
# for structural tangent, convert to tuple
function value_pullback(vΜ::Tangent{P, NTuple{N, T}}) where {P}
NoTangent(), TaylorScalar{T, N}(backing(vΜ))
end
# Fallback: coerce any other tangent container elementwise to T.
value_pullback(vΜ) = NoTangent(), TaylorScalar{T, N}(map(x -> convert(T, x), Tuple(vΜ)))
return value(t), value_pullback
end
function rrule(::typeof(extract_derivative), t::TaylorScalar{T, N},
i::Integer) where {N, T}
# The cotangent w.r.t. t is a one-hot TaylorScalar at coefficient slot i.
function extract_derivative_pullback(dΜ)
NoTangent(), TaylorScalar{T, N}(ntuple(j -> j === i ? dΜ : zero(T), Val(N))),
NoTangent()
end
return extract_derivative(t, i), extract_derivative_pullback
end
function rrule(::typeof(*), A::AbstractMatrix{S},
t::AbstractVector{TaylorScalar{T, N}}) where {N, S <: Real, T <: Real}
project_A = ProjectTo(A)
# View the TaylorScalar buffers as plain `T` arrays so the adjoint can be
# expressed with ordinary real matrix products.
function gemv_pullback(xΜ)
xΜ = reinterpret(reshape, T, xΜ)
tΜ = reinterpret(reshape, T, t)
NoTangent(), @thunk(project_A(transpose(xΜ) * tΜ)), @thunk(transpose(A)*xΜ)
end
return A * t, gemv_pullback
end
function rrule(::typeof(*), A::AbstractMatrix{S},
B::AbstractMatrix{TaylorScalar{T, N}}) where {N, S <: Real, T <: Real}
project_A = ProjectTo(A)
project_B = ProjectTo(B)
# Standard matmul adjoints, with both factors projected back to their
# original (real / TaylorScalar) element types.
function gemm_pullback(xΜ)
XΜ = unthunk(xΜ)
NoTangent(),
@thunk(project_A(XΜ * transpose(B))),
@thunk(project_B(transpose(A) * XΜ))
end
return A * B, gemm_pullback
end
# Projecting a TaylorScalar cotangent onto a plain number keeps its primal.
(project::ProjectTo{T})(dx::TaylorScalar{T, N}) where {N, T <: Number} = primal(dx)
# opt-outs
# Opt out of ChainRules' generic scalar rules for TaylorScalar arguments so
# these calls fall through to the TaylorScalar method overloads instead.
# Unary functions
for f in (
exp, exp10, exp2, expm1,
sin, cos, tan, sec, csc, cot,
sinh, cosh, tanh, sech, csch, coth,
log, log10, log2, log1p,
asin, acos, atan, asec, acsc, acot,
asinh, acosh, atanh, asech, acsch, acoth,
sqrt, cbrt, inv
)
@eval @opt_out frule(::typeof($f), x::TaylorScalar)
@eval @opt_out rrule(::typeof($f), x::TaylorScalar)
end
# Binary functions
# Every mixed combination involving at least one TaylorScalar is opted out.
for f in (
*, /, ^
)
for (tlhs, trhs) in (
(TaylorScalar, TaylorScalar),
(TaylorScalar, Number),
(Number, TaylorScalar)
)
@eval @opt_out frule(::typeof($f), x::$tlhs, y::$trhs)
@eval @opt_out rrule(::typeof($f), x::$tlhs, y::$trhs)
end
end
# Multi-argument functions
@opt_out frule(::typeof(*), x::TaylorScalar, y::TaylorScalar, z::TaylorScalar)
@opt_out rrule(::typeof(*), x::TaylorScalar, y::TaylorScalar, z::TaylorScalar)
@opt_out frule(
::typeof(*), x::TaylorScalar, y::TaylorScalar, z::TaylorScalar, more::TaylorScalar...)
@opt_out rrule(
::typeof(*), x::TaylorScalar, y::TaylorScalar, z::TaylorScalar, more::TaylorScalar...)
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 258 | for unary_func in (
+, -, deg2rad, rad2deg,
sinh, cosh, tanh,
asin, acos, atan, asec, acsc, acot,
log, log10, log1p, log2,
asinh, acosh, atanh, asech, acsch,
acoth,
abs, sign)
# Each listed Base function gets a generated TaylorScalar method.
define_unary_function(unary_func, TaylorDiff)
end
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 2535 |
export derivative, derivative!, derivatives, make_seed
"""
derivative(f, x, l, ::Val{N})
derivative(f!, y, x, l, ::Val{N})
Computes `order`-th directional derivative of `f` w.r.t. vector `x` in direction `l`.
"""
function derivative end
"""
derivative!(result, f, x, l, ::Val{N})
derivative!(result, f!, y, x, l, ::Val{N})
In-place derivative calculation APIs. `result` is expected to be pre-allocated and have the same shape as `y`.
"""
function derivative! end
"""
derivatives(f, x, l, ::Val{N})
derivatives(f!, y, x, l, ::Val{N})
Computes all derivatives of `f` at `x` up to order `N - 1`.
"""
function derivatives end
# Convenience wrapper for adding unit seed to the input
@inline derivative(f, x, order::Int64) = derivative(f, x, one(eltype(x)), order)
# Convenience wrappers for converting orders to value types
# and forward work to core APIs
# Note: Val{order + 1} because N coefficients carry derivatives 0 .. N - 1.
@inline derivative(f, x, l, order::Int64) = derivative(f, x, l, Val{order + 1}())
@inline derivative(f!, y, x, l, order::Int64) = derivative(f!, y, x, l, Val{order + 1}())
@inline derivative!(result, f, x, l, order::Int64) = derivative!(
result, f, x, l, Val{order + 1}())
@inline derivative!(result, f!, y, x, l, order::Int64) = derivative!(
result, f!, y, x, l, Val{order + 1}())
# Core APIs
# Added to help Zygote infer types
# Seed a scalar: primal x perturbed by l in the first-order slot.
@inline function make_seed(x::T, l::S, ::Val{N}) where {T <: Real, S <: Real, N}
TaylorScalar{T, N}(x, convert(T, l))
end
# Elementwise seeding for array inputs.
@inline function make_seed(x::AbstractArray{T}, l, vN::Val{N}) where {T <: Real, N}
broadcast(make_seed, x, l, vN)
end
# `derivative` API: computes the `N - 1`-th derivative of `f` at `x`
@inline derivative(f, x, l, vN::Val{N}) where {N} = extract_derivative(
derivatives(f, x, l, vN), N)
@inline derivative(f!, y, x, l, vN::Val{N}) where {N} = extract_derivative(
derivatives(f!, y, x, l, vN), N)
@inline derivative!(result, f, x, l, vN::Val{N}) where {N} = extract_derivative!(
result, derivatives(f, x, l, vN), N)
@inline derivative!(result, f!, y, x, l, vN::Val{N}) where {N} = extract_derivative!(
result, derivatives(f!, y, x, l, vN), N)
# `derivatives` API: computes all derivatives of `f` at `x` up to order `N - 1`
# Out-of-place function
@inline derivatives(f, x, l, vN::Val{N}) where {N} = f(make_seed(x, l, vN))
# In-place function
@inline function derivatives(f!, y::AbstractArray{T}, x, l, vN::Val{N}) where {T, N}
# Run f! on a Taylor-valued buffer, copy primals back into y, and return
# the buffer that holds all higher-order coefficients.
buffer = similar(y, TaylorScalar{T, N})
f!(buffer, make_seed(x, l, vN))
map!(primal, y, buffer)
return buffer
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 7459 | import Base: abs, abs2
import Base: exp, exp2, exp10, expm1, log, log2, log10, log1p, inv, sqrt, cbrt
import Base: sin, cos, tan, cot, sec, csc, sinh, cosh, tanh, coth, sech, csch, sinpi, cospi
import Base: asin, acos, atan, acot, asec, acsc, asinh, acosh, atanh, acoth, asech, acsch
import Base: sinc, cosc
import Base: +, -, *, /, \, ^, >, <, >=, <=, ==
import Base: hypot, max, min
import Base: tail
# Unary
# Mixed Number / TaylorScalar arithmetic: + and - only touch the constant
# coefficient (negating the tail for Number - TaylorScalar), while * and /
# scale every coefficient; Number / TaylorScalar goes through promotion.
@inline +(a::Number, b::TaylorScalar) = TaylorScalar((a + value(b)[1]), tail(value(b))...)
@inline -(a::Number, b::TaylorScalar) = TaylorScalar((a - value(b)[1]), .-tail(value(b))...)
@inline *(a::Number, b::TaylorScalar) = TaylorScalar((a .* value(b))...)
@inline /(a::Number, b::TaylorScalar) = /(promote(a, b)...)
@inline +(a::TaylorScalar, b::Number) = TaylorScalar((value(a)[1] + b), tail(value(a))...)
@inline -(a::TaylorScalar, b::Number) = TaylorScalar((value(a)[1] - b), tail(value(a))...)
@inline *(a::TaylorScalar, b::Number) = TaylorScalar((value(a) .* b)...)
@inline /(a::TaylorScalar, b::Number) = TaylorScalar((value(a) ./ b)...)
## Delegated
# These reduce to the power and division rules defined below.
@inline sqrt(t::TaylorScalar) = t^0.5
@inline cbrt(t::TaylorScalar) = ^(t, 1 / 3)
@inline inv(t::TaylorScalar) = one(t) / t
# Generated Taylor recurrences for the exponential family. Higher coefficients
# are built from the binomial-weighted convolution of previous outputs with the
# input's coefficients; exp2/exp10 carry an extra log(2)/log(10) factor per
# order, and expm1 runs the recurrence with exp but reports expm1 as primal.
for func in (:exp, :expm1, :exp2, :exp10)
@eval @generated function $func(t::TaylorScalar{T, N}) where {T, N}
ex = quote
v = value(t)
v1 = $($(QuoteNode(func)) == :expm1 ? :(exp(v[1])) : :($$func(v[1])))
end
for i in 2:N
ex = quote
$ex
$(Symbol('v', i)) = +($([:($(binomial(i - 2, j - 1)) * $(Symbol('v', j)) *
v[$(i + 1 - j)])
for j in 1:(i - 1)]...))
end
if $(QuoteNode(func)) == :exp2
ex = :($ex; $(Symbol('v', i)) *= $(log(2)))
elseif $(QuoteNode(func)) == :exp10
ex = :($ex; $(Symbol('v', i)) *= $(log(10)))
end
end
if $(QuoteNode(func)) == :expm1
ex = :($ex; v1 = expm1(v[1]))
end
ex = :($ex; TaylorScalar{T, N}(tuple($([Symbol('v', i) for i in 1:N]...))))
return :(@inbounds $ex)
end
end
# sin and cos are generated together because their coefficient recurrences are
# coupled: each new sine coefficient uses the cosine coefficients and vice
# versa (with a sign flip); only the requested function's tuple is returned.
for func in (:sin, :cos)
@eval @generated function $func(t::TaylorScalar{T, N}) where {T, N}
ex = quote
v = value(t)
s1 = sin(v[1])
c1 = cos(v[1])
end
for i in 2:N
ex = :($ex;
$(Symbol('s', i)) = +($([:($(binomial(i - 2, j - 1)) *
$(Symbol('c', j)) *
v[$(i + 1 - j)]) for j in 1:(i - 1)]...)))
ex = :($ex;
$(Symbol('c', i)) = +($([:($(-binomial(i - 2, j - 1)) *
$(Symbol('s', j)) *
v[$(i + 1 - j)]) for j in 1:(i - 1)]...)))
end
if $(QuoteNode(func)) == :sin
ex = :($ex; TaylorScalar($([Symbol('s', i) for i in 1:N]...)))
else
ex = :($ex; TaylorScalar($([Symbol('c', i) for i in 1:N]...)))
end
return quote
@inbounds $ex
end
end
end
# sinpi/cospi delegate to the generated sin/cos via explicit scaling.
@inline sinpi(t::TaylorScalar) = sin(Ο * t)
@inline cospi(t::TaylorScalar) = cos(Ο * t)
# Binary
const AMBIGUOUS_TYPES = (AbstractFloat, Irrational, Integer, Rational, Real, RoundingMode)
# Comparisons only look at the primal (constant) coefficient; the explicit
# methods against each AMBIGUOUS_TYPES member resolve dispatch ambiguities.
for op in [:>, :<, :(==), :(>=), :(<=)]
for R in AMBIGUOUS_TYPES
@eval @inline $op(a::TaylorScalar, b::$R) = $op(value(a)[1], b)
@eval @inline $op(a::$R, b::TaylorScalar) = $op(a, value(b)[1])
end
@eval @inline $op(a::TaylorScalar, b::TaylorScalar) = $op(value(a)[1], value(b)[1])
end
# Addition and subtraction are coefficientwise.
@inline +(a::TaylorScalar, b::TaylorScalar) = TaylorScalar(map(+, value(a), value(b)))
@inline -(a::TaylorScalar, b::TaylorScalar) = TaylorScalar(map(-, value(a), value(b)))
# Leibniz product rule on the coefficient tuples, fully unrolled at compile
# time for the fixed order N.
@generated function *(a::TaylorScalar{T, N}, b::TaylorScalar{T, N}) where {T, N}
return quote
va, vb = value(a), value(b)
@inbounds TaylorScalar($([:(+($([:($(binomial(i - 1, j - 1)) * va[$j] *
vb[$(i + 1 - j)]) for j in 1:i]...)))
for i in 1:N]...))
end
end
# Division solves the product recurrence for the quotient's coefficients:
# each new coefficient subtracts the already-known convolution terms and
# divides by the denominator's constant term.
@generated function /(a::TaylorScalar{T, N}, b::TaylorScalar{T, N}) where {T, N}
ex = quote
va, vb = value(a), value(b)
v1 = va[1] / vb[1]
end
for i in 2:N
ex = quote
$ex
$(Symbol('v', i)) = (va[$i] -
+($([:($(binomial(i - 1, j - 1)) * $(Symbol('v', j)) *
vb[$(i + 1 - j)])
for j in 1:(i - 1)]...))) / vb[1]
end
end
ex = :($ex; TaylorScalar($([Symbol('v', i) for i in 1:N]...)))
return :(@inbounds $ex)
end
# t^n via a compile-time-unrolled recurrence: the w-table accumulates
# convolution weights and each output coefficient combines them with powers of
# the constant term (p_k = v[1]^(n - k + 1)). Defined separately for Integer
# and Real exponents to avoid ambiguities with Base's methods.
for R in (Integer, Real)
@eval @generated function ^(t::TaylorScalar{T, N}, n::S) where {S <: $R, T, N}
ex = quote
v = value(t)
w11 = 1
u1 = ^(v[1], n)
end
for k in 1:N
ex = quote
$ex
$(Symbol('p', k)) = ^(v[1], n - $(k - 1))
end
end
for i in 2:N
subex = quote
$(Symbol('w', i, 1)) = 0
end
for k in 2:i
subex = quote
$subex
$(Symbol('w', i, k)) = +($([:((n * $(binomial(i - 2, j - 1)) -
$(binomial(i - 2, j - 2))) *
$(Symbol('w', j, k - 1)) *
v[$(i + 1 - j)])
for j in (k - 1):(i - 1)]...))
end
end
ex = quote
$ex
$subex
$(Symbol('u', i)) = +($([:($(Symbol('w', i, k)) * $(Symbol('p', k)))
for k in 2:i]...))
end
end
ex = :($ex; TaylorScalar($([Symbol('u', i) for i in 1:N]...)))
return :(@inbounds $ex)
end
# Scalar base with Taylor exponent: a^t = exp(t * log(a)).
@eval function ^(a::S, t::TaylorScalar{T, N}) where {S <: $R, T, N}
exp(t * log(a))
end
end
# Taylor base and Taylor exponent likewise go through exp/log.
^(t::TaylorScalar, s::TaylorScalar) = exp(s * log(t))
# "Raise" a derivative value `df` of order M back to order N = M + 1: the
# result has primal part `f` and higher coefficients combining `df` with the
# shifted coefficients of `t` via binomial weights (chain rule).
@generated function raise(f::T, df::TaylorScalar{T, M},
                          t::TaylorScalar{T, N}) where {T, M, N} # M + 1 == N
    return quote
        $(Expr(:meta, :inline))
        vdf, vt = value(df), value(t)
        @inbounds TaylorScalar(f,
                               $([:(+($([:($(binomial(i - 1, j - 1)) * vdf[$j] *
                                           vt[$(i + 2 - j)]) for j in 1:i]...)))
                                  for i in 1:M]...))
    end
end
# Degenerate case: a plain-number derivative just scales the input polynomial.
raise(::T, df::S, t::TaylorScalar{T, N}) where {S <: Number, T, N} = df * t
# Inverse raise: used by the codegen path when the symbolic derivative is of
# the form base^-1 (see `define_unary_function`); solves a triangular system
# by forward substitution instead of multiplying, mirroring `/`.
@generated function raiseinv(f::T, df::TaylorScalar{T, M},
                             t::TaylorScalar{T, N}) where {T, M, N} # M + 1 == N
    ex = quote
        vdf, vt = value(df), value(t)
        v1 = vt[2] / vdf[1]
    end
    for i in 2:M
        ex = quote
            $ex
            $(Symbol('v', i)) = (vt[$(i + 1)] -
                                 +($([:($(binomial(i - 1, j - 1)) * $(Symbol('v', j)) *
                                        vdf[$(i + 1 - j)])
                                      for j in 1:(i - 1)]...))) / vdf[1]
        end
    end
    ex = :($ex; TaylorScalar(f, $([Symbol('v', i) for i in 1:M]...)))
    return :(@inbounds $ex)
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 3336 | import Base: zero, one, adjoint, conj, transpose
import Base: +, -, *, /
import Base: convert, promote_rule
export TaylorScalar
"""
TaylorDiff.can_taylor(V::Type)
Determines whether the type V is allowed as the scalar type in a
Dual. By default, only `<:Real` types are allowed.
"""
can_taylorize(::Type{<:Real}) = true
can_taylorize(::Type) = false
@noinline function throw_cannot_taylorize(V::Type)
throw(ArgumentError("Cannot create a Taylor polynomial over scalar type $V." *
" If the type behaves as a scalar, define TaylorDiff.can_taylorize(::Type{$V}) = true."))
end
"""
TaylorScalar{T, N}
Representation of Taylor polynomials.
# Fields
- `value::NTuple{N, T}`: i-th element of this stores the (i-1)-th derivative
"""
struct TaylorScalar{T, N} <: Real
value::NTuple{N, T}
function TaylorScalar{T, N}(value::NTuple{N, T}) where {T, N}
can_taylorize(T) || throw_cannot_taylorize(T)
new{T, N}(value)
end
end
# Outer convenience constructors: from a coefficient tuple, or from the
# coefficients given as varargs.
TaylorScalar(value::NTuple{N, T}) where {T, N} = TaylorScalar{T, N}(value)
TaylorScalar(value::Vararg{T, N}) where {T, N} = TaylorScalar{T, N}(value)
"""
    TaylorScalar{T, N}(x::S) where {T, S <: Real, N}

Construct a Taylor polynomial with zeroth order coefficient `x` and all
higher-order coefficients zero (i.e. a constant).
"""
@generated function TaylorScalar{T, N}(x::S) where {T, S <: Real, N}
    return quote
        $(Expr(:meta, :inline))
        TaylorScalar((T(x), $(zeros(T, N - 1)...)))
    end
end
"""
    TaylorScalar{T, N}(x::S, d::S) where {T, S <: Real, N}

Construct a Taylor polynomial with zeroth and first order coefficient, acting as a seed.
"""
@generated function TaylorScalar{T, N}(x::S, d::S) where {T, S <: Real, N}
    return quote
        $(Expr(:meta, :inline))
        TaylorScalar((T(x), T(d), $(zeros(T, N - 2)...)))
    end
end
# Order conversion: truncate the coefficient tuple when lowering the order
# (N <= M), zero-pad it when raising (N > M).
@generated function TaylorScalar{T, N}(t::TaylorScalar{T, M}) where {T, N, M}
    N <= M ? quote
        $(Expr(:meta, :inline))
        TaylorScalar(value(t)[1:N])
    end : quote
        $(Expr(:meta, :inline))
        TaylorScalar((value(t)..., $(zeros(T, N - M)...)))
    end
end
# Raw coefficient tuple of a Taylor polynomial.
@inline value(t::TaylorScalar) = t.value
# i-th stored coefficient, i.e. the (i-1)-th derivative (1-based).
@inline extract_derivative(t::TaylorScalar, i::Integer) = t.value[i]
# Elementwise variant over arrays of TaylorScalar.
@inline function extract_derivative(v::AbstractArray{T},
                                    i::Integer) where {T <: TaylorScalar}
    map(t -> extract_derivative(t, i), v)
end
# Fallback for non-Taylor values: no derivative information, so return a
# strong zero (`false`).
@inline extract_derivative(r, i::Integer) = false
# In-place variant writing into `result`.
@inline function extract_derivative!(result::AbstractArray, v::AbstractArray{T},
                                     i::Integer) where {T <: TaylorScalar}
    map!(t -> extract_derivative(t, i), result, v)
end
# Primal (zeroth-order) part of a Taylor polynomial.
@inline primal(t::TaylorScalar) = extract_derivative(t, 1)
# Promote against any other type by promoting the coefficient type.
function promote_rule(::Type{TaylorScalar{T, N}},
                      ::Type{S}) where {T, S, N}
    TaylorScalar{promote_type(T, S), N}
end
# Conversion to an AbstractFloat keeps only the primal value, discarding all
# derivative information.
function (::Type{F})(x::TaylorScalar{T, N}) where {T, N, F <: AbstractFloat}
    F(primal(x))
end
"""
    Base.nextfloat(x::TaylorScalar)

Advance only the primal (zeroth-order) coefficient to the next representable
float; all higher-order coefficients are left untouched.
"""
function Base.nextfloat(x::TaylorScalar{T, N}) where {T, N}
    v = value(x)
    bumped = ntuple(N) do k
        k == 1 ? nextfloat(v[k]) : v[k]
    end
    return TaylorScalar{T, N}(bumped)
end

"""
    Base.prevfloat(x::TaylorScalar)

Step only the primal (zeroth-order) coefficient back to the previous
representable float; all higher-order coefficients are left untouched.
"""
function Base.prevfloat(x::TaylorScalar{T, N}) where {T, N}
    v = value(x)
    bumped = ntuple(N) do k
        k == 1 ? prevfloat(v[k]) : v[k]
    end
    return TaylorScalar{T, N}(bumped)
end
# Predicates forward to the primal value: a Taylor number is deemed
# finite/NaN/integer/etc. exactly when its zeroth-order coefficient is.
const UNARY_PREDICATES = Symbol[
    :isinf, :isnan, :isfinite, :iseven, :isodd, :isreal, :isinteger]
for pred in UNARY_PREDICATES
    @eval Base.$(pred)(x::TaylorScalar) = $(pred)(primal(x))
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 964 | using ChainRules
using ChainRulesCore
using Symbolics: @variables
using SymbolicUtils, SymbolicUtils.Code
using SymbolicUtils: Pow
# Seed tangent passed to `frule` when querying a primitive's derivative.
dummy = (NoTangent(), 1)
# Symbolic placeholder used to obtain a closed-form derivative expression.
@variables z
# Define the action of a unary primitive `func` on TaylorScalar inside
# module `m`: an order-2 base case via ChainRules' `frule`, plus a recursive
# @generated method that evaluates the symbolic derivative at order N - 1 and
# "raises" it to order N. When the symbolic derivative is a reciprocal power
# (base^-1), `raiseinv` is used on the base instead of `raise` on the whole
# expression.
function define_unary_function(func, m)
    F = typeof(func)
    # base case
    @eval m function (op::$F)(t::TaylorScalar{T, 2}) where {T}
        t0, t1 = value(t)
        f0, f1 = frule((NoTangent(), t1), op, t0)
        TaylorScalar{T, 2}(f0, zero_tangent(f0) + f1)
    end
    der = frule(dummy, func, z)[2]
    term, raiser = der isa Pow && der.exp == -1 ? (der.base, raiseinv) : (der, raise)
    # recursion by raising
    @eval m @generated function (op::$F)(t::TaylorScalar{T, N}) where {T, N}
        der_expr = $(QuoteNode(toexpr(term)))
        f = $func
        quote
            $(Expr(:meta, :inline))
            z = TaylorScalar{T, N - 1}(t)
            f0 = $f(value(t)[1])
            df = zero_tangent(z) + $der_expr
            $$raiser(f0, df, t)
        end
    end
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 968 |
# Derivative API tests over the four in-place/out-of-place combinations.
# NOTE(review): the `≈` (isapprox) operators in this file had been mangled
# into `β` by an encoding round-trip; restored throughout.
@testset "O-function, O-derivative" begin
    g(x) = x^3
    @test derivative(g, 1.0, 1) ≈ 3
    h(x) = x .^ 3
    @test derivative(h, [2.0 3.0], 1) ≈ [12.0 27.0]
    g1(x) = x[1] * x[1] + x[2] * x[2]
    @test derivative(g1, [1.0, 2.0], [1.0, 0.0], 1) ≈ 2.0
    h1(x) = sum(x, dims = 1)
    @test derivative(h1, [1.0 2.0; 2.0 3.0], [1.0, 1.0], 1) ≈ [2.0 2.0]
end
@testset "I-function, O-derivative" begin
    g!(y, x) = begin
        y[1] = x * x
        y[2] = x + 1
    end
    x = 2.0
    y = [0.0, 0.0]
    @test derivative(g!, y, x, 1.0, Val{2}()) ≈ [4.0, 1.0]
end
@testset "O-function, I-derivative" begin
    g(x) = x .^ 2
    @test derivative!(zeros(2), g, [1.0, 2.0], [1.0, 0.0], Val{2}()) ≈ [2.0, 0.0]
end
@testset "I-function, I-derivative" begin
    g!(y, x) = begin
        y[1] = x[1] * x[1]
        y[2] = x[2] * x[2]
    end
    x = [2.0, 3.0]
    y = [0.0, 0.0]
    @test derivative!(y, g!, zeros(2), x, [1.0, 0.0], Val{2}()) ≈ [4.0, 0.0]
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 1938 | using LinearAlgebra
import DifferentiationInterface
using DifferentiationInterface: AutoZygote, AutoEnzyme
import Zygote, Enzyme
using FiniteDiff: finite_difference_derivative
DI = DifferentiationInterface
backend = AutoZygote()
# backend = AutoEnzyme(; mode = Enzyme.Reverse, function_annotation = Enzyme.Const)
# NOTE(review): restored `≈` and `ε`, which had been mangled by an encoding
# round-trip into `β` and `Ξ΅`.
@testset "Zygote-over-TaylorDiff on same variable" begin
    # Scalar functions
    some_number = 0.7
    some_numbers = [0.3, 0.4, 0.1]
    for f in (exp, log, sqrt, sin, asin, sinh, asinh, x -> x^3)
        @test DI.derivative(x -> derivative(f, x, 2), backend, some_number) ≈
              derivative(f, some_number, 3)
        @test DI.jacobian(x -> derivative.(f, x, 2), backend, some_numbers) ≈
              diagm(derivative.(f, some_numbers, 3))
    end
    # Vector functions
    g(x) = x[1] * x[1] + x[2] * x[2]
    @test DI.gradient(x -> derivative(g, x, [1.0, 0.0], 1), backend, [1.0, 2.0]) ≈
          [2.0, 0.0]
    # Matrix functions
    some_matrix = [0.7 0.1; 0.4 0.2]
    f(x) = sum(exp.(x), dims = 1)
    dfdx1(x) = derivative(f, x, [1.0, 0.0], 1)
    dfdx2(x) = derivative(f, x, [0.0, 1.0], 1)
    res(x) = sum(dfdx1(x) .+ 2 * dfdx2(x))
    grad = DI.gradient(res, backend, some_matrix)
    @test grad ≈ [1 0; 0 2] * exp.(some_matrix)
end
@testset "Zygote-over-TaylorDiff on different variable" begin
    linear_model(x, p, b) = exp.(b + p * x + b)[1]
    loss_taylor(x, p, b, v) = derivative(x -> linear_model(x, p, b), x, v, 1)
    # Central-difference step; cbrt(eps) balances truncation vs round-off.
    ε = cbrt(eps(Float64))
    loss_finite(x, p, b, v) = (linear_model(x + ε * v, p, b) -
                               linear_model(x - ε * v, p, b)) / (2 * ε)
    let some_x = [0.58, 0.36], some_v = [0.23, 0.11], some_p = [0.49 0.96], some_b = [0.88]
        @test DI.gradient(
                  p -> loss_taylor(some_x, p, some_b, some_v), backend, some_p) ≈
              DI.gradient(
                  p -> loss_finite(some_x, p, some_b, some_v), backend, some_p)
    end
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 751 | using Lux, Random
# Smoke test: a Taylor-seeded vector should propagate through a Lux MLP
# without error (forward evaluation only; no assertions).
@testset "Lux forward evaluation" begin
    # Construct the layer
    model = Chain(Dense(2, 16, Lux.relu), Dense(16, 1))
    # Seeding
    rng = Random.default_rng()
    Random.seed!(rng, 0)
    # Parameter and State Variables
    ps, st = Lux.setup(rng, model)
    # Dummy Input
    x = TaylorVector([1.0, 1.0], [1.0, 0.0])
    # Run the model
    y, st = Lux.apply(model, x, ps, st)
end
# Placeholder test set: the gradient/optimization flow is kept only as a
# commented sketch (pullback + Optimisers update).
@testset "Lux gradient" begin
    # # Gradients
    # ## Pullback API to capture change in state
    # (l, st_), pb = pullback(p -> Lux.apply(model, x, p, st), ps)
    # gs = pb((one.(l), nothing))[1]
    # # Optimization
    # st_opt = Optimisers.setup(Optimisers.ADAM(0.0001), ps)
    # st_opt, ps = Optimisers.update(st_opt, ps, gs)
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 2524 | using FiniteDifferences
# Primitive tests, cross-checked against FiniteDifferences.
# NOTE(review): the `≈` (isapprox) operators in this file had been mangled
# into `β` by an encoding round-trip; restored throughout.
@testset "No derivative or linear" begin
    some_number, another_number = 1.9, 2.6
    for f in (+, -, zero, one, adjoint, conj, deg2rad, rad2deg, abs, sign), order in (2,)
        @test derivative(f, some_number, order) ≈ 0.0
    end
    for f in (+, -, <, <=, >, >=, ==), order in (2,)
        @test derivative(x -> f(x, another_number), some_number, order) ≈ 0.0
        @test derivative(x -> f(another_number, x), some_number, order) ≈ 0.0
        @test derivative(x -> f(x, x), some_number, order) ≈ 0.0
    end
end
@testset "Unary functions" begin
    some_number = 3.7
    for f in (
            x -> exp(x^2), expm1, exp2, exp10, x -> sin(x^2), x -> cos(x^2), sinpi, cospi,
            sqrt, cbrt,
            inv), order in (1, 4)
        fdm = central_fdm(12, order)
        @test derivative(f, some_number, order) ≈ fdm(f, some_number) rtol=1e-6
    end
end
@testset "Codegen" begin
    some_number = 0.6
    for f in (log, sinh), order in (1, 4)
        fdm = central_fdm(12, order, max_range = 0.5)
        @test derivative(f, some_number, order) ≈ fdm(f, some_number) rtol=1e-6
    end
end
@testset "Binary functions" begin
    some_number, another_number = 1.9, 5.6
    for f in (*, /), order in (1, 4)
        fdm = central_fdm(12, order)
        closure = x -> exp(f(x, another_number))
        @test derivative(closure, some_number, order) ≈ fdm(closure, some_number) rtol=1e-6
    end
    for f in (x -> x^7, x -> x^another_number), order in (1, 2, 4)
        fdm = central_fdm(12, order)
        @test derivative(f, some_number, order) ≈ fdm(f, some_number) rtol=1e-6
    end
    # At x = 0 a central difference would straddle the singularity of x^p;
    # use a forward scheme instead.
    for f in (x -> x^7, x -> x^another_number), order in (1, 2)
        fdm = forward_fdm(12, order)
        @test derivative(f, 0, order) ≈ fdm(f, 0) atol=1e-6
    end
end
@testset "Corner cases" begin
    offenders = (
        TaylorDiff.TaylorScalar{Float64, 4}((Inf, 1.0, 0.0, 0.0)),
        TaylorDiff.TaylorScalar{Float64, 4}((Inf, 0.0, 0.0, 0.0)),
        TaylorDiff.TaylorScalar{Float64, 4}((1.0, 0.0, 0.0, 0.0)),
        TaylorDiff.TaylorScalar{Float64, 4}((1.0, Inf, 0.0, 0.0)),
        TaylorDiff.TaylorScalar{Float64, 4}((0.0, 1.0, 0.0, 0.0)),
        TaylorDiff.TaylorScalar{Float64, 4}((0.0, Inf, 0.0, 0.0)) # Others ?
    )
    # Identity-like operations must leave Inf-containing polynomials intact.
    f_id = (
        :id => x -> x,
        :add0 => x -> x + 0,
        :sub0 => x -> x - 0,
        :mul1 => x -> x * 1,
        :div1 => x -> x / 1,
        :pow1 => x -> x^1
    )
    for (name, f) in f_id, t in offenders
        @test f(t) == t
    end
end
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | code | 123 | using TaylorDiff
using Test
# Test entry point: each include runs one suite.
include("primitive.jl")
include("derivative.jl")
include("downstream.jl")
# include("lux.jl")
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | docs | 5011 | <h1 align=center>TaylorDiff.jl</h1>
<p align=center>
<a href="https://www.repostatus.org/#active"><img src="https://www.repostatus.org/badges/latest/active.svg" alt="Project Status: Active β The project has reached a stable, usable state and is being actively developed." /></a>
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="License: MIT" /></a>
<a href="https://juliadiff.org/TaylorDiff.jl/stable/"><img src="https://img.shields.io/badge/docs-stable-blue.svg" alt="Stable" /></a>
<a href="https://juliadiff.org/TaylorDiff.jl/dev/"><img src="https://img.shields.io/badge/docs-dev-blue.svg" alt="Dev" /></a>
<br />
<a href="https://github.com/JuliaDiff/TaylorDiff.jl/actions/workflows/Test.yml?query=branch%3Amain"><img src="https://img.shields.io/github/actions/workflow/status/JuliaDiff/TaylorDiff.jl/Test.yml?branch=main&label=test" alt="Build Status" /></a>
<a href="https://codecov.io/gh/JuliaDiff/TaylorDiff.jl"><img src="https://img.shields.io/codecov/c/gh/JuliaDiff/TaylorDiff.jl/main?token=5KYP7K71VQ"/></a>
<a href="https://benchmark.tansongchen.com/TaylorDiff.jl"><img src="https://img.shields.io/buildkite/2c801728055463e7c8baeeb3cc187b964587235a49b3ed39ab/main.svg?label=benchmark" alt="Benchmark Status" /></a>
<br />
<a href="https://github.com/SciML/ColPrac"><img src="https://img.shields.io/badge/contributor's%20guide-ColPrac-blueviolet" alt="ColPrac: Contributor's Guide on Collaborative Practices for Community Packages" /></a>
<a href="https://github.com/SciML/SciMLStyle"><img src="https://img.shields.io/badge/code%20style-SciML-blueviolet" alt="SciML Code Style" /></a>
</p>
<p align=center>
<a href="https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=563952901&machine=standardLinux32gb&devcontainer_path=.devcontainer%2Fdevcontainer.json&location=EastUshttps://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=563952901&machine=standardLinux32gb&devcontainer_path=.devcontainer%2Fdevcontainer.json&location=EastUs"><img src="https://github.com/codespaces/badge.svg" alt="Open in GitHub Codespaces" /></a>
</p>
[TaylorDiff.jl](https://github.com/JuliaDiff/TaylorDiff.jl) is an automatic differentiation (AD) package for efficient and composable higher-order derivatives, implemented with operator-overloading on Taylor polynomials.
Disclaimer: this project is still in early alpha stage, and APIs can change any time in the future. Discussions and potential use cases are extremely welcome!
## Features
TaylorDiff.jl is designed with the following goals in head:
- Linear scaling with the order of differentiation (while naively composing first-order differentiation would result in exponential scaling)
- Same performance with [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) on first order and second order, so there is no penalty in drop-in replacement
- Capable for calculating exact derivatives in physical models with ODEs and PDEs
- Composable with other AD systems like [Zygote.jl](https://github.com/FluxML/Zygote.jl), so that the above models evaluated with TaylorDiff can be further optimized with gradient-based optimization techniques
TaylorDiff.jl is fast! See our dedicated [benchmarks](https://benchmark.tansongchen.com/TaylorDiff.jl) page for comparison with other packages in various tasks.
## Installation
```bash
] add TaylorDiff
```
## Usage
```julia
using TaylorDiff
x = 0.1
derivative(sin, x, 10) # scalar derivative
v, direction = [3.0, 4.0], [1.0, 0.0]
derivative(x -> sum(exp.(x)), v, direction, 2) # directional derivative
```
Please see our [documentation](https://juliadiff.org/TaylorDiff.jl) for more details.
## Related Projects
- [TaylorSeries.jl](https://github.com/JuliaDiff/TaylorSeries.jl): a systematic treatment of Taylor polynomials in one and several variables, but its mutating and scalar code isn't great for speed and composability with other packages
- [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl): well-established and robust operator-overloading based forward-mode AD, where higher-order derivatives can be achieved by nesting first-order derivatives
- [Diffractor.jl](https://github.com/JuliaDiff/Diffractor.jl): next-generation source-code transformation based forward-mode and reverse-mode AD, designed with support for higher-order derivatives in mind; but the higher-order functionality is currently only a proof-of-concept
- [`jax.jet`](https://jax.readthedocs.io/en/latest/jax.experimental.jet.html): an experimental (and unmaintained) implementation of Taylor-mode automatic differentiation in JAX, sharing the same underlying algorithm with this project
## Citation
```bibtex
@software{tan2022taylordiff,
author = {Tan, Songchen},
title = {TaylorDiff.jl: Fast Higher-order Automatic Differentiation in Julia},
year = {2022},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/JuliaDiff/TaylorDiff.jl}}
}
```
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | docs | 157 | ```@meta
CurrentModule = TaylorDiff
```
# API
API for [TaylorDiff](https://github.com/tansongchen/TaylorDiff.jl).
```@autodocs
Modules = [TaylorDiff]
```
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.5 | d09a7ac90d8067a7e619b2daa8efaba019bbb665 | docs | 2909 | ```@meta
CurrentModule = TaylorDiff
```
# TaylorDiff.jl
[TaylorDiff.jl](https://github.com/JuliaDiff/TaylorDiff.jl) is an automatic differentiation (AD) package for efficient and composable higher-order derivatives, implemented with operator-overloading on Taylor polynomials.
Disclaimer: this project is still in early alpha stage, and APIs can change any time in the future. Discussions and potential use cases are extremely welcome!
## Features
TaylorDiff.jl is designed with the following goals in head:
- Linear scaling with the order of differentiation (while naively composing first-order differentiation would result in exponential scaling)
- Same performance with [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl) on first order and second order, so there is no penalty in drop-in replacement
- Capable for calculating exact derivatives in physical models with ODEs and PDEs
- Composable with other AD systems like [Zygote.jl](https://github.com/FluxML/Zygote.jl), so that the above models evaluated with TaylorDiff can be further optimized with gradient-based optimization techniques
TaylorDiff.jl is fast! See our dedicated [benchmarks](https://benchmark.tansongchen.com/TaylorDiff.jl) page for comparison with other packages in various tasks.
## Installation
```bash
] add TaylorDiff
```
## Usage
```julia
using TaylorDiff
x = 0.1
derivative(sin, x, 10) # scalar derivative
v, direction = [3.0, 4.0], [1.0, 0.0]
derivative(x -> sum(exp.(x)), v, direction, 2) # directional derivative
```
Please see our [documentation](https://juliadiff.org/TaylorDiff.jl) for more details.
## Related Projects
- [TaylorSeries.jl](https://github.com/JuliaDiff/TaylorSeries.jl): a systematic treatment of Taylor polynomials in one and several variables, but its mutating and scalar code isn't great for speed and composability with other packages
- [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl): well-established and robust operator-overloading based forward-mode AD, where higher-order derivatives can be achieved by nesting first-order derivatives
- [Diffractor.jl](https://github.com/JuliaDiff/Diffractor.jl): next-generation source-code transformation based forward-mode and reverse-mode AD, designed with support for higher-order derivatives in mind; but the higher-order functionality is currently only a proof-of-concept
- [`jax.jet`](https://jax.readthedocs.io/en/latest/jax.experimental.jet.html): an experimental (and unmaintained) implementation of Taylor-mode automatic differentiation in JAX, sharing the same underlying algorithm with this project
## Citation
```bibtex
@software{tan2022taylordiff,
author = {Tan, Songchen},
title = {TaylorDiff.jl: Fast Higher-order Automatic Differentiation in Julia},
year = {2022},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/JuliaDiff/TaylorDiff.jl}}
}
```
| TaylorDiff | https://github.com/JuliaDiff/TaylorDiff.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 277 | using Documenter, CoordinatedSupplyChains
# Build the documentation site with Documenter.jl.
makedocs(sitename="CoordinatedSupplyChains.jl Documentation",
    pages = [
        "Home" => "index.md",
        "Tutorials" => "tutorial.md"
        ]
    )
# Push the generated site to the repository's gh-pages branch.
deploydocs(
    repo = "github.com/Tominapa/CoordinatedSupplyChains.jl.git",
)
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 26001 | ################################################################################
### DATA STRUCTURE BUILDING FUNCTIONS - NOT FOR CALLING BY USER
##############################
### Time
"""
    TimeGen(time_id, time_dur)

Generates time data structures.

# Arguments
- `time_id`: time point IDs
- `time_dur`: time period durations

# Returns
- `T`: time data structure
- `T1`, `Tt`, `TT`: convenience sets (first point, all-but-last, all-but-first)
- `Tprior`, `Tpost`: maps from a time point to the previous/next time point

Note: the description used to live as a bare string inside the body, where it
is a dead expression; hoisted to a real docstring so `?TimeGen` works.
"""
function TimeGen(time_id, time_dur)
    CardT = length(time_id)
    ### Part 1 - Build time data structure
    T = TimeDataStruct(
        time_id, # ID
        time_dur # dt
    )
    ### Part 2 - Build time data indexing sets
    T1 = [time_id[1]] # first time point as a 1D array with length 1
    Tt = time_id[1:(end-1)] # all time points EXCEPT terminal time point
    TT = time_id[2:end] # all time points EXCEPT initial time point
    # Predecessor/successor lookup tables
    Tprior = Dict{String,String}(time_id[t] => time_id[t-1] for t = 2:CardT)
    Tpost = Dict{String,String}(time_id[t] => time_id[t+1] for t = 1:(CardT-1))
    ### Return
    return T, T1, Tt, TT, Tprior, Tpost
end
# Nodes
"""
    NodeGen(node_id, node_alias, node_lon, node_lat)

Generates the spatial node data structure.

# Arguments
- `node_id`: spatial node IDs
- `node_alias`: node names
- `node_lon`: node longitudes
- `node_lat`: node latitudes

# Returns
- `N`: node data structure

Note: the previous in-body description wrongly listed the outputs of
`TimeGen` (a copy-paste slip); corrected and hoisted to a real docstring.
"""
function NodeGen(node_id, node_alias, node_lon, node_lat)
    N = NodeDataStruct(
        node_id, # ID
        node_alias, # alias
        node_lon, # lon
        node_lat # lat
    )
    return N
end
# Products
"""
    ProductGen(product_id, product_alias, product_transport_cost, product_storage_cost)

Generates the product data structure.

# Arguments
- `product_id`: product IDs
- `product_alias`: product names
- `product_transport_cost`: product transportation costs (i.e., spatial)
- `product_storage_cost`: product storage costs (i.e., temporal)

# Returns
- `P`: product data structure
"""
function ProductGen(product_id, product_alias, product_transport_cost, product_storage_cost)
    # Wrap the raw columns in the product data structure
    P = ProductDataStruct(
        product_id, # ID
        product_alias, # alias
        product_transport_cost, # transport_cost
        product_storage_cost # storage_cost
    )
    return P
end
# Impacts
"""
    ImpactGen(impact_id, impact_alias, impact_transport_coeff, impact_storage_coeff)

Generates the impact data structure.

# Arguments
- `impact_id`: impact IDs
- `impact_alias`: impact names
- `impact_transport_coeff`: impact transportation coefficients (i.e., spatial generation)
- `impact_storage_coeff`: impact storage coefficients (i.e., temporal generation)

# Returns
- `Q`: impact data structure

Note: the previous in-body description said "product data structure"
(a copy-paste slip); corrected and hoisted to a real docstring.
"""
function ImpactGen(impact_id, impact_alias, impact_transport_coeff, impact_storage_coeff)
    Q = ImpactDataStruct(
        impact_id, # ID
        impact_alias, # alias
        impact_transport_coeff, # transport_coeff
        impact_storage_coeff # storage_coeff
    )
    return Q
end
# Arcs
"""
    ArcGen(time_id, time_dur, node_id, product_id, product_transport_cost,
           product_storage_cost, arc_id, arc_n, arc_m, arc_cap, arc_len; M=1E6)

Generates spatio-temporal arc data structures.

# Arguments
- `time_id`, `time_dur`: time point IDs and period durations
- `node_id`: spatial node IDs
- `product_id`: product IDs
- `product_transport_cost`, `product_storage_cost`: per-product unit costs
- `arc_id`, `arc_n`, `arc_m`: spatial arc IDs and their (sending, receiving) nodes
- `arc_cap`, `arc_len`: spatial arc capacities and lengths
- `M`: big-M capacity used for purely temporal arcs (keyword, default 1E6)

# Returns
- `A`: arc data structure
- `Ain`: for node (n,t), all arcs directed into it
- `Aout`: for node (n,t), all arcs directed out of it

Update notes:
- for now, assume FULL TIME CONNECTION
- TO DO: add MODE keyword argument; allow full time connection or sequential time connection
- NOTE(review): the arc counter was previously maintained as a module-level
  `global` mutated from inside this function; it is now an ordinary local,
  which is equivalent here and avoids leaking mutable global state.
"""
function ArcGen(time_id, time_dur, node_id, product_id, product_transport_cost, product_storage_cost, arc_id, arc_n, arc_m, arc_cap, arc_len; M=1E6)
    ### Part 1 - Build Arc Data Structure
    # Calculate number of arcs to define
    CardA = 2*length(arc_id) # The number of geographical arcs defined; *2 because they define both directions
    CardN = length(node_id) # number of nodes
    CardT = length(time_id) # number of time points
    CardSTA = Int(0.5*CardA*CardT*(CardT+1) + 0.5*CardN*CardT*(CardT-1)) # number of spatio-temporal arcs
    # Number of digits in CardSTA; use in lpad()
    pad = ndigits(CardSTA)
    # Define arc set and declare data arrays
    STarc_id = Array{String}(undef, CardSTA) # Spatio-temporal (ST) arc IDs
    STarc_n_send = Array{String}(undef, CardSTA) # sending node
    STarc_n_recv = Array{String}(undef, CardSTA) # receiving node
    STarc_t_send = Array{String}(undef, CardSTA) # sending time point
    STarc_t_recv = Array{String}(undef, CardSTA) # receiving time point
    STarc_cap_key = Array{String}(undef, CardSTA) # capacity key
    STarc_len = Dict() # length
    STarc_dur = Dict() # duration
    STarc_id_s = Array{String}(undef, 0) # list of purely spatial arcs
    STarc_id_t = Array{String}(undef, 0) # list of purely temporal arcs
    STarc_id_st = Array{String}(undef, 0) # list of spatio-temporal arcs
    # Counter tracking the current position (ordinate) within the ST arc set;
    # a plain function-local — no module-level global is required.
    OrdSTA = 0
    # Add purely spatial arcs to STarc set
    for t = 1:CardT
        for a = 1:length(arc_id)
            # Index update
            OrdSTA += 1
            # Add forward arc
            STarc_id[OrdSTA] = "A"*lpad(OrdSTA,pad,"0")
            STarc_n_send[OrdSTA] = arc_n[arc_id[a]]
            STarc_n_recv[OrdSTA] = arc_m[arc_id[a]]
            STarc_t_send[OrdSTA] = time_id[t]
            STarc_t_recv[OrdSTA] = time_id[t]
            STarc_cap_key[OrdSTA] = arc_id[a]
            STarc_len[STarc_id[OrdSTA]] = arc_len[arc_id[a]]
            STarc_dur[STarc_id[OrdSTA]] = 0.0
            push!(STarc_id_s, STarc_id[OrdSTA])
            # Index update
            OrdSTA += 1
            # Add reverse arc
            STarc_id[OrdSTA] = "A"*lpad(OrdSTA,pad,"0")
            STarc_n_send[OrdSTA] = arc_m[arc_id[a]]
            STarc_n_recv[OrdSTA] = arc_n[arc_id[a]]
            STarc_t_send[OrdSTA] = time_id[t]
            STarc_t_recv[OrdSTA] = time_id[t]
            STarc_cap_key[OrdSTA] = arc_id[a]
            STarc_len[STarc_id[OrdSTA]] = arc_len[arc_id[a]]
            STarc_dur[STarc_id[OrdSTA]] = 0.0
            push!(STarc_id_s, STarc_id[OrdSTA])
        end
    end
    # Add purely temporal arcs to STarc set
    for t_send = 1:(CardT-1)
        for t_recv = (t_send+1):CardT
            for n = 1:length(node_id)
                # Index update
                OrdSTA += 1
                # Add temporal arc connecting node to future time points
                STarc_id[OrdSTA] = "A"*lpad(OrdSTA,pad,"0")
                STarc_n_send[OrdSTA] = node_id[n]
                STarc_n_recv[OrdSTA] = node_id[n]
                STarc_t_send[OrdSTA] = time_id[t_send]
                STarc_t_recv[OrdSTA] = time_id[t_recv]
                STarc_cap_key[OrdSTA] = "M"
                STarc_len[STarc_id[OrdSTA]] = 0.0
                STarc_dur[STarc_id[OrdSTA]] = sum([time_dur[time_id[t]] for t = t_send:(t_recv-1)])
                push!(STarc_id_t, STarc_id[OrdSTA])
            end
        end
    end
    # Add spatio-temporal arcs to STarc set
    for t_send = 1:(CardT-1)
        for t_recv = (t_send+1):CardT
            for a = 1:length(arc_id)
                # Index update
                OrdSTA += 1
                # Add n->m spatio-temporal arc
                STarc_id[OrdSTA] = "A"*lpad(OrdSTA,pad,"0")
                STarc_n_send[OrdSTA] = arc_n[arc_id[a]]
                STarc_n_recv[OrdSTA] = arc_m[arc_id[a]]
                STarc_t_send[OrdSTA] = time_id[t_send]
                STarc_t_recv[OrdSTA] = time_id[t_recv]
                STarc_cap_key[OrdSTA] = arc_id[a]
                STarc_len[STarc_id[OrdSTA]] = arc_len[arc_id[a]]
                STarc_dur[STarc_id[OrdSTA]] = sum([time_dur[time_id[t]] for t = t_send:(t_recv-1)])
                push!(STarc_id_st, STarc_id[OrdSTA])
                # Index update
                OrdSTA += 1
                # Add m->n spatio-temporal arc
                STarc_id[OrdSTA] = "A"*lpad(OrdSTA,pad,"0")
                STarc_n_send[OrdSTA] = arc_m[arc_id[a]]
                STarc_n_recv[OrdSTA] = arc_n[arc_id[a]]
                STarc_t_send[OrdSTA] = time_id[t_send]
                STarc_t_recv[OrdSTA] = time_id[t_recv]
                STarc_cap_key[OrdSTA] = arc_id[a]
                STarc_len[STarc_id[OrdSTA]] = arc_len[arc_id[a]]
                STarc_dur[STarc_id[OrdSTA]] = sum([time_dur[time_id[t]] for t = t_send:(t_recv-1)])
                push!(STarc_id_st, STarc_id[OrdSTA])
            end
        end
    end
    # Build STarc bid dictionary: transport cost scales with length, storage
    # cost with duration.
    STarc_bid = DictInit([STarc_id,product_id], 0.0)
    for a in STarc_id
        for p = product_id
            STarc_bid[a,p] = product_transport_cost[p]*STarc_len[a] + product_storage_cost[p]*STarc_dur[a]
        end
    end
    # Build STarc capacity dictionary
    STarc_cap = DictInit([STarc_id,product_id], 0.0)
    for a = 1:CardSTA
        for p = 1:length(product_id)
            if STarc_cap_key[a] == "M"
                # These are the purely temporal arcs; no connection to a physical arc, so use a big M value
                STarc_cap[STarc_id[a],product_id[p]] = M
            else
                # Apply geographic arc capacities to all arcs (spatio-temporal will have same)
                STarc_cap[STarc_id[a],product_id[p]] = arc_cap[STarc_cap_key[a],product_id[p]]
            end
        end
    end
    A = ArcDataStruct(STarc_id,
        Dict{String,String}(zip(STarc_id, STarc_n_send)), # sending nodes
        Dict{String,String}(zip(STarc_id, STarc_n_recv)), # receiving nodes
        Dict{String,String}(zip(STarc_id, STarc_t_send)), # sending times
        Dict{String,String}(zip(STarc_id, STarc_t_recv)), # receiving times
        STarc_bid, # bids by product
        STarc_cap, # capacities by product
        STarc_len, # lengths
        STarc_dur, # durations
        STarc_id_s, # spatial arcs
        STarc_id_t, # temporal arcs
        STarc_id_st # spatio-temporal arcs
        )
    ### Part 2 - Build Arc Truth Tables
    # Ain and Aout
    Ain = Dict() # given node s(n,t), provides all arcs a in A directed towards node s
    Aout = Dict() # given node s(n,t), provides all arcs a in A directed out of node s
    [Ain[n,t] = Vector{String}(undef,0) for n in node_id, t in time_id]
    [Aout[n,t] = Vector{String}(undef,0) for n in node_id, t in time_id]
    for a = 1:CardSTA
        push!(Ain[STarc_n_recv[a], STarc_t_recv[a]], STarc_id[a])
        push!(Aout[STarc_n_send[a], STarc_t_send[a]], STarc_id[a])
    end
    ### Return
    return A, Ain, Aout
end
# Demand
"""
    DemandGen(demand_id, demand_node, demand_time, demand_prod, demand_bid,
              demand_cap, demand_impact, demand_impact_yield)

Generates the consumer (demand) data structure.

# Arguments
- `demand_id`: demand IDs
- `demand_node`: demand nodes
- `demand_time`: demand time periods
- `demand_prod`: demand products
- `demand_bid`: demand bids
- `demand_cap`: demand capacities
- `demand_impact`: demand impacts
- `demand_impact_yield`: demand impact yield coefficients

# Returns
- `D`: demand data structure

Note: the previous in-body description said "node data strucures" (a
copy-paste slip); corrected and hoisted to a real docstring.
"""
function DemandGen(demand_id, demand_node, demand_time, demand_prod, demand_bid, demand_cap, demand_impact, demand_impact_yield)
    D = DemandDataStruct(
        demand_id, # ID
        demand_node, # node
        demand_time, # time
        demand_prod, # prod
        demand_bid, # bid
        demand_cap, # cap
        demand_impact, # impacts
        demand_impact_yield # impact yield coefficients
    )
    return D
end
# Supply
"""
    SupplyGen(supply_id, supply_node, supply_time, supply_prod, supply_bid,
              supply_cap, supply_impact, supply_impact_yield)

Generates the supplier data structure.

# Arguments
- `supply_id`: supply IDs
- `supply_node`: supply nodes
- `supply_time`: supply time periods
- `supply_prod`: supply products
- `supply_bid`: supply bids
- `supply_cap`: supply capacities
- `supply_impact`: supply impacts
- `supply_impact_yield`: supply impact yield coefficients

# Returns
- `G`: supply data structure
"""
function SupplyGen(supply_id, supply_node, supply_time, supply_prod, supply_bid, supply_cap, supply_impact, supply_impact_yield)
    # (local parameter name fixed from the misspelled "yeild")
    G = SupplyDataStruct(
        supply_id, # ID
        supply_node, # node
        supply_time, # time
        supply_prod, # prod
        supply_bid, # bid
        supply_cap, # cap
        supply_impact, # impacts
        supply_impact_yield # supply impact yield coefficients
    )
    return G
end
# Environmental stakeholder
"""
    EnvGen(env_id, env_node, env_time, env_impact, env_bid, env_cap)

Generates the environmental stakeholder data structure.

# Arguments
- `env_id`: environmental stakeholder IDs
- `env_node`: stakeholder nodes
- `env_time`: stakeholder time periods
- `env_impact`: stakeholder impacts
- `env_bid`: stakeholder bids
- `env_cap`: stakeholder capacities

# Returns
- `V`: environmental stakeholder data structure
"""
function EnvGen(env_id, env_node, env_time, env_impact, env_bid, env_cap)
    V = EnvDataStruct(
        env_id, # ID
        env_node, # node
        env_time, # time
        env_impact, # impact
        env_bid, # bid
        env_cap # capacity
    )
    return V
end
# Technologies
"""
    TechGen(tech_id, tech_output, tech_input, tech_impact, tech_output_yield,
            tech_input_yield, tech_impact_yield, tech_ref, tech_bid, tech_cap,
            tech_alias)

Generates the technology data structure.

# Arguments
- `tech_id`: technology IDs
- `tech_output`: technology outputs
- `tech_input`: technology inputs
- `tech_impact`: technology impacts
- `tech_output_yield`: technology output yields
- `tech_input_yield`: technology input yields
- `tech_impact_yield`: technology impact yields
- `tech_ref`: technology reference product
- `tech_bid`: technology bid
- `tech_cap`: technology capacity
- `tech_alias`: technology alias

# Returns
- `M`: technology data structure
"""
function TechGen(tech_id, tech_output, tech_input, tech_impact, tech_output_yield, tech_input_yield, tech_impact_yield, tech_ref, tech_bid, tech_cap, tech_alias)
    # Build technology data structure
    M = TechDataStruct(
        tech_id, # ID
        tech_output, # Outputs
        tech_input, # Inputs
        tech_impact, # Impacts
        tech_output_yield, # OutputYields
        tech_input_yield, # InputYields
        tech_impact_yield, # ImpactYields
        tech_ref, # InputRef
        tech_bid, # bid
        tech_cap, # cap
        tech_alias # alias
    )
    return M
end
# Technology mapping
"""
    TechMapGen(techmap_id, techmap_node, techmap_time, techmap_tech)

Assemble the technology-mapping data structure `L`, which places technology
types (`techmap_tech`) at specific nodes and time periods.

# Returns
- `TechmapDataStruct` populated in field order: ID, node, time, tech.
"""
function TechMapGen(techmap_id, techmap_node, techmap_time, techmap_tech)
    # Field order must match the TechmapDataStruct definition.
    return TechmapDataStruct(techmap_id, techmap_node, techmap_time, techmap_tech)
end
# Supplier mapping
"""
    SupplyIndexGen(node_id,time_id,product_id,supply_id,supply_node,supply_time,supply_prod)

Build the supplier index set mapping i∈G to (n,t,p): `Gntp[n,t,p]` lists the
suppliers located at node `n` offering product `p` during time period `t`.
"""
function SupplyIndexGen(node_id,time_id,product_id,supply_id,supply_node,supply_time,supply_prod)
    # Every (n,t,p) combination starts as an empty list of supplier IDs.
    Gntp = DictListInit([node_id,time_id,product_id], InitStringArray)
    for sid in supply_id
        push!(Gntp[supply_node[sid], supply_time[sid], supply_prod[sid]], sid)
    end
    return Gntp
end
# Consumer mapping
"""
    DemandIndexGen(node_id,time_id,product_id,demand_id,demand_node,demand_time,demand_prod)

Build the consumer index set mapping j∈D to (n,t,p): `Dntp[n,t,p]` lists the
consumers located at node `n` demanding product `p` during time period `t`.
"""
function DemandIndexGen(node_id,time_id,product_id,demand_id,demand_node,demand_time,demand_prod)
    # Every (n,t,p) combination starts as an empty list of consumer IDs.
    Dntp = DictListInit([node_id,time_id,product_id], InitStringArray)
    for cid in demand_id
        push!(Dntp[demand_node[cid], demand_time[cid], demand_prod[cid]], cid)
    end
    return Dntp
end
# Environmental consumer mapping
"""
    EnvIndexGen(node_id,time_id,impact_id,supply_id,supply_node,supply_time,supply_impact,demand_id,demand_node,demand_time,demand_impact,env_id,env_node,env_time,env_impact)

Build the (n,t,q) impact index sets:
- `Gntq`: impact-bearing suppliers i∈G generating impact `q` at (n,t)
- `Dntq`: impact-bearing consumers j∈D generating impact `q` at (n,t)
- `Vntq`: environmental consumers v∈V absorbing impact `q` at (n,t)

Suppliers/consumers whose impact list is `[""]` are impactless and are simply
omitted from `Gntq`/`Dntq`. Note `env_impact[v]` is a single impact id, not a
list.
"""
function EnvIndexGen(node_id,time_id,impact_id,supply_id,supply_node,supply_time,supply_impact,demand_id,demand_node,demand_time,demand_impact,env_id,env_node,env_time,env_impact)
    Gntq = DictListInit([node_id,time_id,impact_id], InitStringArray)
    for sid in supply_id
        impacts = supply_impact[sid]
        if impacts != [""] # impactless suppliers are allowed; just not indexed
            for q in impacts
                push!(Gntq[supply_node[sid], supply_time[sid], q], sid)
            end
        end
    end
    Dntq = DictListInit([node_id,time_id,impact_id], InitStringArray)
    for cid in demand_id
        impacts = demand_impact[cid]
        if impacts != [""] # impactless consumers are allowed; just not indexed
            for q in impacts
                push!(Dntq[demand_node[cid], demand_time[cid], q], cid)
            end
        end
    end
    Vntq = DictListInit([node_id,time_id,impact_id], InitStringArray)
    for vid in env_id
        # each environmental consumer absorbs exactly one impact type
        push!(Vntq[env_node[vid], env_time[vid], env_impact[vid]], vid)
    end
    return Gntq, Dntq, Vntq
end
# Subset for suppliers with impacts
"""
    SuppliersWithImpacts(supply_id, supply_impact)

Return the subset of suppliers i ∈ G whose impact list is non-empty
(i.e., not the `[""]` placeholder used for impactless suppliers).

# Arguments
- `supply_id`: supplier IDs
- `supply_impact`: impact list per supplier ID

# Returns
- `Vector{String}` of supplier IDs with environmental impacts, `GQ`.
"""
function SuppliersWithImpacts(supply_id, supply_impact)
    # Typed container instead of `[]` (Vector{Any}); IDs are parsed as String.
    GQ = String[]
    for i in supply_id
        if supply_impact[i] != [""]
            push!(GQ, i)
        end
    end
    return GQ
end
# Subset for consumers with impacts
"""
    ConsumersWithImpacts(demand_id, demand_impact)

Return the subset of consumers j ∈ D whose impact list is non-empty
(i.e., not the `[""]` placeholder used for impactless consumers).

# Arguments
- `demand_id`: consumer IDs
- `demand_impact`: impact list per consumer ID

# Returns
- `Vector{String}` of consumer IDs with environmental impacts, `DQ`.
"""
function ConsumersWithImpacts(demand_id, demand_impact)
    # Typed container instead of `[]` (Vector{Any}); IDs are parsed as String.
    DQ = String[]
    for j in demand_id
        if demand_impact[j] != [""]
            push!(DQ, j)
        end
    end
    return DQ
end
# Technology input/output mapping index sets
"""
    TechProductIndexSetGen(node_id, time_id, product_id, tech_output, tech_input, techmap_id, techmap_node, techmap_time, techmap_tech)

Build the technology/product index sets:
- `NTPgenl[n,t,p]`: installed technologies l ∈ L at (n,t) whose type produces `p`
- `NTPconl[n,t,p]`: installed technologies l ∈ L at (n,t) whose type consumes `p`
"""
function TechProductIndexSetGen(node_id, time_id, product_id, tech_output, tech_input, techmap_id, techmap_node, techmap_time, techmap_tech)
    # Every (n,t,p) combination starts as an empty list of techmap IDs.
    NTPgenl = DictListInit([node_id,time_id,product_id], InitStringArray)
    NTPconl = DictListInit([node_id,time_id,product_id], InitStringArray)
    for lmap in techmap_id
        tech = techmap_tech[lmap]
        nd = techmap_node[lmap]
        tm = techmap_time[lmap]
        for prod in tech_output[tech]
            push!(NTPgenl[nd,tm,prod], lmap)
        end
        for prod in tech_input[tech]
            push!(NTPconl[nd,tm,prod], lmap)
        end
    end
    return NTPgenl, NTPconl
end
# Technology input/output mapping index sets
"""
    TechImpactIndexSetGen(node_id, time_id, impact_id, tech_impact, techmap_id, techmap_node, techmap_time, techmap_tech)

Build the technology/impact index set `NTQgenl[n,t,q]`: installed technologies
l ∈ L at node `n` and time `t` whose type generates impact `q`.
"""
function TechImpactIndexSetGen(node_id, time_id, impact_id, tech_impact, techmap_id, techmap_node, techmap_time, techmap_tech)
    # Every (n,t,q) combination starts as an empty list of techmap IDs.
    NTQgenl = DictListInit([node_id,time_id,impact_id], InitStringArray)
    for lmap in techmap_id
        tech = techmap_tech[lmap]
        for q in tech_impact[tech]
            push!(NTQgenl[techmap_node[lmap], techmap_time[lmap], q], lmap)
        end
    end
    return NTQgenl
end
### Parameter generation functions
"""
    par_gMAX(node_id,time_id,product_id,supply_id,supply_node,supply_time,supply_prod,supply_cap)

Nodal supply capacity `gMAX[n,t,p]`: total capacity of suppliers located at
node `n`, offering product `p`, during time period `t`. All (n,t,p)
combinations are initialized to 0.0.
"""
function par_gMAX(node_id,time_id,product_id,supply_id,supply_node,supply_time,supply_prod,supply_cap)
    gMAX = DictInit([node_id,time_id,product_id], 0.0)
    # Single pass over suppliers instead of the O(|N||T||P||G|) nested scan.
    # The haskey guard preserves the original behavior of ignoring suppliers
    # whose (node,time,product) is not in the declared id lists.
    for i in supply_id
        key = (supply_node[i], supply_time[i], supply_prod[i])
        if haskey(gMAX, key)
            gMAX[key] += supply_cap[i]
        end
    end
    return gMAX
end
"""
    par_dMAX(node_id,time_id,product_id,demand_id,demand_node,demand_time,demand_prod,demand_cap)

Nodal demand capacity `dMAX[n,t,p]`: total capacity of consumers located at
node `n`, demanding product `p`, during time period `t`. All (n,t,p)
combinations are initialized to 0.0.
"""
function par_dMAX(node_id,time_id,product_id,demand_id,demand_node,demand_time,demand_prod,demand_cap)
    dMAX = DictInit([node_id,time_id,product_id], 0.0)
    # Single pass over consumers instead of the O(|N||T||P||D|) nested scan.
    # The haskey guard preserves the original behavior of ignoring consumers
    # whose (node,time,product) is not in the declared id lists.
    for j in demand_id
        key = (demand_node[j], demand_time[j], demand_prod[j])
        if haskey(dMAX, key)
            dMAX[key] += demand_cap[j]
        end
    end
    return dMAX
end
"""
    par_eMAX(node_id,time_id,impact_id,env_id,env_node,env_time,env_impact,env_cap)

Nodal impact capacity `eMAX[n,t,q]`: total capacity of environmental consumers
located at node `n`, absorbing impact `q`, during time period `t`. All (n,t,q)
combinations are initialized to 0.0.
"""
function par_eMAX(node_id,time_id,impact_id,env_id,env_node,env_time,env_impact,env_cap)
    eMAX = DictInit([node_id,time_id,impact_id], 0.0)
    # Single pass over environmental consumers instead of the nested scan.
    # The haskey guard preserves the original behavior of ignoring entries
    # whose (node,time,impact) is not in the declared id lists.
    for v in env_id
        key = (env_node[v], env_time[v], env_impact[v])
        if haskey(eMAX, key)
            eMAX[key] += env_cap[v]
        end
    end
    return eMAX
end
function par_Ξ³iq(GQ,supply_impact,supply_impact_yield)
### yield of impact q from supplier i
Ξ³iq = Dict()
for i in GQ
for q in supply_impact[i] # list of impacts q generated by supplying i
Ξ³iq[i,q] = supply_impact_yield[i,q]
end
end
return Ξ³iq
end
function par_Ξ³jq(DQ,demand_impact,demand_impact_yield)
### yield of impact q from consumer j
Ξ³jq = Dict()
for j in DQ
for q in demand_impact[j] # list of impacts q generated by consuming j
Ξ³jq[j,q] = demand_impact_yield[j,q]
end
end
return Ξ³jq
end
function par_Ξ³aq(arc_id,impact_id,arc_len,arc_dur,impact_transport_coeff,impact_storage_coeff)
### yield of impact q from transport across arc a
Ξ³aq = Dict()
for a in arc_id
for q in impact_id
# includes both spatial and temporal dimensions
# from model implementation: (Q.transport_coeff[q]*A.len[a] + Q.storage_coeff[q]*A.dur[a])
Ξ³aq[a,q] = impact_transport_coeff[q]*arc_len[a] + impact_storage_coeff[q]*arc_dur[a]
end
end
return Ξ³aq
end
function par_Ξ³mp(tech_id,tech_output,tech_input,tech_output_yield,tech_input_yield)
### yield of product p in technology m
Ξ³mp = Dict()
for m in tech_id
for p_gen in tech_output[m] # list of products made by m
Ξ³mp[m,p_gen] = tech_output_yield[m,p_gen]
end
for p_con in tech_input[m] # list of products consumed by m
Ξ³mp[m,p_con] = tech_input_yield[m,p_con]
end
end
return Ξ³mp
end
function par_Ξ³mq(tech_id,tech_impact,tech_impact_yield)
### yield of impact q from technology m
Ξ³mq = Dict() # yield of i from q in technology t
for m in tech_id
for q in tech_impact[m]
Ξ³mq[m,q] = tech_impact_yield[m,q]# == tech_impact_stoich[m,q]/tech_input_stoich[m,pref]
end
end
return Ξ³mq
end
function par_ΞΎgenMAX(techmap_id,techmap_tech,product_id,tech_output,tech_output_yield,tech_cap)
### Maximum production levels
#ΞΎgenMAX = DictInit([tech_id,node_id,time_id,product_id],0.0)
ΞΎgenMAX = DictInit([techmap_id,product_id],0.0)
for l in techmap_id
m = techmap_tech[l]
for p in tech_output[m]
ΞΎgenMAX[l,p] = tech_output_yield[m,p]*tech_cap[m]
end
end
return ΞΎgenMAX
end
function par_ΞΎconMAX(techmap_id,techmap_tech,product_id,tech_input,tech_input_yield,tech_cap)
### Maximum consumption levels
#ΞΎconMAX = DictInit([tech_id,node_id,time_id,product_id],0.0)
ΞΎconMAX = DictInit([techmap_id,product_id],0.0)
for l in techmap_id
m = techmap_tech[l]
for p in tech_input[m]
ΞΎconMAX[l,p] = tech_input_yield[m,p]*tech_cap[m]
end
end
return ΞΎconMAX
end
function par_ΞΎenvMAX(techmap_id,techmap_tech,impact_id,tech_impact,tech_impact_yield,tech_cap)
### Maximum impact generation levels
#ΞΎenvMAX = DictInit([tech_id,node_id,time_id,impact_id],0.0)
ΞΎenvMAX = DictInit([techmap_id,impact_id],0.0)
for l in techmap_id
m = techmap_tech[l]
for q in tech_impact[m]
ΞΎenvMAX[l,q] = tech_impact_yield[m,q]*tech_cap[m]
end
end
return ΞΎenvMAX
end | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 1687 | module CoordinatedSupplyChains
################################################################################
### IMPORT LIST
using DelimitedFiles
using JuMP
using HiGHS
################################################################################
### EXPORT LIST
#= List all usable functions here; this makes them callable to users =#
export RunCSC, BuildModelData, BuildModel, GetModelStats, SolveModel, PostSolveCalcs, SaveSolution
################################################################################
### INCLUDE STATEMENTS FOR CODE IN SEPARATE FILES
# Supporting functions
#= Useful functions that support model building; e.g., distance calculations,
dictionary initialization, etc.; not for export =#
include("SupportFunctions.jl")
# Data structure definitions
#= These describe the primary data structures that the model will use; nothing to export =#
include("StructureDefns.jl")
#= These functions help build the data structures for the model; EXPORT =#
include("BuilderFunctions.jl")
# Data import functions
#= Functions that import individual model case studies and build the data structures
for a market model; EXPORT =#
include("DataSetup.jl")
# Model building functions; EXPORT
include("ModelSetup.jl")
# Workflow functions to simplify use; EXPORT
include("WorkflowFunctions.jl")
################################################################################
### CONSTANT VALUES
# Banner string used to delimit console output sections
const PrintSpacer = "*"^50
# Solver used when callers do not pass an explicit optimizer keyword
const DefaultOptimizer = HiGHS.Optimizer
end
#Test lines; delete once testing documentation is done.
#RunCSC("/Users/ptominac/Documents/environmentaleconomics/BilevelImpactMarkets/Code/ExtendedTestSets/NoArcs", optimizer=Gurobi.Optimizer) | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 18238 | ################################################################################
### IMPORT DATA AND BUILD DATA STRUCTURES
function BuildModelData(DataDir,UseArcLengths)
    """
    Imports source data from given folder and builds model data structures
    Inputs:
        -> DataDir: a directory to a folder containing model data
        -> UseArcLengths: if true, read arc lengths from column 5 of the arc
           CSV; otherwise compute great-circle lengths from node coordinates
    Returns:
        -> T: struct for time data
        -> N: struct for node data
        -> P: struct for product data
        -> Q: struct for impact data
        -> A: struct for arc data
        -> D: struct for demand data
        -> S: struct for supply data
        -> V: struct for environmental consumer data
        -> M: struct for technology data
        -> L: struct for technology mapping data
        -> Subsets: struct containing subsets and inersection sets of the above
        -> Pars: struct containing calculated parameters
        -> CF: control-flow flags (UseTime, UseArcs, UseTechs, UseImpacts)
    """
    ################################################################################
    ### DATA SETUP
    ### Time data parsing
    # Optional file: a failed read means the case study has no time dimension,
    # in which case a single dummy period "T0" with zero duration is created.
    time_data = try
        readdlm(joinpath(DataDir,"csvdata_time.csv"),',',Any,comments=true) # import csv data
    catch
        false
    end
    UseTime = true # assume time data exists by default
    if time_data == false
        UseTime = false
        time_id = Vector{String}(["T0"])
        time_dur = Dict{String,Float64}(time_id[1] => 0)
    else
        time_id = convert(Vector{String}, time_data[:, 1])
        time_dur = Dict{String,Float64}(zip(time_id, convert(Array{Float64}, time_data[:, 2]))) # duration
    end
    ### Node data parsing (required file)
    node_data = readdlm(joinpath(DataDir,"csvdata_node.csv"),',',Any,comments=true) # import csv data
    node_id = convert(Vector{String}, node_data[:, 1])
    node_alias = Dict{String,String}(zip(node_id, convert(Vector{String}, node_data[:, 2]))) # node name
    node_lon = Dict{String,Float64}(zip(node_id, convert(Vector{Float64}, node_data[:, 3]))) # longitude
    node_lat = Dict{String,Float64}(zip(node_id, convert(Vector{Float64}, node_data[:, 4]))) # latitude
    ### Product data parsing (required file)
    product_data = readdlm(joinpath(DataDir,"csvdata_product.csv"),',',Any,comments=true) # import csv data
    product_id = convert(Vector{String}, product_data[:, 1])
    product_alias = Dict{String,String}(zip(product_id, convert(Vector{String}, product_data[:, 2]))) # product name
    product_transport_cost = Dict{String,Float64}(zip(product_id, convert(Vector{Float64}, product_data[:, 3]))) # product transport cost
    product_storage_cost = Dict{String,Float64}(zip(product_id, convert(Vector{Float64}, product_data[:, 4]))) # product storage cost
    ### Impact data parsing (optional file)
    impact_data = try
        readdlm(joinpath(DataDir,"csvdata_impact.csv"),',',Any,comments=true) # import csv data
    catch
        false # assign false just to get out of try/catch
    end
    # Control flow for scenario in which no impact data is provided
    UseImpacts = true # assume there will be impact data by default
    if impact_data == false
        UseImpacts = false # use UseImpacts as the control flow condition in main code
    end
    if UseImpacts # Parse impact data if it is defined
        impact_id = convert(Vector{String}, impact_data[:, 1])
        impact_alias = Dict{String,String}(zip(impact_id, convert(Vector{String}, impact_data[:, 2]))) # impact name & units
        impact_transport_coeff = Dict{String,Float64}(zip(impact_id, convert(Vector{Float64}, impact_data[:, 3]))) # impact production during transport
        impact_storage_coeff = Dict{String,Float64}(zip(impact_id, convert(Vector{Float64}, impact_data[:, 4]))) # impact production during storage
    else
        # Empty-but-typed containers so downstream JuMP iterators skip cleanly
        impact_id = Vector{String}()
        impact_alias = Dict{String,String}() # impact name & units
        impact_transport_coeff = Dict{String,Float64}() # impact production during transport
        impact_storage_coeff = Dict{String,Float64}() # impact production during storage
    end
    ### Arc data parsing (optional file)
    arc_data = try
        readdlm(joinpath(DataDir,"csvdata_arcs.csv"),',',Any,comments=true) # import csv data
    catch
        false # assign false just to get out of try/catch
    end
    # Control flow for scenario in which no arc data is provided
    UseArcs = true # assume there will be technology data by default
    if arc_data == false
        UseArcs = false # use UseArcs as the control flow condition in main code
    end
    if UseArcs # Parse arc data if it is defined
        arc_id = convert(Array{String}, arc_data[:, 1])
        arc_n = Dict{String,String}(zip(arc_id, convert(Vector{String}, arc_data[:, 2]))) # sending node "n"
        arc_m = Dict{String,String}(zip(arc_id, convert(Vector{String}, arc_data[:, 3]))) # receiving node "m"
        arc_cap = NumericListFromCSV2(arc_id, product_id, string.(arc_data[:, 4])) # arc capacities, by product
        if UseArcLengths
            arc_len = Dict{String,Float64}(zip(arc_id, convert(Vector{Float64}, arc_data[:, 5]))) # custom arc length
        else # i.e., calculate great circle arc lengths
            # Gather endpoint coordinates for every arc, then compute
            # great-circle distances in one vectorized call
            ref_lons = zeros(length(arc_id))
            ref_lats = zeros(length(arc_id))
            dest_lons = zeros(length(arc_id))
            dest_lats = zeros(length(arc_id))
            for a = 1:length(arc_id)
                ref_lons[a] = node_lon[arc_data[a, 2]]
                ref_lats[a] = node_lat[arc_data[a, 2]]
                dest_lons[a] = node_lon[arc_data[a, 3]]
                dest_lats[a] = node_lat[arc_data[a, 3]]
            end
            arc_len = Dict{String,Float64}(zip(arc_id,GreatCircle(ref_lons, ref_lats, dest_lons, dest_lats)))
        end
    else # there are no arcs, and all of these stay empty
        # NOTE: it is important that JuMP has these empty values so it's iterators can skip over the corresponding code
        arc_id = Vector{String}()
        arc_n = Dict{String,String}() # sending node "n"
        arc_m = Dict{String,String}() # receiving node "m"
        arc_cap = Dict{Tuple{String,String},Float64}() # arc capacities, by product
        arc_len = Dict{String,Float64}() # custom arc length
    end
    # Sanity check: multiple nodes without arcs is probably a data error
    if !UseArcs && length(node_id) > 1
        println("*"^10*" WARNING "*"*"^10)
        println("There is more than one NODE entry, but no ARC data is detected!")
        println("Check your data and make sure everything is as intended.")
        println("Code will proceed.")
        println("*"^31)
    end
    ### Demand data parsing (required file)
    demand_data = readdlm(joinpath(DataDir,"csvdata_demand.csv"),',',Any,comments=true) # import csv data
    demand_id = convert(Vector{String}, demand_data[:, 1])
    demand_node = Dict{String,String}(zip(demand_id, convert(Vector{String}, demand_data[:, 2]))) # demand node
    if UseTime
        demand_time = Dict{String,String}(zip(demand_id, convert(Vector{String}, demand_data[:, 3]))) # demand time
    else
        # No time dimension: every consumer is assigned the dummy period "T0"
        demand_time = Dict{String,String}(zip(demand_id, repeat(time_id,length(demand_id)))) # demand time
    end
    demand_prod = Dict{String,String}(zip(demand_id, convert(Vector{String}, demand_data[:, 4]))) # demand product
    demand_bid = Dict{String,Float64}(zip(demand_id, convert(Vector{Float64}, demand_data[:, 5]))) # demand bid
    demand_cap = Dict{String,Float64}(zip(demand_id, convert(Vector{Float64}, demand_data[:, 6]))) # demand capacity
    demand_impact = PurgeQuotes(TextListFromCSV(demand_id, string.(demand_data[:,7]))) # demand impacts
    demand_impact_yield = NumericListFromCSV2(demand_id, demand_impact, string.(demand_data[:, 8])) # demand impact yield factors
    ### Supply data parsing (required file)
    supply_data = readdlm(joinpath(DataDir,"csvdata_supply.csv"),',',Any,comments=true)
    supply_id = convert(Vector{String}, supply_data[:, 1])
    supply_node = Dict{String,String}(zip(supply_id, convert(Vector{String}, supply_data[:, 2]))) # supply node
    if UseTime
        supply_time = Dict{String,String}(zip(supply_id, convert(Vector{String}, supply_data[:, 3]))) # supply time
    else
        # No time dimension: every supplier is assigned the dummy period "T0"
        supply_time = Dict{String,String}(zip(supply_id, repeat(time_id,length(supply_id)))) # supply time
    end
    supply_prod = Dict{String,String}(zip(supply_id, convert(Vector{String}, supply_data[:, 4]))) # supply product
    supply_bid = Dict{String,Float64}(zip(supply_id, convert(Vector{Float64}, supply_data[:, 5]))) # supply bid
    supply_cap = Dict{String,Float64}(zip(supply_id, convert(Vector{Float64}, supply_data[:, 6]))) # supply capacity
    supply_impact = PurgeQuotes(TextListFromCSV(supply_id, string.(supply_data[:,7]))) # supply impacts
    supply_impact_yield = NumericListFromCSV2(supply_id, supply_impact, string.(supply_data[:, 8])) # supply impact yield factors
    ### Environmental stakeholder data parsing
    if UseImpacts # if impacts are undefined, definitely don't need impact consumption data
        env_data = readdlm(joinpath(DataDir,"csvdata_env.csv"),',',Any,comments=true) # import csv data
        env_id = convert(Vector{String}, env_data[:, 1])
        env_node = Dict{String,String}(zip(env_id, convert(Vector{String}, env_data[:, 2]))) # environmental node
        if UseTime
            env_time = Dict{String,String}(zip(env_id, convert(Vector{String}, env_data[:, 3]))) # environmental time
        else
            env_time = Dict{String,String}(zip(env_id, repeat(time_id,length(env_id)))) # environmental time
        end
        env_impact = Dict{String,String}(zip(env_id, convert(Vector{String}, env_data[:, 4]))) # environmental product
        env_bid = Dict{String,Float64}(zip(env_id, convert(Vector{Float64}, env_data[:, 5]))) # environmental bid
        env_cap = Dict{String,Float64}(zip(env_id, convert(Vector{Float64}, env_data[:, 6]))) # environmental capacity (Inf, in most cases)
    else
        env_id = Vector{String}()
        env_node = Dict{String,String}() # environmental node
        env_time = Dict{String,String}() # environmental time
        env_impact = Dict{String,String}() # environmental product
        env_bid = Dict{String,Float64}() # environmental bid
        env_cap = Dict{String,Float64}() # environmental capacity (Inf, in most cases)
    end
    ### Technology data parsing (optional file)
    tech_data = try
        readdlm(joinpath(DataDir,"csvdata_tech.csv"),',',Any,comments=true) # import csv data
    catch
        false # assign false just to get out of try/catch
    end
    # Control flow for scenario in which no technology data is provided
    UseTechs = true # assume there will be technology data by default
    if tech_data == false
        UseTechs = false # use UseTechs as the control flow condition in main code
    end
    if UseTechs # Parse technology data if it is defined
        tech_id = convert(Vector{String}, tech_data[:, 1])
        tech_output = TextListFromCSV(tech_id, convert(Vector{String}, tech_data[:,2])) # technology outputs
        tech_input = TextListFromCSV(tech_id, convert(Vector{String}, tech_data[:,3])) # technology inputs
        tech_impact = PurgeQuotes(TextListFromCSV(tech_id, convert(Vector{String}, tech_data[:,4]))) # technology impacts
        tech_output_yield = NumericListFromCSV2(tech_id, tech_output, string.(tech_data[:, 5])) # product yield factors
        tech_input_yield = NumericListFromCSV2(tech_id, tech_input, string.(tech_data[:, 6])) # product yield factors
        tech_impact_yield = NumericListFromCSV2(tech_id, tech_impact, string.(tech_data[:, 7])) # impact yield factors
        tech_ref = Dict(zip(tech_id, convert(Vector{String}, tech_data[:, 8]))) # reference product
        tech_bid = Dict(zip(tech_id, convert(Vector{Float64}, tech_data[:, 9]))) # technology bid (operating cost)
        tech_cap = Dict(zip(tech_id, convert(Vector{Float64}, tech_data[:, 10]))) # technology capacity (per time unit)
        tech_alias = Dict(zip(tech_id, convert(Vector{String}, tech_data[:, 11]))) # technology alias
        # Technology mapping data parsing (only meaningful when techs exist)
        techmap_data = readdlm(joinpath(DataDir,"csvdata_techmap.csv"),',',Any,comments=true) # import csv data
        techmap_id = convert(Vector{String}, techmap_data[:, 1])
        techmap_node = Dict{String,String}(zip(techmap_id, convert(Vector{String}, techmap_data[:, 2]))) # technology node (location)
        if UseTime
            techmap_time = Dict{String,String}(zip(techmap_id, convert(Vector{String}, techmap_data[:, 3]))) # technology time (availability)
        else
            techmap_time = Dict{String,String}(zip(techmap_id, repeat(time_id,length(techmap_id)))) # technology time (availability)
        end
        techmap_tech = Dict{String,String}(zip(techmap_id, convert(Vector{String}, techmap_data[:, 4]))) # technology type (from tech_id)
    else
        tech_id = Vector{String}()
        tech_output = Dict{String,Vector{String}}() # technology outputs
        tech_input = Dict{String,Vector{String}}() # technology inputs
        tech_impact = Dict{String,Vector{String}}() # technology impacts
        tech_output_yield = Dict{Tuple{String,String},Float64}() # product yield factors
        tech_input_yield = Dict{Tuple{String,String},Float64}() # product yield factors
        tech_impact_yield = Dict{Tuple{String,String},Float64}() # impact yield factors
        tech_ref = Dict{String,String}() # reference product
        tech_bid = Dict{String,Float64}() # technology bid (operating cost)
        tech_cap = Dict{String,Float64}() # technology capacity (per time unit)
        tech_alias = Dict{String,String}() # technology alias
        # Technology mapping data parsing
        techmap_id = Vector{String}()
        techmap_node = Dict{String,String}() # technology node (location)
        techmap_time = Dict{String,String}() # technology time (availability)
        techmap_tech = Dict{String,String}() # technology type (from tech_id)
    end
    ################################################################################
    ### GENERATE INDEX SETS
    # temporal data structure
    T, T1, Tt, TT, Tprior, Tpost = TimeGen(time_id, time_dur)
    # spatial data strucure
    N = NodeGen(node_id, node_alias, node_lon, node_lat)
    # product data structure
    P = ProductGen(product_id, product_alias, product_transport_cost, product_storage_cost)
    # impact data structure
    Q = ImpactGen(impact_id, impact_alias, impact_transport_coeff, impact_storage_coeff)
    # spatio-temporal arc set from geographical arc data
    A, Ain, Aout = ArcGen(time_id, time_dur, node_id, product_id, product_transport_cost, product_storage_cost, arc_id, arc_n, arc_m, arc_cap, arc_len)
    # demand data structure
    D = DemandGen(demand_id, demand_node, demand_time, demand_prod, demand_bid, demand_cap, demand_impact, demand_impact_yield)
    Dntp = DemandIndexGen(node_id,time_id,product_id,demand_id,demand_node,demand_time,demand_prod)
    DQ = ConsumersWithImpacts(demand_id, demand_impact)
    # supply data structure
    G = SupplyGen(supply_id, supply_node, supply_time, supply_prod, supply_bid, supply_cap, supply_impact, supply_impact_yield)
    Gntp = SupplyIndexGen(node_id,time_id,product_id,supply_id,supply_node,supply_time,supply_prod)
    GQ = SuppliersWithImpacts(supply_id, supply_impact)
    # environmental data structure
    V = EnvGen(env_id, env_node, env_time, env_impact, env_bid, env_cap)
    Gntq, Dntq, Vntq = EnvIndexGen(node_id,time_id,impact_id,supply_id,supply_node,supply_time,supply_impact,demand_id,demand_node,demand_time,demand_impact,env_id,env_node,env_time,env_impact)
    # technology data structure
    M = TechGen(tech_id, tech_output, tech_input, tech_impact, tech_output_yield, tech_input_yield, tech_impact_yield, tech_ref, tech_bid, tech_cap, tech_alias)
    # technology mapping data structure
    L = TechMapGen(techmap_id, techmap_node, techmap_time, techmap_tech)
    # technology indexing set generation
    NTPgenl, NTPconl = TechProductIndexSetGen(node_id, time_id, product_id, tech_output, tech_input, techmap_id, techmap_node, techmap_time, techmap_tech)
    NTQgenl = TechImpactIndexSetGen(node_id, time_id, impact_id, tech_impact, techmap_id, techmap_node, techmap_time, techmap_tech)
    ################################################################################
    ### GROUP SETS INTO STRUCT
    #= NOTE: sets are generated in the functions above because it seems to pass data
    back and forth fewer times. This may or may not be the correct interpreation.
    Consider testing it out at some point; there are likely efficiencies to be found =#
    Subsets = SetStruct(T1, Tt, TT, Tprior, Tpost, Ain, Aout, Dntp, Gntp, Dntq, Gntq, Vntq, DQ, GQ, NTPgenl, NTPconl, NTQgenl)
    ################################################################################
    ### GENERATE CALCULATED PARAMETERS
    # For code readability, break down by parameter; depricate old single-function approach
    gMAX = par_gMAX(node_id,time_id,product_id,supply_id,supply_node,supply_time,supply_prod,supply_cap)
    dMAX = par_dMAX(node_id,time_id,product_id,demand_id,demand_node,demand_time,demand_prod,demand_cap)
    eMAX = par_eMAX(node_id,time_id,impact_id,env_id,env_node,env_time,env_impact,env_cap)
    Ξ³iq = par_Ξ³iq(GQ,supply_impact,supply_impact_yield)
    Ξ³jq = par_Ξ³jq(DQ,demand_impact,demand_impact_yield)
    Ξ³aq = par_Ξ³aq(A.ID,impact_id,A.len,A.dur,impact_transport_coeff,impact_storage_coeff)
    Ξ³mp = par_Ξ³mp(tech_id,tech_output,tech_input,tech_output_yield,tech_input_yield)
    Ξ³mq = par_Ξ³mq(tech_id,tech_impact,tech_impact_yield)
    ΞΎgenMAX = par_ΞΎgenMAX(techmap_id,techmap_tech,product_id,tech_output,tech_output_yield,tech_cap)
    ΞΎconMAX = par_ΞΎconMAX(techmap_id,techmap_tech,product_id,tech_input,tech_input_yield,tech_cap)
    ΞΎenvMAX = par_ΞΎenvMAX(techmap_id,techmap_tech,impact_id,tech_impact,tech_impact_yield,tech_cap)
    # Build parameter structure with required data or nothing entries
    Pars = ParStruct(gMAX, dMAX, eMAX, Ξ³iq, Ξ³jq, Ξ³aq, Ξ³mp, Ξ³mq, ΞΎgenMAX, ΞΎconMAX, ΞΎenvMAX)
    ################################################################################
    ### POPULATE CONTROL FLOW STRUCTURE
    CF = DataCF(UseTime,UseArcs,UseTechs,UseImpacts)
    ################################################################################
    ### RETURN
    return T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, CF
end
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 24748 | ################################################################################
### BUILD JuMP MODEL
function BuildModel(T, N, P, Q, A, D, G, V, M, L, Subsets, Pars; optimizer=DefaultOptimizer)
    """
    Builds a coordination model from data structures
    Inputs:
        - time struct (T)
        - node struct (N)
        - product struct (P)
        - impact struct (Q)
        - arc struct (A)
        - demand struct (D)
        - supply struct (G)
        - impact struct (V)
        - technology data struct (M)
        - technology mapping struct (L)
        - Subsets struct (Subsets)
        - Parameter struct (Pars)
        - optimizer (keyword): JuMP-compatible optimizer; defaults to DefaultOptimizer
    Outputs:
        - JuMP model (MOD)
    """
    ################################################################################
    ### MODEL STATEMENT
    MOD = JuMP.Model(optimizer)
    ################################################################################
    ### VARIABLES
    # All allocations are nonnegative and bounded above by the data-set capacities
    @variable(MOD, 0 <= g[i=G.ID] <= G.cap[i]) # supplier allocation by ID
    @variable(MOD, 0 <= d[j=D.ID] <= D.cap[j]) # consumer allocation by ID
    @variable(MOD, 0 <= e[v=V.ID] <= V.cap[v]) # impact allocation by ID
    @variable(MOD, 0 <= f[a=A.ID,p=P.ID] <= A.cap[a,p]) # transport allocation (arc-indexed)
    @variable(MOD, 0 <= ΞΎ[l=L.ID] <= M.cap[L.tech[l]]) # technology allocation w.r.t. reference product
    ################################################################################
    ### EQUATIONS
    # Constraint expressions
    # Product-balance terms at (n,t,p): supplied, shipped in/out, produced/consumed
    PB_i = @expression(MOD, [n=N.ID,t=T.ID,p=P.ID], sum(g[i] for i in Subsets.Gntp[n,t,p]))
    PB_j = @expression(MOD, [n=N.ID,t=T.ID,p=P.ID], sum(d[j] for j in Subsets.Dntp[n,t,p]))
    PB_a_in = @expression(MOD, [n=N.ID,t=T.ID,p=P.ID], sum(f[a,p] for a in Subsets.Ain[n,t]))
    PB_a_out = @expression(MOD, [n=N.ID,t=T.ID,p=P.ID], sum(f[a,p] for a in Subsets.Aout[n,t]))
    PB_m_gen = @expression(MOD, [n=N.ID,t=T.ID,p=P.ID], sum(Pars.Ξ³mp[L.tech[l],p]*ΞΎ[l] for l in Subsets.NTPgenl[n,t,p]))
    PB_m_con = @expression(MOD, [n=N.ID,t=T.ID,p=P.ID], sum(Pars.Ξ³mp[L.tech[l],p]*ΞΎ[l] for l in Subsets.NTPconl[n,t,p]))
    # Impact-balance terms at (n,t,q): generated by suppliers, consumers,
    # transport, and technologies; absorbed by environmental consumers
    QB_i = @expression(MOD, [n=N.ID,t=T.ID,q=Q.ID], sum(Pars.Ξ³iq[i,q]*g[i] for i in Subsets.Gntq[n,t,q]))
    QB_j = @expression(MOD, [n=N.ID,t=T.ID,q=Q.ID], sum(Pars.Ξ³jq[j,q]*d[j] for j in Subsets.Dntq[n,t,q]))
    QB_a = @expression(MOD, [n=N.ID,t=T.ID,q=Q.ID], sum(Pars.Ξ³aq[a,q]*f[a,p] for a in Subsets.Ain[n,t], p in P.ID))
    QB_m = @expression(MOD, [n=N.ID,t=T.ID,q=Q.ID], sum(Pars.Ξ³mq[L.tech[l],q]*ΞΎ[l] for l in Subsets.NTQgenl[n,t,q]))
    QB_v = @expression(MOD, [n=N.ID,t=T.ID,q=Q.ID], sum(e[v] for v in Subsets.Vntq[n,t,q]))
    # Constraints
    @constraint(MOD, ProductBalance[n=N.ID,t=T.ID,p=P.ID],
        PB_i[n,t,p] + PB_a_in[n,t,p] + PB_m_gen[n,t,p] - PB_j[n,t,p] - PB_a_out[n,t,p] - PB_m_con[n,t,p] == 0)
    @constraint(MOD, ImpactBalance[n=N.ID,t=T.ID,q=Q.ID],
        QB_i[n,t,q] + QB_j[n,t,q] + QB_a[n,t,q] + QB_m[n,t,q] - QB_v[n,t,q] == 0)
    # Objective function expressions
    demand_obj = @expression(MOD, sum(D.bid[j]*d[j] for j in D.ID))
    supply_obj = @expression(MOD, sum(G.bid[i]*g[i] for i in G.ID))
    env_obj = @expression(MOD, sum(V.bid[v]*e[v] for v in V.ID))
    transport_obj = @expression(MOD, sum(A.bid[a,p]*f[a,p] for a in A.ID, p in P.ID))
    technology_obj = @expression(MOD, sum(M.bid[L.tech[l]]*ΞΎ[l] for l in L.ID))
    # Objective function: maximize total surplus (revenues minus costs)
    @objective(MOD, Max, demand_obj + env_obj - supply_obj - transport_obj - technology_obj)
    # Return
    return MOD
end
################################################################################
### ASSEMBLE MODEL STATISTICS
function GetModelStats(MOD, DisplayMode=true)
    """
    Collects size statistics for the JuMP model MOD and optionally echoes them.
    Inputs:
        - MOD: a JuMP model
        - DisplayMode: when true (default), print the statistics to the REPL
    Outputs:
        - ModelStatStruct with variable and constraint counts
    Note: relies on the global PrintSpacer for display formatting.
    """
    # Count each constraint class once; VariableRef-in-set constraints are bounds
    nvars = length(all_variables(MOD))
    affine_leq = num_constraints(MOD,AffExpr, MOI.LessThan{Float64})
    affine_geq = num_constraints(MOD,AffExpr, MOI.GreaterThan{Float64})
    affine_eq = num_constraints(MOD,AffExpr, MOI.EqualTo{Float64})
    bound_leq = num_constraints(MOD,VariableRef, MOI.LessThan{Float64})
    bound_geq = num_constraints(MOD,VariableRef, MOI.GreaterThan{Float64})
    bound_eq = num_constraints(MOD,VariableRef, MOI.EqualTo{Float64})
    # Aggregate into the reported categories
    var_bounds = bound_leq + bound_geq
    model_ineq = affine_leq + affine_geq
    model_eq = affine_eq
    total_ineq = model_ineq + var_bounds
    total_eq = bound_eq + affine_eq
    # Optionally display statistics to the REPL (default: true)
    if DisplayMode
        println(PrintSpacer*"\nModel statistics:")
        println("Variables: "*string(nvars))
        println("Total inequality constraints: "*string(total_ineq))
        println("Total equality constraints: "*string(total_eq))
        println("Variable bounds: "*string(var_bounds))
        println("Model inequality constraints: "*string(model_ineq))
        println("Model equality constraints: "*string(model_eq))
        println(PrintSpacer)
    end
    return ModelStatStruct(nvars, total_ineq, total_eq, var_bounds, model_ineq, model_eq)
end
################################################################################
### SOLVE JuMP MODEL
function SolveModel(MOD)
    """
    Optimizes the JuMP coordination model MOD and packages the results.
    Inputs:
        - MOD: JuMP coordination model with variables g, d, e, f, ΞΎ and
          constraints ProductBalance, ImpactBalance registered
    Outputs:
        - SolutionStruct holding solver statuses, the objective value, primal
          variable values, and the balance-constraint duals (nodal prices)
    """
    # Solve and report solver status to the REPL
    println("Solving original problem...")
    JuMP.optimize!(MOD)
    status_term = string(termination_status(MOD))
    status_primal = string(primal_status(MOD))
    status_dual = string(dual_status(MOD))
    println("Termination status: "*status_term)
    println("Primal status: "*status_primal)
    println("Dual status: "*status_dual)
    # Extract primal values; balance-constraint duals are the nodal prices
    objective = JuMP.objective_value(MOD)
    supply = JuMP.value.(MOD[:g])
    demand = JuMP.value.(MOD[:d])
    env = JuMP.value.(MOD[:e])
    transport = JuMP.value.(MOD[:f])
    tech = JuMP.value.(MOD[:ΞΎ])
    price_p = JuMP.dual.(MOD[:ProductBalance])
    price_q = JuMP.dual.(MOD[:ImpactBalance])
    return SolutionStruct(status_term, status_primal, status_dual, objective,
        supply, demand, env, transport, tech, price_p, price_q)
end
################################################################################
### CALCULATE ADDITIONAL SOLUTION VALUES
function PostSolveCalcs(T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, SOL, CF)
    """
    Calculates post-solve parameter values from the solved model
    Inputs:
        - Set data structs: T, N, P, Q, A, D, G, V, M, L
        - Subsets: set intersection struct
        - Pars: derived parameter struct
        - SOL: solution struct returned by SolveModel()
        - CF: data control flow struct (kept for a uniform interface)
    Outputs:
        - PostSolveValues struct: nodal totals, technology conversion amounts,
          stakeholder-level prices, and stakeholder profits
    """
    ### determine nodal supply, demand, environmental consumption
    gNTP = DictInit([N.ID,T.ID,P.ID], 0.0)
    dNTP = DictInit([N.ID,T.ID,P.ID], 0.0)
    eNTQ = DictInit([N.ID,T.ID,Q.ID], 0.0)
    for n in N.ID
        for t in T.ID
            for p in P.ID
                # init=0 keeps the reductions defined when an index subset is empty
                gNTP[n,t,p] = reduce(+,SOL.g[i] for i in Subsets.Gntp[n,t,p];init=0)
                dNTP[n,t,p] = reduce(+,SOL.d[j] for j in Subsets.Dntp[n,t,p];init=0)
            end
            for q in Q.ID
                eNTQ[n,t,q] = reduce(+,SOL.e[v] for v in Subsets.Vntq[n,t,q];init=0)
            end
        end
    end
    ### determine consumption/generation values based on ΞΎ[l]
    ΞΎgen = DictInit([L.ID,P.ID], 0.0)
    ΞΎcon = DictInit([L.ID,P.ID], 0.0)
    ΞΎenv = DictInit([L.ID,Q.ID], 0.0)
    for l in L.ID
        # scale the technology extent by the product/impact yield coefficients
        for p in M.Outputs[L.tech[l]]
            ΞΎgen[l,p] = M.OutputYields[L.tech[l],p]*SOL.ΞΎ[l]
        end
        for p in M.Inputs[L.tech[l]]
            ΞΎcon[l,p] = M.InputYields[L.tech[l],p]*SOL.ΞΎ[l]
        end
        for q in M.Impacts[L.tech[l]]
            ΞΎenv[l,q] = M.ImpactYields[L.tech[l],q]*SOL.ΞΎ[l]
        end
    end
    ### calculate supply and demand impact values (prices)
    # Supplier impact value
    Ο_iq = DictInit([G.ID,Q.ID], 0.0)
    for i in Subsets.GQ
        for q in G.Impacts[i]
            Ο_iq[i,q] = Pars.Ξ³iq[i,q]*SOL.Οq[G.node[i],G.time[i],q]
        end
    end
    # Consumer impact value
    Ο_jq = DictInit([D.ID,Q.ID], 0.0)
    for j in Subsets.DQ
        for q in D.Impacts[j]
            Ο_jq[j,q] = Pars.Ξ³jq[j,q]*SOL.Οq[D.node[j],D.time[j],q]
        end
    end
    ### calculate transport and technology price values
    # transportation price: receiving-end nodal price less sending-end nodal price
    Ο_a = DictInit([A.ID,P.ID], 0.0)
    # transportation price associated with impact q in Q
    Ο_aq = DictInit([A.ID,Q.ID], 0.0)
    for a in A.ID
        for p in P.ID
            Ο_a[a,p] = SOL.Οp[A.n_recv[a],A.t_recv[a],p] - SOL.Οp[A.n_send[a],A.t_send[a],p]
        end
        for q in Q.ID
            Ο_aq[a,q] = Pars.Ξ³aq[a,q]*SOL.Οq[A.n_recv[a],A.t_recv[a],q]
        end
    end
    # technology price: yield-weighted output prices less yield-weighted input prices
    Ο_m = DictInit([M.ID,N.ID,T.ID], 0.0)
    # technology price associated with impact q in Q
    Ο_mq = DictInit([M.ID,N.ID,T.ID,Q.ID], 0.0)
    for m in M.ID
        for n in N.ID
            for t in T.ID
                Ο_m[m,n,t] = reduce(+,Pars.Ξ³mp[m,p]*SOL.Οp[n,t,p] for p in M.Outputs[m];init=0) - reduce(+,Pars.Ξ³mp[m,p]*SOL.Οp[n,t,p] for p in M.Inputs[m];init=0)
                for q in M.Impacts[m]
                    Ο_mq[m,n,t,q] = Pars.Ξ³mq[m,q]*SOL.Οq[n,t,q]
                end
            end
        end
    end
    ### Profits
    # Supplier profits
    Οi = DictInit([G.ID], 0.0)
    for i in G.ID
        Οi[i] = (SOL.Οp[G.node[i],G.time[i],G.prod[i]] + reduce(+,Ο_iq[i,q] for q in G.Impacts[i];init=0) - G.bid[i])*SOL.g[i]
    end
    # Consumer profits
    Οj = DictInit([D.ID], 0.0)
    for j in D.ID
        Οj[j] = (D.bid[j] - SOL.Οp[D.node[j],D.time[j],D.prod[j]] + reduce(+,Ο_jq[j,q] for q in D.Impacts[j];init=0))*SOL.d[j]
    end
    # Environmental consumer profits
    Οv = DictInit([V.ID], 0.0)
    for v in V.ID
        Οv[v] = (V.bid[v] - SOL.Οq[V.node[v],V.time[v],V.impact[v]])*SOL.e[v]
    end
    # Technology profits, by techmap index
    Οl = DictInit([L.ID], 0.0)
    for l in L.ID
        m = L.tech[l]
        n = L.node[l]
        t = L.time[l]
        Οl[l] = (Ο_m[m,n,t] + reduce(+,Ο_mq[m,n,t,q] for q in M.Impacts[m];init=0) - M.bid[m])*SOL.ΞΎ[l]
    end
    # Transportation profits
    # BUG FIX: ";init=0" previously sat outside the reduce() call, i.e.
    # (expr; init=0), which Julia parses as a block expression evaluating to 0,
    # so every transport profit was reported as 0. The keyword now belongs to
    # reduce(), matching all of the other reductions above.
    Οa = DictInit([A.ID,P.ID], 0.0)
    for a in A.ID
        for p in P.ID
            Οa[a,p] = (Ο_a[a,p] + reduce(+,Ο_aq[a,q] for q in Q.ID;init=0) - A.bid[a,p])*SOL.f[a,p]
        end
    end
    ### Return
    return PostSolveValues(gNTP, dNTP, eNTQ, ΞΎgen, ΞΎcon, ΞΎenv, Ο_iq, Ο_jq, Ο_a, Ο_aq, Ο_m, Ο_mq, Οi, Οj, Οv, Οl, Οa)
end
################################################################################
### SAVE SOLUTION TO FILE
function SaveSolution(filedir, ModelStats, SOL, POST, T, N, P, Q, A, D, G, V, M, L, CF)
    """
    Saves the solution from a JuMP model to a text file
    Inputs:
        - filedir -> location for case study
        - JuMP solution struct (SOL)
        - JuMP model statistics (ModelStats)
        - Post-solve calculation struct (POST)
        - Set structures: T, N, P, Q, A, D, G, V, M, L
        - CF -> control flow struct; gates output for optional model
          features (arcs, technologies, impacts)
    Outputs:
        - returns nothing; writes _SolutionData.txt plus per-variable .csv
          files under <filedir>/_SolutionData/
    """
    ### Create solution directory if not present
    SolutionDir = joinpath(filedir, "_SolutionData")
    if !isdir(SolutionDir)
        mkdir(SolutionDir)
    end
    ### Write solution data to one single text file
    SolutionFile = joinpath(SolutionDir, "_SolutionData.txt")
    solution = open(SolutionFile, "w")
    # print solution stats to file
    print(solution, PrintSpacer*"\nTermination status: "*string(SOL.TermStat))
    print(solution, "\nPrimal status: "*string(SOL.PrimalStat))
    print(solution, "\nDual status: "*string(SOL.DualStat))
    print(solution, "\n"*PrintSpacer*"\nNumber of variables: "*string(ModelStats.Variables))
    print(solution, "\nTotal inequality constraints: "*string(ModelStats.TotalInequalityConstraints))
    print(solution, "\nTotal equality constraints: "*string(ModelStats.TotalEqualityConstraints))
    print(solution, "\nNumber of variable bounds: "*string(ModelStats.VariableBounds))
    print(solution, "\nModel inequality constraints: "*string(ModelStats.ModelInequalityConstrtaints))
    print(solution, "\nModel equality constraints: "*string(ModelStats.ModelEqualityConstraints))
    # print solution data to file; optional sections are gated by CF flags
    print(solution, "\n"*PrintSpacer*"\nObjective value: "*string(SOL.z))
    FilePrint(SOL.g,[G.ID],solution;Header=PrintSpacer,DataName="Supply Allocations:",VarName="g")
    FilePrint(SOL.d,[D.ID],solution;Header=PrintSpacer,DataName="Demand Allocations:",VarName="d")
    if CF.UseImpacts
        FilePrint(SOL.e,[V.ID],solution;Header=PrintSpacer,DataName="Environmental Consumption Allocations:",VarName="e")
    end
    if CF.UseArcs
        FilePrint(SOL.f,[A.ID,P.ID],solution;Header=PrintSpacer,DataName="Transport Allocations:",VarName="f")
    end
    if CF.UseTechs
        FilePrint(SOL.ΞΎ,[L.ID],solution;Header=PrintSpacer,DataName="Technology Allocations:",VarName="ΞΎ")
    end
    FilePrint(SOL.Οp,[N.ID,T.ID,P.ID],solution;Header=PrintSpacer,DataName="Nodal Product Prices:",VarName="Ο")
    if CF.UseImpacts
        FilePrint(SOL.Οq,[N.ID,T.ID,Q.ID],solution;Header=PrintSpacer,DataName="Nodal Impact Prices:",VarName="Ο")
    end
    # post-solve aggregates and derived prices/profits from PostSolveCalcs()
    FilePrint(POST.gNTP,[N.ID,T.ID,P.ID],solution;Header=PrintSpacer,DataName="Nodal Supply Allocations:",VarName="g")
    FilePrint(POST.dNTP,[N.ID,T.ID,P.ID],solution;Header=PrintSpacer,DataName="Nodal Demand Allocations:",VarName="d")
    if CF.UseImpacts
        FilePrint(POST.eNTQ,[N.ID,T.ID,Q.ID],solution;Header=PrintSpacer,DataName="Nodal Environmental Allocations:",VarName="e")
    end
    if CF.UseTechs
        FilePrint(POST.ΞΎgen,[L.ID,P.ID],solution;Header=PrintSpacer,DataName="Technology Generation Allocations:",VarName="ΞΎgen")
        FilePrint(POST.ΞΎcon,[L.ID,P.ID],solution;Header=PrintSpacer,DataName="Technology Consumption Allocations:",VarName="ΞΎcon")
        if CF.UseImpacts
            FilePrint(POST.ΞΎenv,[L.ID,Q.ID],solution;Header=PrintSpacer,DataName="Technology Impact Allocations:",VarName="ΞΎenv")
        end
    end
    if CF.UseImpacts
        FilePrint(POST.Ο_iq,[G.ID,Q.ID],solution;Header=PrintSpacer,DataName="Supply Impact Prices:",VarName="Ο_iq")
        FilePrint(POST.Ο_jq,[D.ID,Q.ID],solution;Header=PrintSpacer,DataName="Demand Impact Prices:",VarName="Ο_jq")
    end
    if CF.UseArcs
        FilePrint(POST.Ο_a,[A.ID,P.ID],solution;Header=PrintSpacer,DataName="Transport Prices:",VarName="Ο_a")
        if CF.UseImpacts
            FilePrint(POST.Ο_aq,[A.ID,Q.ID],solution;Header=PrintSpacer,DataName="Transport Impact Prices:",VarName="Ο_aq")
        end
    end
    if CF.UseTechs
        FilePrint(POST.Ο_m,[M.ID,N.ID,T.ID],solution;Header=PrintSpacer,DataName="Technology Prices:",VarName="Ο_m")
        if CF.UseImpacts
            FilePrint(POST.Ο_mq,[M.ID,N.ID,T.ID,Q.ID],solution;Header=PrintSpacer,DataName="Technology Impact Prices:",VarName="Ο_mq")
        end
    end
    FilePrint(POST.Οi,[G.ID],solution;Header=PrintSpacer,DataName="Supply Profits:",VarName="Οi")
    FilePrint(POST.Οj,[D.ID],solution;Header=PrintSpacer,DataName="Demand Profits:",VarName="Οj")
    if CF.UseImpacts
        FilePrint(POST.Οv,[V.ID],solution;Header=PrintSpacer,DataName="Environmental Consumer Profits:",VarName="Οv")
    end
    if CF.UseTechs
        FilePrint(POST.Οl,[L.ID],solution;Header=PrintSpacer,DataName="Technology Profits:",VarName="Οl")
    end
    if CF.UseArcs
        FilePrint(POST.Οa,[A.ID,P.ID],solution;Header=PrintSpacer,DataName="Transport Profits:",VarName="Οa")
    end
    close(solution)
    ### Write individual variables to csv files for easy access
    # delimiter
    Ξ = ","
    # supply: one row per supplier with its allocation and profit
    filename = open(joinpath(SolutionDir, "supply_allocations.csv"), "w")
    print(filename, "Supply ID"*Ξ*"Supply node"*Ξ*"Supply time"*Ξ*"Supply Product"*Ξ*"Supply allocation"*Ξ*"Supply Profit")
    for i in G.ID
        print(filename, "\n"*i*Ξ*G.node[i]*Ξ*G.time[i]*Ξ*G.prod[i]*Ξ*string(SOL.g[i])*Ξ*string(POST.Οi[i]))
    end
    close(filename)
    # demand: one row per consumer with its allocation and profit
    filename = open(joinpath(SolutionDir, "demand_allocations.csv"), "w")
    print(filename, "Demand ID"*Ξ*"Demand node"*Ξ*"Demand time"*Ξ*"Demand product"*Ξ*"Demand allocation"*Ξ*"Consumer Profit")
    for j in D.ID
        print(filename, "\n"*j*Ξ*D.node[j]*Ξ*D.time[j]*Ξ*D.prod[j]*Ξ*string(SOL.d[j])*Ξ*string(POST.Οj[j]))
    end
    close(filename)
    # environmental consumption
    if CF.UseImpacts
        filename = open(joinpath(SolutionDir, "env_con_allocations.csv"), "w")
        print(filename, "Environmental Consumer ID"*Ξ*"node"*Ξ*"time"*Ξ*"impact"*Ξ*"Environmental Consumer allocation"*Ξ*"Environmental Consumer Profit")
        for v in V.ID
            print(filename, "\n"*v*Ξ*V.node[v]*Ξ*V.time[v]*Ξ*V.impact[v]*Ξ*string(SOL.e[v])*Ξ*string(POST.Οv[v]))
        end
        close(filename)
    end
    # transportation: wide format, one allocation and one profit column per product
    if CF.UseArcs
        filename = open(joinpath(SolutionDir, "transport_allocations.csv"), "w")
        header = "Arc ID"*Ξ*"Send node"*Ξ*"Receiving node"*Ξ*"Send time"*Ξ*"Receiving time"
        for p in P.ID
            header *= Ξ*"Product: "*p
        end
        for p in P.ID
            header *= Ξ*"Product "*p*" transport profit"
        end
        print(filename, header)
        for a in A.ID
            print(filename, "\n"*a*Ξ*A.n_send[a]*Ξ*A.n_recv[a]*Ξ*A.t_send[a]*Ξ*A.t_recv[a])
            for p in P.ID
                print(filename, Ξ*string(SOL.f[a,p]))
            end
            for p in P.ID
                print(filename, Ξ*string(POST.Οa[a,p]))
            end
        end
        close(filename)
    end
    # technology: wide format; blank cells for products/impacts a technology does not use
    if CF.UseTechs
        filename = open(joinpath(SolutionDir, "technology_allocations.csv"), "w")
        header = "Technology ID"*Ξ*"Node"*Ξ*"Time"*Ξ*"Reference Product"
        for p in P.ID
            header *= Ξ*"Consumed: "*p
        end
        for p in P.ID
            header *= Ξ*"Generated: "*p
        end
        for q in Q.ID
            header *= Ξ*"Impact: "*q
        end
        header *= Ξ*"Profit"
        print(filename, header)
        for l in L.ID
            print(filename, "\n"*l*Ξ*L.node[l]*Ξ*L.time[l]*Ξ*M.InputRef[L.tech[l]])
            for p in P.ID
                if p in M.Inputs[L.tech[l]]
                    print(filename, Ξ*string(POST.ΞΎcon[l,p]))
                else
                    print(filename, Ξ)
                end
            end
            for p in P.ID
                if p in M.Outputs[L.tech[l]]
                    print(filename, Ξ*string(POST.ΞΎgen[l,p]))
                else
                    print(filename, Ξ)
                end
            end
            if CF.UseImpacts
                for q in Q.ID
                    if q in M.Impacts[L.tech[l]]
                        print(filename, Ξ*string(POST.ΞΎenv[l,q]))
                    else
                        print(filename, Ξ)
                    end
                end
            end
            print(filename, Ξ*string(POST.Οl[l]))
        end
        close(filename)
    end
    # nodal supply; zero rows are omitted
    filename = open(joinpath(SolutionDir, "nodal_supply_allocations.csv"), "w")
    print(filename, "Node"*Ξ*"Time point"*Ξ*"Product"*Ξ*"Total Supply")
    for n in N.ID
        for t in T.ID
            for p in P.ID
                if POST.gNTP[n,t,p] != 0
                    print(filename, "\n"*n*Ξ*t*Ξ*p*Ξ*string(POST.gNTP[n,t,p]))
                end
            end
        end
    end
    close(filename)
    # nodal demand; zero rows are omitted
    filename = open(joinpath(SolutionDir, "nodal_demand_allocations.csv"), "w")
    print(filename, "Node"*Ξ*"Time point"*Ξ*"Product"*Ξ*"Total Demand")
    for n in N.ID
        for t in T.ID
            for p in P.ID
                if POST.dNTP[n,t,p] != 0
                    print(filename, "\n"*n*Ξ*t*Ξ*p*Ξ*string(POST.dNTP[n,t,p]))
                end
            end
        end
    end
    close(filename)
    # nodal environmental consumption; zero rows are omitted
    if CF.UseImpacts
        filename = open(joinpath(SolutionDir, "nodal_env_con_allocations.csv"), "w")
        print(filename, "Node"*Ξ*"Time point"*Ξ*"Product"*Ξ*"Total Environmental Consumption")
        for n in N.ID
            for t in T.ID
                for q in Q.ID
                    if POST.eNTQ[n,t,q] != 0
                        print(filename, "\n"*n*Ξ*t*Ξ*q*Ξ*string(POST.eNTQ[n,t,q]))
                    end
                end
            end
        end
        close(filename)
    end
    # Nodal prices: product rows first, then (optionally) impact rows per (n,t)
    filename = open(joinpath(SolutionDir, "nodal_prices.csv"), "w")
    print(filename, "Node"*Ξ*"Time point"*Ξ*"Product/Impact"*Ξ*"Nodal price")
    for n in N.ID
        for t in T.ID
            for p in P.ID
                print(filename, "\n"*n*Ξ*t*Ξ*p*Ξ*string(SOL.Οp[n,t,p]))
            end
            if CF.UseImpacts
                for q in Q.ID
                    print(filename, "\n"*n*Ξ*t*Ξ*q*Ξ*string(SOL.Οq[n,t,q]))
                end
            end
        end
    end
    close(filename)
    # supply impact prices
    if CF.UseImpacts
        filename = open(joinpath(SolutionDir, "supply_impact_prices.csv"), "w")
        print(filename, "Supplier"*Ξ*"Node"*Ξ*"Time point"*Ξ*"Impact"*Ξ*"Supply Impact Price")
        for i in G.ID
            for q in G.Impacts[i]
                print(filename, "\n"*i*Ξ*G.node[i]*Ξ*G.time[i]*Ξ*q*Ξ*string(POST.Ο_iq[i,q]))
            end
        end
        close(filename)
    end
    # demand impact prices
    if CF.UseImpacts
        filename = open(joinpath(SolutionDir, "demand_impact_prices.csv"), "w")
        print(filename, "Consumer"*Ξ*"Node"*Ξ*"Time point"*Ξ*"Impact"*Ξ*"Demand Impact Price")
        for j in D.ID
            for q in D.Impacts[j]
                print(filename, "\n"*j*Ξ*D.node[j]*Ξ*D.time[j]*Ξ*q*Ξ*string(POST.Ο_jq[j,q]))
            end
        end
        close(filename)
    end
    # transportation prices: wide format, per-product then (optionally) per-impact columns
    if CF.UseArcs
        filename = open(joinpath(SolutionDir, "transport_prices.csv"), "w")
        header = "Arc ID"*Ξ*"Send node"*Ξ*"Receiving node"*Ξ*"Send time"*Ξ*"Receiving time"
        for p in P.ID
            header *= Ξ*"Product "*p*" transport price"
        end
        if CF.UseImpacts
            for q in Q.ID
                header *= Ξ*"Impact "*q*" transport price"
            end
        end
        print(filename, header)
        for a in A.ID
            print(filename, "\n"*a*Ξ*A.n_send[a]*Ξ*A.n_recv[a]*Ξ*A.t_send[a]*Ξ*A.t_recv[a])
            for p in P.ID
                print(filename, Ξ*string(POST.Ο_a[a,p]))
            end
            if CF.UseImpacts
                for q in Q.ID
                    print(filename, Ξ*string(POST.Ο_aq[a,q]))
                end
            end
        end
        close(filename)
    end
    # technology prices: blank cells for impacts a technology does not produce
    if CF.UseTechs
        filename = open(joinpath(SolutionDir, "technology_prices.csv"), "w")
        header = "Technology ID"*Ξ*"Node"*Ξ*"Time"*Ξ*"Reference Product"*Ξ*"Technology Price"
        if CF.UseImpacts
            for q in Q.ID
                header *= Ξ*"Impact: "*q*" price"
            end
        end
        print(filename, header)
        for l in L.ID
            print(filename, "\n"*l*Ξ*L.node[l]*Ξ*L.time[l]*Ξ*M.InputRef[L.tech[l]]*Ξ*string(POST.Ο_m[L.tech[l],L.node[l],L.time[l]]))
            if CF.UseImpacts
                for q in Q.ID
                    if q in M.Impacts[L.tech[l]]
                        print(filename, Ξ*string(POST.Ο_mq[L.tech[l],L.node[l],L.time[l],q]))
                    else
                        print(filename, Ξ)
                    end
                end
            end
        end
        close(filename)
    end
    ### Return
    return
end
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 69 | # Run me to start terminal
using Revise
using CoordinatedSupplyChains | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 6353 | ################################################################################
### DEFINE DATA STRUCTURES "STRUCT" FOR USE IN MAIN MODEL
################################################################################
### PRIMARY INDEX SETS
# Time axis data: ordered time period labels and a per-period numeric value
struct TimeDataStruct
    ID::Vector{String} # time period labels
    dt::Dict{String,Float64} # period -> numeric value (presumably period duration — TODO confirm)
end
# Network node data with geographic coordinates
struct NodeDataStruct
    ID::Vector{String} # node labels
    alias::Dict{String,String} # node -> human-readable display name
    lon::Dict{String,Float64} # node -> longitude coordinate
    lat::Dict{String,Float64} # node -> latitude coordinate
end
# Transport arc data; arcs connect a (node, time) pair to another (node, time) pair
struct ArcDataStruct
    ID::Vector{String} # arc labels
    n_send::Dict{String,String} # arc -> sending node
    n_recv::Dict{String,String} # arc -> receiving node
    t_send::Dict{String,String} # arc -> sending time point
    t_recv::Dict{String,String} # arc -> receiving time point
    bid::Dict{Tuple{String,String},Float64} # (arc, product) -> transport bid/cost coefficient
    cap::Dict{Tuple{String,String},Float64} # (arc, product) -> transport capacity
    len::Dict{String,Float64} # arc -> length (units not visible here — TODO confirm)
    dur::Dict{String,Float64} # arc -> duration (units not visible here — TODO confirm)
    ID_S::Vector{String} # arc ID subset (presumably spatial arcs — TODO confirm)
    ID_T::Vector{String} # arc ID subset (presumably temporal/storage arcs — TODO confirm)
    ID_ST::Vector{String} # arc ID subset (presumably spatiotemporal arcs — TODO confirm)
end
# Product data
struct ProductDataStruct
    ID::Vector{String} # product labels
    alias::Dict{String,String} # product -> human-readable display name
    transport_cost::Dict{String,Float64} # product -> transport cost coefficient
    storage_cost::Dict{String,Float64} # product -> storage cost coefficient
end
# Environmental impact data
struct ImpactDataStruct
    ID::Vector{String} # impact labels
    alias::Dict{String,String} # impact -> human-readable display name
    transport_coeff::Dict{String,Float64} # impact -> transport-related impact coefficient
    storage_coeff::Dict{String,Float64} # impact -> storage-related impact coefficient
end
# Consumer (demand bid) data
struct DemandDataStruct
    ID::Vector{String} # consumer labels
    node::Dict{String,String} # consumer -> node location
    time::Dict{String,String} # consumer -> time point
    prod::Dict{String,String} # consumer -> product demanded
    bid::Dict{String,Float64} # consumer -> bid price
    cap::Dict{String,Float64} # consumer -> allocation capacity
    Impacts::Dict{String,Vector{String}} # consumer -> impacts tied to its consumption
    ImpactYields::Dict{Tuple{String,String},Float64} # (consumer, impact) -> impact yield coefficient
end
# Supplier (supply bid) data
struct SupplyDataStruct
    ID::Vector{String} # supplier labels
    node::Dict{String,String} # supplier -> node location
    time::Dict{String,String} # supplier -> time point
    prod::Dict{String,String} # supplier -> product supplied
    bid::Dict{String,Float64} # supplier -> bid price
    cap::Dict{String,Float64} # supplier -> allocation capacity
    Impacts::Dict{String,Vector{String}} # supplier -> impacts tied to its production
    ImpactYields::Dict{Tuple{String,String},Float64} # (supplier, impact) -> impact yield coefficient
end
# Environmental consumer data (consumers of impacts rather than products)
struct EnvDataStruct
    ID::Vector{String} # environmental consumer labels
    node::Dict{String,String} # env. consumer -> node location
    time::Dict{String,String} # env. consumer -> time point
    impact::Dict{String,String} # env. consumer -> impact consumed
    bid::Dict{String,Float64} # env. consumer -> bid price
    cap::Dict{String,Float64} # env. consumer -> allocation capacity
end
# Technology (conversion process) data, keyed by technology ID
struct TechDataStruct
    ID::Vector{String} # technology labels
    Outputs::Dict{String,Vector{String}} # technology -> products generated
    Inputs::Dict{String,Vector{String}} # technology -> products consumed
    Impacts::Dict{String,Vector{String}} # technology -> impacts generated
    OutputYields::Dict{Tuple{String,String},Float64} # (technology, product) -> output yield
    InputYields::Dict{Tuple{String,String},Float64} # (technology, product) -> input yield
    ImpactYields::Dict{Tuple{String,String},Float64} # (technology, impact) -> impact yield
    InputRef::Dict{String,String} # technology -> reference product for its extent
    bid::Dict{String,Float64} # technology -> bid price
    cap::Dict{String,Float64} # technology -> capacity
    alias::Dict{String,String} # technology -> human-readable display name
end
# Technology siting map: each entry places one technology at a (node, time)
struct TechmapDataStruct
    ID::Vector{String} # techmap entry labels
    node::Dict{String,String} # techmap entry -> node
    time::Dict{String,String} # techmap entry -> time point
    tech::Dict{String,String} # techmap entry -> technology ID
end
# Derived index sets and set intersections used when building the model.
# Fields typed Union{...,Nothing} are Nothing when the corresponding model
# feature (arcs, impacts, technologies) is absent — see DataCF.
struct SetStruct
    T1::Vector{String} # time-point subset (ordering set; exact definition not visible here)
    Tt::Vector{String} # time-point subset (ordering set; exact definition not visible here)
    TT::Vector{String} # time-point subset (ordering set; exact definition not visible here)
    Tprior::Dict{String,String} # time point -> presumably the preceding time point — TODO confirm
    Tpost::Dict{String,String} # time point -> presumably the following time point — TODO confirm
    Ain::Union{Dict{Tuple{String,String},Vector{String}}, Nothing} # (node, time) -> arcs delivering into it
    Aout::Union{Dict{Tuple{String,String},Vector{String}}, Nothing} # (node, time) -> arcs leaving it
    Dntp::Dict{Tuple{String,String,String},Vector{String}} # (node, time, product) -> consumer IDs there
    Gntp::Dict{Tuple{String,String,String},Vector{String}} # (node, time, product) -> supplier IDs there
    Dntq::Union{Dict{Tuple{String,String,String},Vector{String}}, Nothing} # (node, time, impact) -> consumer IDs with that impact
    Gntq::Union{Dict{Tuple{String,String,String},Vector{String}}, Nothing} # (node, time, impact) -> supplier IDs with that impact
    Vntq::Union{Dict{Tuple{String,String,String},Vector{String}}, Nothing} # (node, time, impact) -> environmental consumer IDs
    DQ::Union{Vector{String}, Nothing} # consumers that declare impact data
    GQ::Union{Vector{String}, Nothing} # suppliers that declare impact data
    NTPgenl::Union{Dict{Tuple{String,String,String},Vector{String}}, Nothing} # (node, time, product) -> techmap IDs generating that product
    NTPconl::Union{Dict{Tuple{String,String,String},Vector{String}}, Nothing} # (node, time, product) -> techmap IDs consuming that product
    NTQgenl::Union{Dict{Tuple{String,String,String},Vector{String}}, Nothing} # (node, time, impact) -> techmap IDs generating that impact
end
# Derived model parameters (capacities and yield coefficients);
# Nothing-valued fields correspond to absent model features — see DataCF.
struct ParStruct
    gMAX::Dict{Tuple{String,String,String},Float64} # maximum nodal supply
    dMAX::Dict{Tuple{String,String,String},Float64} # maximum nodal demand
    eMAX::Union{Dict{Tuple{String,String,String},Float64}, Nothing} # maximum environmental demand
    Ξ³iq::Union{Dict{Tuple{String,String},Float64}, Nothing} # supply impact yield
    Ξ³jq::Union{Dict{Tuple{String,String},Float64}, Nothing} # demand impact yield
    Ξ³aq::Union{Dict{Tuple{String,String},Float64}, Nothing} # transport impact yield
    Ξ³mp::Union{Dict{Tuple{String,String},Float64}, Nothing} # technology product yield
    Ξ³mq::Union{Dict{Tuple{String,String},Float64}, Nothing} # technology impact yield
    ΞΎgenMAX::Union{Dict{Tuple{String,String},Float64}, Nothing} # technology generation capacity
    ΞΎconMAX::Union{Dict{Tuple{String,String},Float64}, Nothing} # technology consumption capacity
    ΞΎenvMAX::Union{Dict{Tuple{String,String},Float64}, Nothing} # technology impact capacity
end
# Model size statistics collected by GetModelStats()
struct ModelStatStruct
    Variables::Int # number of decision variables
    TotalInequalityConstraints::Int # affine inequality constraints + variable bounds
    TotalEqualityConstraints::Int # affine + variable equality constraints
    VariableBounds::Int # variable lower/upper bound constraints
    ModelInequalityConstrtaints::Int # affine inequality constraints (NOTE: misspelling is part of the public field name)
    ModelEqualityConstraints::Int # affine equality constraints
end
# Solver output packaged by SolveModel(); Nothing-valued fields correspond
# to absent model features — see DataCF
struct SolutionStruct
    TermStat::String # termination status string
    PrimalStat::String # primal status string
    DualStat::String # dual status string
    z::Float64 # objective value
    g::JuMP.Containers.DenseAxisArray # supply allocations
    d::JuMP.Containers.DenseAxisArray # demand allocations
    e::Union{JuMP.Containers.DenseAxisArray,Nothing} # environmental consumption allocations
    f::Union{JuMP.Containers.DenseAxisArray,Nothing} # transport allocations
    ΞΎ::Union{JuMP.Containers.DenseAxisArray,Nothing} # technology extents
    Οp::JuMP.Containers.DenseAxisArray # nodal product prices (ProductBalance duals)
    Οq::Union{JuMP.Containers.DenseAxisArray,Nothing} # nodal impact prices (ImpactBalance duals)
end
# Derived quantities computed by PostSolveCalcs()
struct PostSolveValues
    gNTP::Dict{Tuple{String,String,String},Float64} # nodal supply totals
    dNTP::Dict{Tuple{String,String,String},Float64} # nodal demand totals
    eNTQ::Union{Dict{Tuple{String,String,String},Float64},Nothing} # nodal environmental consumption totals
    ΞΎgen::Union{Dict{Tuple{String,String},Float64},Nothing} # technology generation amounts
    ΞΎcon::Union{Dict{Tuple{String,String},Float64},Nothing} # technology consumption amounts
    ΞΎenv::Union{Dict{Tuple{String,String},Float64},Nothing} # technology impact amounts
    Ο_iq::Union{Dict{Tuple{String,String},Float64},Nothing} # supplier impact prices
    Ο_jq::Union{Dict{Tuple{String,String},Float64},Nothing} # consumer impact prices
    Ο_a::Union{Dict{Tuple{String,String},Float64},Nothing} # transport prices
    Ο_aq::Union{Dict{Tuple{String,String},Float64},Nothing} # transport impact prices
    Ο_m::Union{Dict{Tuple{String,String,String},Float64},Nothing} # technology prices
    Ο_mq::Union{Dict{Tuple{String,String,String,String},Float64},Nothing} # technology impact prices
    Οi::Dict{String,Float64} # supplier profits
    Οj::Dict{String,Float64} # consumer profits
    Οv::Union{Dict{String,Float64},Nothing} # environmental consumer profits
    Οl::Union{Dict{String,Float64},Nothing} # technology profits (by techmap entry)
    Οa::Union{Dict{Tuple{String,String},Float64},Nothing} # transport profits
end
# Control flow based on input data
struct DataCF
    UseTime::Bool # are time points provided?
    UseArcs::Bool # are arcs provided?
    UseTechs::Bool # are technologies provided?
    UseImpacts::Bool # are impact data provided?
end
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 15236 | ################################################################################
### SUPPORTING FUNCTIONS - NOT FOR CALLING BY USER
# Earth radius constant (km) used for great-circle distance calculation;
# NOTE(review): 6335.439 differs from the conventional mean Earth radius of ~6371 km — confirm this value is intended
const R = 6335.439
function GreatCircle(ref_lon::Float64, ref_lat::Float64, dest_lon::Float64, dest_lat::Float64)
    """
    Returns the great-circle distance (km) between a reference point (ref) and
    a destination (dest), given in degrees of longitude/latitude, using the
    haversine formula with the Earth radius constant R.
    """
    # haversine term: sin²(Δlat/2) + cos(lat₁)·cos(lat₂)·sin²(Δlon/2)
    hav = (sind((dest_lat - ref_lat)/2))^2 +
        cosd(dest_lat)*cosd(ref_lat)*((sind((dest_lon - ref_lon)/2))^2)
    return 2.0*R*asin(sqrt(hav))
end
function GreatCircle(ref_lon::Vector{Float64}, ref_lat::Vector{Float64}, dest_lon::Vector{Float64}, dest_lat::Vector{Float64})
    """
    Vectorized variant: element-wise great-circle distances (km) over
    coordinate vectors of equal length.
    """
    # fully broadcast haversine term, then the arcsine step
    hav = @. (sind((dest_lat - ref_lat)/2))^2 +
        cosd(dest_lat)*cosd(ref_lat)*((sind((dest_lon - ref_lon)/2))^2)
    return 2.0 .* R .* asin.(sqrt.(hav))
end
function TextListFromCSV(IDs::Vector{String},DataCol::Vector{String})
    """
    Recovers pipe-separated lists of text labels stored as single entries in a
    csv file column and returns them as a dictionary indexed by the given IDs
    i.e., "P01|P02|P03" -> ["P01","P02","P03"]
    (The previous docstring described comma-separated lists; the delimiter
    actually used by the data files — and by this function — is "|".)
    Inputs:
        IDs - a list of labels to be used as dictionary keys
        DataCol - the column of data with rows corresponding to the labels in
            IDs, each row containing one or more text labels separated by "|"
    Outputs:
        Out - a dictionary mapping the keys in IDs to the label lists in DataCol
    """
    Out = Dict{String,Vector{String}}()
    for i in eachindex(IDs)
        # split on "|" and normalize the SubString pieces to String
        Out[IDs[i]] = string.(split(DataCol[i], "|"))
    end
    return Out
end
#=
function NumericListFromCSV(IDs,ID2s::Dict,DataCol)
"""
For data that may be stored as a list of numeric values within CSV data
i.e., |0.1,0.2,0.3,0.4| in a "|"-separated data file. This function parses
these data as Float64 values and assigns them to a dictionary using given
keys; the dictionary is returned.
Inputs:
IDs - a list of IDs corresponding to the rows of the data in DataCol; also used as dict keys
ID2s - a list of secondary IDs for use as keys in the dict; a list of lists
DataCol - a column of data from a .csv file
Note: This function replaces code like this:
for s = 1:length(asset_id) # AssetData[s,7] is a comma-separated list
check = typeof(AssetData[s, 7])
if check != Float64 && check != Int64 # then it's a string of numbers
temp = split(asset_data[s, 7],",")
values = [parse(Float64,temp[i]) for i = 1:length(temp)] # Float64 array
else # in the case it was a Float64 or an Int64
values = asset_data[s, 7]
end
key1 = asset_id[s]
key2s = asset_inputs[key1]
for i = 1:length(key2s)
asset_input_stoich[key1,key2s[i]] = values[i]
end
end
"""
Out = Dict()
L = length(IDs)
for l = 1:L
if DataCol[l] != "" # if blank, ignore entry
check = typeof(DataCol[l])
if check != Float64 && check != Int64 # then it's a string of numbers
temp = split(DataCol[l],"|") # separate by pipes into temporary list
values = [parse(Float64,temp[i]) for i = 1:length(temp)] # parse as Float64 array
else # in the case it was already a Float64 or an Int64
values = DataCol[l]
end
key1 = IDs[l]
key2s = ID2s[key1]
for i = 1:length(key2s)
Out[key1,key2s[i]] = values[i]
end
end
end
return Out
end
function NumericListFromCSV(IDs,ID2s::Array,DataCol)
"""
If ID2s is an Array (i.e., full loop over IDs and ID2s)
"""
Out = Dict{String,Float64}()
L = length(IDs)
for l = 1:L
check = typeof(DataCol[l])
if check != Float64 && check != Int64 # then it's a string of numbers
temp = split(DataCol[l],"|") # separate by commas into temporary list
values = [parse(Float64,temp[i]) for i = 1:length(temp)] # parse as Float64 array
else # in the case it was already a Float64 or an Int64
values = DataCol[l]
end
# Assign keys and values to output dictionary
for i = 1:length(ID2s)
Out[IDs[l],ID2s[i]] = values[i]
end
end
return Out
end
=#
function NumericListFromCSV2(ID1s::Vector{String},ID2s::Vector{String},DataCol::Vector{String})
    """
    Parses pipe-separated numeric entries (one row of DataCol per primary ID)
    into a Dict keyed by (id1, id2) tuples: the k-th number in a row is
    assigned to the k-th secondary ID in ID2s.
    """
    Out = Dict{Tuple{String,String},Float64}()
    for row in eachindex(ID1s)
        # split the row on "|" and parse every piece as Float64
        vals = parse.(Float64, split(DataCol[row], "|"))
        for (col, id2) in enumerate(ID2s)
            Out[ID1s[row], id2] = vals[col]
        end
    end
    return Out
end
function NumericListFromCSV2(ID1s::Vector{String},ID2s::Dict{String,Vector{String}},DataCol::Vector{String})
    """
    Variant for per-ID secondary key lists: ID2s maps each primary ID to its
    own vector of secondary IDs. Rows whose secondary key list is empty are
    skipped (their DataCol entries are ignored).
    """
    Out = Dict{Tuple{String,String},Float64}()
    for row in eachindex(ID1s)
        key1 = ID1s[row]
        inner = ID2s[key1]
        # primary IDs with no secondary keys contribute nothing
        isempty(inner) && continue
        vals = parse.(Float64, split(DataCol[row], "|"))
        for (col, key2) in enumerate(inner)
            Out[key1, key2] = vals[col]
        end
    end
    return Out
end
function KeyArrayInit(OrderedKeyList)
    """
    Builds the array of all key combinations from OrderedKeyList; same key
    layout as DictInit()/DictListInit(), but returns only the keys.
    Inputs:
        OrderedKeyList - a list of lists (i.e., list of lists of keys)
    Outputs:
        the key list itself (single index set) or an array of key tuples
        (multiple index sets, via Iterators.product)
    """
    # single index set: keys are the elements themselves
    length(OrderedKeyList) == 1 && return OrderedKeyList[1]
    # multiple index sets: keys are tuples from the Cartesian product
    return collect(Iterators.product(OrderedKeyList...))
end
function DictListInit(OrderedKeyList,InitFunction)
    """
    Initializes a dictionary over all key combinations in OrderedKeyList,
    assigning each key a fresh empty container produced by InitFunction.
    Inputs:
        OrderedKeyList - a list of lists (i.e., list of lists of keys)
        InitFunction - a zero-argument function producing an empty container
    Outputs:
        a dictionary mapping each key (or key tuple) to InitFunction()
    """
    # single index set -> plain keys; multiple sets -> tuple keys
    keyspace = length(OrderedKeyList) == 1 ? OrderedKeyList[1] :
        collect(Iterators.product(OrderedKeyList...))
    return Dict(key => InitFunction() for key in keyspace)
end
function InitStringArray()
    """
    Returns an empty String vector; intended as the InitFunction argument
    for DictListInit.
    """
    return String[]
end
function DictInit(OrderedKeyList,InitValue)
    """
    Initializes a dictionary over all key combinations in OrderedKeyList,
    assigning every key the same value InitValue.
    Inputs:
        OrderedKeyList - a list of lists (i.e., list of lists of keys)
        InitValue - the shared initial value (typically 0 or false)
    Outputs:
        a fully-populated dictionary
    Notes:
        1) useful when every key must exist up front, e.g. set intersections
        2) DO NOT USE WHERE InitValue IS ::Function (use DictListInit instead)
    """
    # single index set -> plain keys; multiple sets -> tuple keys
    if length(OrderedKeyList) == 1
        keyspace = OrderedKeyList[1]
    else
        keyspace = collect(Iterators.product(OrderedKeyList...))
    end
    return Dict(key => InitValue for key in keyspace)
end
function PrettyPrint(Data, OrderedIndexList, TruthTable=nothing; Header="*"^50, DataName="", VarName="")
    """
    Prints entries of Data indexed by OrderedIndexList whenever the matching
    entry of TruthTable is true; Data and TruthTable share an index pattern.
    Inputs:
        Data - an indexed data structure; a dictionary or JuMP variable
        OrderedIndexList - list of the index sets for Data and TruthTable
        TruthTable - Boolean dictionary on the same indices; defaults to all-true
        Header - string printed above everything
        DataName - string printed as a header for the data
        VarName - string prefixed to every printed line
    """
    # default: print every entry
    if TruthTable === nothing
        TruthTable = DictInit(OrderedIndexList, true)
    end
    println(Header)
    println(DataName)
    if length(OrderedIndexList) == 1
        # single index set: quote the index in the output
        for index in OrderedIndexList[1]
            TruthTable[index] || continue
            println(VarName*"(\""*string(index)*"\"): "*string(Data[index]))
        end
    else
        # multiple index sets: show the key tuple as-is
        for index in collect(Iterators.product(OrderedIndexList...))
            TruthTable[index...] || continue
            println(VarName*string(index)*": "*string(Data[index...]))
        end
    end
end
"""
    Nonzeros(Data, OrderedIndexList; Threshold=1E-9)

Given a dictionary of data indexed by the labels in `OrderedIndexList`, return
a Boolean dictionary marking the nonzero indices in `Data`, where "nonzero"
means `abs(value) > Threshold` (default `1E-9`).

# Arguments
- `Data`: an indexed data structure; a dictionary or JuMP variable values
- `OrderedIndexList`: the list of index sets corresponding to `Data`
- `Threshold`: magnitude below which a value counts as zero

# Returns
- a `Dict` with the same index pattern as `Data` (tuple keys when more than
  one index set is given) mapping each index to `true`/`false`
"""
function Nonzeros(Data, OrderedIndexList; Threshold=1E-9)
    # Values are always Bool; type the dictionary accordingly
    OutDict = Dict{Any,Bool}()
    if length(OrderedIndexList) == 1
        for index in OrderedIndexList[1]
            OutDict[index] = abs(Data[index]) > Threshold
        end
    else
        # Tuple keys over the Cartesian product of the index sets
        for index in Iterators.product(OrderedIndexList...)
            OutDict[index] = abs(Data[index...]) > Threshold
        end
    end
    return OutDict
end
"""
    FilePrint(Variable, OrderedIndexList, filename; Header="*"^50, DataName="", VarName="")

Write nicely-formatted, indexed values of `Variable` to the open file stream
`filename`; reduces required script code clutter.

# Arguments
- `Variable`: the result of a JuMP `getvalue()` call; a `Dict()`
- `OrderedIndexList`: a list of index sets in the same order as the indices of
  the data in `Variable`; e.g., `[A, B, C]` where `A = [a1, a2, ..., aN]`, ...
- `filename`: the open IO stream to print to
- `Header`: a string printed above the data
- `DataName`: a header appearing above the printed data
- `VarName`: the desired display name of the variable; a string
"""
function FilePrint(Variable, OrderedIndexList, filename; Header="*"^50, DataName="", VarName="")
    # Header and DataName
    print(filename, "\n", Header, "\n", DataName)
    # One line per index (or per index tuple from the Cartesian product)
    if length(OrderedIndexList) == 1
        for idx in OrderedIndexList[1]
            print(filename, "\n", VarName, "(\"", idx, "\") = ", Variable[idx])
        end
    else
        for idx in Iterators.product(OrderedIndexList...)
            print(filename, "\n", VarName, idx, " = ", Variable[idx...])
        end
    end
end
"""
    PurgeQuotes(d::Dict)

Replace any dictionary value equal to `[""]` (a single empty string, the
artifact left by splitting an empty delimited field) with an empty `String`
array, in place.

# Arguments
- `d`: a dictionary whose values may include `[""]` entries

# Returns
- the same dictionary `d`, mutated in place
"""
function PurgeQuotes(d::Dict)
    for k in keys(d)
        if d[k] == [""]
            # Empty String vector, identical to Array{String}(undef, 0)
            d[k] = String[]
        end
    end
    return d
end
#=
function RawDataPrint(data,filename;Header="*"^50,DataName="")
"""
Prints raw data to file for record-keeping purposes;
reduces required script code clutter.
> data: an array of data read from a .csv file.
> filename: the file name for printing
> Header: a string to be used as a header above the printed data
> DataName: a header to appear above the printed data
"""
# Header and DataName:
print(filename,"\n"*Header*"\n"*DataName)
# number of rows
n = size(data)[1]
# print rows of data array to file
for i = 1:n
print(filename,"\n")
print(filename,data[i,:])
end
end
=#
"""
    EnvDataGen(N, T, Q, InitValues, filedir)

Generate a default list of environmental stakeholders based on node and time
IDs, setting a default bid (tax) for each impact type using `InitValues`.
Just a convenient way to create this file.

# Arguments
- `N`: node IDs
- `T`: time IDs
- `Q`: impact IDs
- `InitValues`: bid values; same length as `Q`
- `filedir`: directory in which csvdata_env.csv is written

# Outputs
- text file: csvdata_env.csv
"""
function EnvDataGen(N, T, Q, InitValues, filedir)
    nN, nT, nQ = length(N), length(T), length(Q)
    # Pad stakeholder IDs to a fixed width based on the total count
    pad = ndigits(nN * nT * nQ)
    header = "# 1. Env. stakeholder reference| 2. Node| 3. Time| 4. Impact| 5. Bid (USD/impact unit)"
    open(joinpath(filedir, "csvdata_env.csv"), "w") do io
        # Column-information header line
        print(io, header)
        # One stakeholder per (node, time, impact) combination
        counter = 0
        for n in 1:nN, t in 1:nT, q in 1:nQ
            counter += 1
            print(io, "\nV", lpad(counter, pad, "0"), "|", N[n], "|", T[t], "|", Q[q], "|", InitValues[q])
        end
    end
    return
end
# EnvDataGen(N.ID,T.ID,Q.ID,[0,0,0],"TestSets/BuildTest01") | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 1468 | ################################################################################
### FUNCTIONS FOR RUNNING COMPLETE WORKFLOWS
"""
    RunCSC(DataDir=pwd(); optimizer=DefaultOptimizer, UseArcLengths=true, Output=false)

One-call workflow for CoordinatedSupplyChains.jl: load case study data, build
and solve the coordination model, and save the solution.

# Arguments
- `DataDir`: the directory containing case study data files
- `optimizer`: (optional keyword) an optimizer used to solve the case study,
  e.g., `Gurobi.Optimizer`; defaults to `HiGHS.Optimizer` if not specified
- `UseArcLengths`: (optional keyword) whether to use arc lengths from the data
- `Output`: (optional keyword) when `true`, return all model and solution data

# Returns
- `nothing` by default; all data if `Output=true`
"""
function RunCSC(DataDir=pwd(); optimizer=DefaultOptimizer, UseArcLengths=true, Output=false)
    # Load and assemble the case study data
    T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, CF = BuildModelData(DataDir, UseArcLengths)
    # Build the coordination model and collect its statistics
    MOD = BuildModel(T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, optimizer=optimizer)
    ModelStats = GetModelStats(MOD)
    # Solve, then compute quantities determined post-solve
    SOL = SolveModel(MOD)
    POST = PostSolveCalcs(T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, SOL, CF)
    # Persist the solution data
    SaveSolution(DataDir, ModelStats, SOL, POST, T, N, P, Q, A, D, G, V, M, L, CF)
    # Update user
    println(PrintSpacer * "\n" * " "^19 * "All Done!\n" * PrintSpacer)
    # Hand everything back only on request
    return Output ? (T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, MOD, ModelStats, SOL, POST) : nothing
end
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | code | 991 | using CoordinatedSupplyChains
using Test
@testset "CoordinatedSupplyChains.jl" begin
    # Test directory and file directory information
    TestFolder = "ExtendedTestSets"
    TestList = ["NoArcs",
        "NoTechnologies",
        "NoTimeNoArcsNoImpsNoTechs",
        "NutrientModelDemandLoss",
        "NutrientModelDemandLossV2",
        "TutorialModel",
        "TutorialModelIntValues"]
    SolutionDirectory = "_SolutionData"
    SolutionFileName = "_SolutionData.txt"
    # Keyword options: whether to use arc lengths, per test case
    UseArcsForTest = [true, true, true, true, true, false, false]
    # Run each of the seven test cases, confirm a solution file was
    # produced, then remove it so reruns start clean
    for (case, usearcs) in zip(TestList, UseArcsForTest)
        casedir = joinpath(@__DIR__, TestFolder, case)
        RunCSC(casedir, UseArcLengths=usearcs)
        solfile = joinpath(casedir, SolutionDirectory, SolutionFileName)
        @test isfile(solfile)
        rm(solfile, recursive=true)
    end
end
| CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | docs | 4953 | 
---
A supply chain modeling framework based on `JuMP` implementing the coordination model described by [Tominac and Zavala](https://doi.org/10.1016/j.compchemeng.2020.107157). `CoordinatedSupplyChains.jl` automates this implementation; users can point it at a set of data files, and the package will build the model, solve it, save solution variables to .csv files, and create basic network plots of the system. For more control, users can call functions one-by-one, giving access to all intermediate data structures, or simply point a single convenient function at a directory with the required data files, and `CoordinatedSupplyChains` will do the rest. The present release supports steady-state and dynamic supply chain coordination problems, with the option to include environmental impact metrics.
| **Documentation** | **Build Status** | **Citation** |
|:-------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:|:--------------------------------------:|
|[](https://tominapa.github.io/CoordinatedSupplyChains.jl/dev)|[](https://github.com/Tominapa/CoordinatedSupplyChains.jl/actions)[](https://codecov.io/gh/Tominapa/CoordinatedSupplyChains.jl)|[](https://doi.org/10.1016/j.compchemeng.2020.107157)|
Coordination is a powerful market management system used in electrical grid management to determine the optimal set of transactions between buyers and sellers of electricity as well as the resulting prices. Electricity markets involve multiple competing stakeholders (multiple buyers and multiple sellers) and the coordination process ensures that market outcomes are in a state of equilibrium. Among other notable properties, the underlying coordination optimization model is linear, and provides information about electricity pricing through dual interpretations. `CoordinatedSupplyChains.jl` generalizes coordination to multi-product supply chains, including product transformation; i.e., products can change form within this framework. `CoordinatedSupplyChains.jl` uses an abstraction with four stakeholder classes: buyers, sellers, technology providers, and transportation providers. Every supply chain stakeholder falls into one of these classes, and the coordination procedure guarantees that each stakeholder participating in the market has a positive profit, so the package shows its utility in the analysis of multi-stakeholder supply chains, where there multiple independent entities (companies or individuals) making up a complex, interconnected supply chain.
## License
`CoordinatedSupplyChains.jl` is licensed under the [MIT "Expat" license](./LICENSE).
## Documentation
[](https://tominapa.github.io/CoordinatedSupplyChains.jl/dev)
Documentation includes an overview of the software, instructions for setting up the required data files, and guides that will help you get started.
## Citing
If `CoordinatedSupplyChains.jl` is useful in your research, we appreciate your citation to our work. This helps us promote new work and development on our code releases. We hope you find our code helpful, and thank you for any feedback you might have for us.
[](https://doi.org/10.1016/j.compchemeng.2020.107157)
```latex
@article{TominacZavala2020,
title = {Economic properties of multi-product supply chains},
journal ={Comput Chem Eng},
pages = {107157},
year = {2020},
issn = {0098-1354},
doi = {https://doi.org/10.1016/j.compchemeng.2020.107157},
url = {http://www.sciencedirect.com/science/article/pii/S0098135420305810},
author = {Philip A. Tominac and Victor M. Zavala}
}
```
[](https://doi.org/10.1016/j.compchemeng.2022.107666)
```latex
@article{TominacZhangZavala2022,
title = {Spatio-temporal economic properties of multi-product supply chains},
journal = {Comput Chem Eng},
volume = {159},
pages = {107666},
year = {2022},
issn = {0098-1354},
doi = {https://doi.org/10.1016/j.compchemeng.2022.107666},
url = {https://www.sciencedirect.com/science/article/pii/S0098135422000114},
author = {Philip A. Tominac and Weiqi Zhang and Victor M. Zavala},
}
```
## Acknowledgements
We acknowledge support from the U.S. Department of Agriculture (grant 2017-67003-26055) and partial funding from the National Science Foundation (under grant CBET-1604374). | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | docs | 3788 | 
---
## What is CoordinatedSupplyChains.jl?
`CoordinatedSupplyChains.jl` is a ready-made supply chain coordination model. It is built on the concept of supply chain economics, with prices driving transactions between supply chain stakeholders. In other words, with this approach, if a supply chain delivers a product to a customer, it is because it is profitable to do so. Under this economic interpretation, a supply chain is treated as a collection of stakeholders, including product suppliers, consumers, transportation, processing technologies, and environmental impact sinks. This conceptualization makes `CoordinatedSupplyChains.jl` particularly useful for analysis of sustainability-profit trade-offs.
`CoordinatedSupplyChains.jl` encodes a complex abstraction for large-scale supply chains into a simple, user-friendly interface that removes almost all the coding burden separating the user from the results. `CoordinatedSupplyChains.jl` is intended for users who want to solve supply chain problems under a coordination objective (i.e., supply chains as coordinated markets). It is intended to function equally well for users in practical settings (industrial practitioners) and for educators looking to teach complex Operations Research concepts.
`CoordinatedSupplyChains.jl` is an abstraction of a supply chain coordination model, the user defines a model by supplying information about supply chain structure, products, stakeholders, and environmental impacts. `CoordinatedSupplyChains.jl` uses this information to build the model, solve it (optionally with a user-specified solver) and returns solution information to the user.
## Installation
`CoordinatedSupplyChains.jl` is a registered Julia package. Installation is as simple as
```julia
(@v1.7) pkg> add CoordinatedSupplyChains
```
`CoordinatedSupplyChains.jl` has the following dependencies, which will need to be installed as well (if they are not already installed globally, or as part of your project installation with `CoordinatedSupplyChains.jl`)
- `JuMP`
- `HiGHS`
`JuMP` provides the modeling facility for the coordination problem, and `CoordinatedSupplyChains.jl` uses the `HiGHS` solver by default, which is open-source and does not require a license.
## Citing
If `CoordinatedSupplyChains.jl` is useful in your research, we appreciate your citation to our work. This helps us promote new work and development on our code releases. We hope you find our code helpful, and thank you for any feedback you might have for us.
[](https://doi.org/10.1016/j.compchemeng.2020.107157)
```latex
@article{TominacZavala2020,
title = {Economic properties of multi-product supply chains},
journal ={Comput Chem Eng},
pages = {107157},
year = {2020},
issn = {0098-1354},
doi = {https://doi.org/10.1016/j.compchemeng.2020.107157},
url = {http://www.sciencedirect.com/science/article/pii/S0098135420305810},
author = {Philip A. Tominac and Victor M. Zavala}
}
```
[](https://doi.org/10.1016/j.compchemeng.2022.107666)
```latex
@article{TominacZhangZavala2022,
title = {Spatio-temporal economic properties of multi-product supply chains},
journal = {Comput Chem Eng},
volume = {159},
pages = {107666},
year = {2022},
issn = {0098-1354},
doi = {https://doi.org/10.1016/j.compchemeng.2022.107666},
url = {https://www.sciencedirect.com/science/article/pii/S0098135422000114},
author = {Philip A. Tominac and Weiqi Zhang and Victor M. Zavala},
}
```
## Acknowledgements
We acknowledge support from the U.S. Department of Agriculture (grant 2017-67003-26055) and partial funding from the National Science Foundation (under grant CBET-1604374) in support of this work. | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.2.1 | 4407d513484560573a5984ad3f2cedb3cb5e8bb0 | docs | 29768 | ## Overview
`CoordinatedSupplyChains.jl` is a user-friendly tool designed to give you access to a powerful supply chain coordination model. `CoordinatedSupplyChains.jl` handles the data processing and model building so that you can quickly solve problems and generate results.
Putting together a supply chain problem is a matter of populating comma-separated-values files with the necessary definitions of the supply chain structure and the stakeholders participating in it. Once this is complete, you can point `CoordinatedSupplyChains.jl` to your data and run the code to generate results.
## Coordination Abstraction
`CoordinatedSupplyChains.jl` is based on the coordination model by [Tominac & Zavala](https://doi.org/10.1016/j.compchemeng.2020.107157) and its more recent iteration described by [Tominac, Zhang, & Zavala](https://www.sciencedirect.com/science/article/pii/S0098135422000114).
The `CoordinatedSupplyChains.jl` abstraction conceptualizes a supply chain as a market operating under a coordination system, like an auction where each stakeholder is bidding its preferred buying rate, its selling rate, or its service rate. This coordination system is managed by a coordinator, an independent operator who does not have a stake in the market, but whose goal is to maximize the total profit of the market. The stakeholders pass their bidding information to the coordinator, who resolves the market by setting product and service prices. As a result, transactions of products and services are allocated to stakeholders. The coordination system has a number of useful theoretical guarantees related to the coordinator's price setting practices and the implications for supply chain stakeholders.
1. No stakeholder loses money as a result of participation in the coordination system. A stakeholder either participates in the market with a positive allocation and nonnegative profit, or it does not participate in the market at all. Market participation is never coerced.
2. The coordinator's prices respect participating stakeholder bids. This is the mechanism by which nonnegative profits are guaranteed.
3. Coordination is efficient; there is no money lost to the coordinator or from the supply chain as a result of operating under the coordination system. In other words, money balances within a coordinated supply chain.
For a complete review of the auction system, please refer to the associated references.
## Supply Chain Representation
`CoordinatedSupplyChains.jl` uses a compact graph representation to describe a supply chain. The graph is defined by:
- A set of geographical (location) nodes
- A set of time points
- A set of spatiotemporal arcs that move products through space and time
Of interest within a coordinated supply chain are
- A set of products to be transacted
- A set of environmental impacts associated with the economic activities in the supply chain
Transacting products and providing services are the supply chain stakeholders. These are divided into five distinct categories, namely
- suppliers, who sell products
- consumers, who purchase products
- transportation providers, who move products between spatiotemporal nodes in the supply chain graph
- technology providers, who transform products from one form to another
- environmental impact consumers, who absorb the environmental impacts emitted by other stakeholders
With this graph abstraction and the five stakeholder categories, it is possible to build representations of complex supply chain systems including product processing and sustainability metrics.
## Data Format and Required Files
Any coordinated supply chain problem is defined by data detailing the sets of nodes, time points, arcs, products, impacts, and stakeholders. This data is divided into ten specially named .csv files. These are comma-separated by default, so any convenient .csv editor (text editor or spreadsheet software) will work. The file name conventions are as follows:
- csvdata_node.csv
- csvdata_time.csv (optional)
- csvdata_arcs.csv (optional)
- csvdata_product.csv
- csvdata_impact.csv (optional)
- csvdata_demand.csv
- csvdata_supply.csv
- csvdata_env.csv (optional)
- csvdata_tech.csv (optional)
- csvdata_techmap.csv (optional)
Note: not every supply chain problem requires every feature built into `CoordinatedSupplyChains.jl` and as such, certain files are optional. For example, if solving a steady-state supply chain problem, the csvdata_time.csv file may be excluded, and the package will simply assign variables a time index `T0`. Similarly, you may wish to build supply chain problems with no arcs; the package will accommodate this as well. For models with no sustainability focus, the impact and environmental stakeholder data folders may be excluded. Similarly, if there are no technologies, then the technology and mapping files may be excluded. No keywords or arguments are required; the package is programmed to check for the requisite files and proceed accordingly. The minimal `CoordinatedSupplyChains.jl` model must include one or more nodes, products, suppliers, and consumers. These four files are thus non-optional inputs.
A working example will be used to illustrate how these files are structured. You can copy each of the ten files to replicate the example on your own.
### csvdata_node.csv
Node data are structured as follows
1. Node ID: a unique string ID for the node, preferably of the form N01, N02, ...; no spaces allowed!
2. Node Name: a string with detailed information about the node; spaces allowed
3. Node longitude: A number representing the longitude of the node; e.g. Madison is -89.4012
4. Node latitude: A number representing the latitude of the node; e.g. Madison is 43.0731
Our example uses the following node data
```
# 1. Node ID, 2. Node Name, 3. Node longitude, 4. Node latitude
N1,Madison-WI,-89.4,43.1
N2,Austin-TX,-97.7,30.3
N3,Los Angeles-CA,-118.2,34.0
```
You may need to be careful when looking for longitude/latitude data; conventions can differ. However, `CoordinatedSupplyChains.jl` uses the same convention as Google maps, with negative longitude.
### csvdata_time.csv
Time data are structure as follows
1. Time point ID: a unique string ID for the time point; no spaces allowed
2. Time point duration: A number representing the duration of the time point; used for calculating temporal transportation costs
```
# 1. Time point ID,2. Duration
T1,1.0
T2,1.0
T3,1.0
T4,1.0
T5,1.0
```
In this example, we use five time points of equal duration; this is not a requirement. Duration can vary, and any time-dependent parameters will reflect the duration.
### csvdata_product.csv
Data defining products is arranged with columns numbered as follows
1. Product ID: a unique string ID for the product; no spaces allowed!
2. Product name: a string with detailed information about the product; spaces allowed
3. Transportation cost (distance): the transport cost to move the product over distance; needs to be positive to prevent transportation cycles
4. Transportation cost (time): the cost to store, or move the product through time
The demo product file is
```
# 1. Product no.,2. Product name,3. Transportation cost (distance) (USD.tonne^-1.km^-1),4. Transportation cost (time) (USD.tonne^-1.h^-1)
Milk,Milk,0.30,0.20
IceCream,Ice cream - tonne,0.30,0.20
Beef,Beef - boneless sirloin steak - USDA choice - tonne,0.25,0.05
Almonds,Almonds - shelled unseasoned - tonne,0.20,0.15
```
Product names have been used as IDs, with a more detailed description provided in column 2. Transportation costs in columns 3 and 4 are included with units provided in the column headers, as good record keeping practice. It is on the user to keep track of units and ensure they are consistent.
### csvdata_impact.csv
Data for environmental impact types are formatted as follows
1. Impact ID: a unique string ID for the impact; no spaces allowed!
2. Impact alias: a string with detailed information about the impact measure
3. Transportation coefficient (distance): Some environmental impacts are associated with transportation; the emission coefficient per unit distance is recorded here
4. Storage coefficient (time): Some environmental impacts are associated with storage; the emission coefficient per unit time is recorded here
For our demo, we will use the following
```
# 1. Impact ID, 2. Impact alias, 3. Transportation coefficient (impact unit per tonne.km), 4. Storage coefficient (impact unit per tonne.h)
Phosphorus,phosphorus equivalent eutrophication potential - tonne P-eq,0.0,0.0
CO2,Carbon dioxide emissions - tonne CO2-eq,0.01,0.001
WaterUse,Water use - tonne,0.0,0.0
```
### csvdata_arcs.csv
Arc data are structured as follows
1. Arc ID: a unique string ID for the arc, preferably of the form A01, A02, ...; no spaces allowed!
2. Arc first node: a Node ID included in node_data.csv
3. Arc second node: a Node ID included in node_data.csv
4. Arc capacity: a vector of numbers representing the product capacity of the arc; units (tonne)
5. Custom length (optional): A number representing the length of the arc; units: (km); used only if the CustomLengths parameter is set true; positive-valued
Our example has the following arcs
```
# 1. Arc ID, 2. Arc first node, 3. Arc second node, 4. Arc capacity, 5. Arc Length
A1,N1,N2,1E6|1E6|1E6|1E6,
A2,N2,N3,1E6|1E6|1E6|1E6,
A3,N3,N1,1E6|1E6|1E6|1E6,
```
This is the first data file that depends on others; csvdata_arcs.csv is built based on data in csvdata_node.csv and csvdata_product.csv. The CustomLengths field is unpopulated in this example because it will not be used. We could populate ourselves, if we knew specific route distances. Custom arcs lengths are useful if you already have access to distance data. In this example, we will let the software calculate great circle distances between nodes connected by arcs. The built-in great-circle distance function returns arc lengths in kilometers.
Note that arcs only need to be defined in one direction; e.g., from node N1 to N2 will allow products to flow from N1 to N2, and from N2 to N1.
Arc product capacities are provided as a vector, but note that product order will follow the order of products in csvdata_product.csv file.
### csvdata_supply.csv
Supplier data are structured as follows
1. Supply ID: a unique string ID for the supplier; no spaces allowed!
2. Node: a Node ID included in csvdata_node.csv, where the supplier is located
3. Time: a Time point ID included in csvdata_time.csv, when the supply is available
4. Product: a Product ID included in csvdata_product.csv that the supplier will offer for sale
5. Bid: a number representing the supplier bid for a product; a real number
6. Capacity: a number representing the maximum amount supplied; a positive number
7. Emissions: a vector of impact IDs from csvdata_impact representing the impacts associated with supplying the product
8. Emissions coefficients: a vector of real numbers representing the per unit emissions associated with supplying the product
The demo supply file
```
# 1.Supply reference no., 2.Node, 3.Time, 4.Product, 5.Bid, 6.Capacity, 7. Emissions, 8. Emissions coefficients unit per tonne product consumed
G01,N1,T1,Milk,1150.0,1.12,Phosphorus|CO2|WaterUse,0.0024|0.100|1020.0
G02,N1,T2,Milk,1150.0,1.12,Phosphorus|CO2|WaterUse,0.0024|0.100|1020.0
G03,N1,T3,Milk,1150.0,1.12,Phosphorus|CO2|WaterUse,0.0024|0.100|1020.0
G04,N3,T4,Milk,1150.0,1.12,Phosphorus|CO2|WaterUse,0.0024|0.100|1020.0
G05,N3,T5,Milk,1150.0,1.12,Phosphorus|CO2|WaterUse,0.0024|0.100|1020.0
G06,N2,T3,Beef,23479.23,1.0,Phosphorus|CO2|WaterUse,0.0108|33.1|15415.0
G07,N2,T4,Beef,23479.23,1.0,Phosphorus|CO2|WaterUse,0.0108|33.1|15415.0
G08,N2,T5,Beef,23479.23,1.0,Phosphorus|CO2|WaterUse,0.0108|33.1|15415.0
G09,N3,T1,Almonds,6018.01,1.0,Phosphorus|CO2|WaterUse,0.144|2.009|12984.0
G10,N3,T2,Almonds,6018.01,1.0,Phosphorus|CO2|WaterUse,0.144|2.009|12984.0
```
### csvdata_demand.csv
Consumer data are structured as follows
1. Demand ID: a unique string ID for the consumer; no spaces allowed!
2. Node: a Node ID included in csvdata_node.csv, where the consumer is located
3. Time: a Time point ID included in csvdata_time.csv, when the consumer is available
4. Product: a Product ID included in csvdata_product.csv that the consumer will offer to buy
5. Bid: a number representing the consumer bid for a product; a real number
6. Capacity: a number representing the maximum amount consumed; a positive number
7. Emissions: a vector of impact IDs from csvdata_impact representing the impacts associated with consuming the product
8. Emissions coefficients: a vector of real numbers representing the per unit emissions associated with consuming the product
Our example has five consumers
```
# 1.Demand reference no., 2.Node, 3.Time, 4.Product, 5.Bid, 6.Capacity, 7. Emissions, 8. Emissions coefficients unit per tonne product consumed
D01,N1,T1,IceCream,30000.00,0.3,,
D02,N1,T2,IceCream,30000.00,0.3,,
D03,N1,T3,IceCream,30000.00,0.3,,
D04,N1,T4,IceCream,30000.00,0.3,,
D05,N1,T5,IceCream,30000.00,0.3,,
D06,N2,T1,IceCream,30000.00,0.3,,
D07,N2,T2,IceCream,30000.00,0.3,,
D08,N2,T3,IceCream,30000.00,0.3,,
D09,N2,T4,IceCream,30000.00,0.3,,
D10,N2,T5,IceCream,30000.00,0.3,,
D11,N3,T1,IceCream,35000.00,0.4,,
D12,N3,T2,IceCream,35000.00,0.4,,
D13,N3,T3,IceCream,35000.00,0.4,,
D14,N3,T4,IceCream,35000.00,0.4,,
D15,N3,T5,IceCream,35000.00,0.4,,
D16,N1,T1,Almonds,6800,0.2,,
D17,N1,T2,Almonds,6800,0.2,,
D18,N2,T1,Almonds,6020,0.6,,
D19,N2,T2,Almonds,6020,0.6,,
D20,N3,T1,Almonds,6800,0.2,,
D21,N3,T2,Almonds,6800,0.2,,
D22,N1,T3,Beef,25000,0.2,,
D23,N1,T4,Beef,25000,0.2,,
D24,N1,T5,Beef,25000,0.2,,
D25,N2,T3,Beef,24000,0.4,,
D26,N2,T4,Beef,24000,0.4,,
D27,N2,T5,Beef,24000,0.4,,
D28,N3,T3,Beef,25000,0.4,,
D29,N3,T4,Beef,25000,0.4,,
D30,N3,T5,Beef,25000,0.4,,
```
### csvdata_env.csv
This data file defines environmental impact consumption and policy, and consists of
1. Environmental. stakeholder ID: a unique ID for the environmental stakeholder
2. Node: a Node ID included in csvdata_node.csv, where the environmental stakeholder is located
3. Time: a Time point ID included in csvdata_time.csv, when the environmental stakeholder is available
4. Impact: an Impact ID included in csvdata_impact.csv that the environmental stakeholder will consumer
5. Bid: a number representing the environmental stakeholder bid for a product; a real number
6. Capacity: a number representing the maximum amount consumed; a positive number
Environmental stakeholder data for the demo
```
# 1. Env. stakeholder reference, 2. Node, 3. Time, 4. Impact, 5. Bid (USD/impact unit), 6. Capacity
V01,N1,T1,Phosphorus,0,Inf
V02,N1,T2,Phosphorus,0,Inf
V03,N1,T3,Phosphorus,0,Inf
V04,N1,T4,Phosphorus,0,Inf
V05,N1,T5,Phosphorus,0,Inf
V06,N2,T1,Phosphorus,0,Inf
V07,N2,T2,Phosphorus,0,Inf
V08,N2,T3,Phosphorus,0,Inf
V09,N2,T4,Phosphorus,0,Inf
V10,N2,T5,Phosphorus,0,Inf
V11,N3,T1,Phosphorus,0,Inf
V12,N3,T2,Phosphorus,0,Inf
V13,N3,T3,Phosphorus,0,Inf
V14,N3,T4,Phosphorus,0,Inf
V15,N3,T5,Phosphorus,0,Inf
V16,N1,T1,CO2,0,Inf
V17,N1,T2,CO2,0,Inf
V18,N1,T3,CO2,0,Inf
V19,N1,T4,CO2,0,Inf
V20,N1,T5,CO2,0,Inf
V21,N2,T1,CO2,0,Inf
V22,N2,T2,CO2,0,Inf
V23,N2,T3,CO2,0,Inf
V24,N2,T4,CO2,0,Inf
V25,N2,T5,CO2,0,Inf
V26,N3,T1,CO2,0,Inf
V27,N3,T2,CO2,0,Inf
V28,N3,T3,CO2,0,Inf
V29,N3,T4,CO2,0,Inf
V30,N3,T5,CO2,0,Inf
V31,N1,T1,WaterUse,0,Inf
V32,N1,T2,WaterUse,0,Inf
V33,N1,T3,WaterUse,0,Inf
V34,N1,T4,WaterUse,0,Inf
V35,N1,T5,WaterUse,0,Inf
V36,N2,T1,WaterUse,0,Inf
V37,N2,T2,WaterUse,0,Inf
V38,N2,T3,WaterUse,0,Inf
V39,N2,T4,WaterUse,0,Inf
V40,N2,T5,WaterUse,0,Inf
V41,N3,T1,WaterUse,0,Inf
V42,N3,T2,WaterUse,0,Inf
V43,N3,T3,WaterUse,0,Inf
V44,N3,T4,WaterUse,0,Inf
V45,N3,T5,WaterUse,0,Inf
```
### csvdata_tech.csv
Technology data are structures as follows. Pay attention to these definitions; technology data are the most complex to set up.
1. Tech ID: a unique string ID for the technology, no spaces allowed!
2. Tech Outputs: a vertical bar-delimited list of Product IDs included in csvdata_product.csv; e.g., ",P05|P06,"
3. Tech Inputs: a vertical bar-delimited list of Product IDs included in csvdata_product.csv; e.g., ",P01|P02|P04,"
4. Tech Impacts: a vertical bar-delimited list of Impact IDs included in csvdata_impact.csv; e.g., ",GWP|NH3,"
5. Output Yield: a vertical bar-delimited list of yield parameters (positive) the same length as "Tech Outputs"; e.g., "|0.4|0.3|0.6|"
6. Input Yield: a vertical bar-delimited list of yield parameters (positive) the same length as "Tech Inputs"; e.g., ",1.0|0.7|0.6,"- one of these MUST be 1.0! see 8. Reference product
7. Impact Yield: a vertical bar-delimited list of impact parameters (positive) the same length as "Tech Impacts"; e.g., ",0.045|0.0033|0.01,"
8. Reference product: a Product ID included in csvdata_product.csv; this is used as the basis for the technology, and its yield coefficient in 6. Input Yield MUST be 1.0.
9. Bid: a number representing the technology bid for a product; positive
10. Capacity: a number representing the maximum amount of reference product processed; positive
11. Name: a string with detailed information about the technology; spaces allowed
The technology data in our example are as follows
```
# 1. Tech ID,2. Tech Outputs,3. Tech Inputs,4. Tech Impacts,5. Output stoich,6. Input stoich,7. Impact stoich,8. Reference product,9. Operating bid (USD/tonne),10. Capacity,11. alias
M1,IceCream,Milk,Phosphorus|CO2|WaterUse,0.178,1.0,0.00065|3.94|2050.0,Milk,3861.11,Inf,IceCream production (extant)
M2,IceCream,Milk,Phosphorus|CO2|WaterUse,0.178,1.0,0.00065|2.94|2050.0,Milk,3999.99,Inf,IceCream production (CO2 emissions reduced 1 tonne)
```
Note that technology_data.csv embeds vertical bar-delimited lists inside a comma-delimited data file. This condenses our representation.
### csvdata_techmap.csv
The final data file, called "techmap" (because it maps instances of technologies onto the supply chain) is structured as follows
1. Tech location ID: a unique string ID for the technology mapping; no spaces allowed!
2. Node ID: a Node ID included in csvdata_node.csv
3. Time ID: a Time ID included in csvdata_time.csv
4. Tech ID: a Technology ID included in csvdata_technology.csv
Our example uses the following techmap entries
```
# 1. Tech location reference ID,2. Node ID,3. Time ID,4. Tech ID
L01,N1,T1,M1
L02,N1,T2,M1
L03,N1,T3,M1
L04,N1,T4,M1
L05,N1,T5,M1
```
Technologies are defined in csvdata_technology.csv in a general form, and are not mapped onto the supply chain. The technology-node pairs in csvdata_techmap.csv serve this function, allowing multiple copies of a technology to be placed at supply chain nodes; i.e., L1,N1,T1,M1 and L2,N2,T1,M1 creates two "copies" of technology M1 at nodes N1 and N2, treated as separate entities in the model. This can reduce the size and complexity of managing large numbers of technologies.
## Basic Usage
`CoordinatedSupplyChains.jl` is built to streamline your workflow with supply chain problems. It handles all the data input and output, as well as model building and solution. The user's responsibility is to set up the input data files defining their supply chain problem correctly. Consequently, the simplest usage of the package requires no more than pointing to the source data files.
```
RunCSC()
```
In this example, it is assumed that Julia is currently running in the same directory as the data files, and defaults to the current directory. This way `RunCSC()` can be used without an argument. The function has three keyword arguments as well, allowing you to tune your experience.
1. optimizer; default: HiGHS.Optimizer
2. UseArcLengths; default: true
3. Output: default: false
The first optional keyword argument is `optimizer` allowing the user to provide a different optimizer to solve their supply chain problem. By default, the open-source HiGHS optimizer is used. The user may want to use a licensed optimizer instead. This can be achieved by passing the optimizer argument:
```
RunCSC(optimizer=Gurobi.Optimizer)
```
The next keyword argument is `UseArcLengths` defaulting to a value of `true`. This keyword allows the user to change the behavior of `CoordinatedSupplyChains.jl` with respect to arcs. By default, the package will use the arc lengths provided by the user in csvdata_arcs.csv. However, these may be difficult or tedious to calculate by hand, especially if the user's supply chain has many connecting arcs. By passing
```
RunCSC(UseArcLengths=false)
```
the package will instead calculate great circle lengths for the arcs according to the node latitude and longitude data provided. This keyword provides a convenient means of estimating distances between locations.
The final keyword `Output` (defaulting to `false`) allows the user to specify that all model data should be returned once the code has run. This allows that user to inspect data structures manually. This keyword requires that the user indicate output names with the call to `RunCSC()`. The suggested naming convention is optional, but the number of outputs is required
```
T, N, P, Q, A, D, G, V, M, L, Subsets, Pars, MOD, ModelStats, SOL, POST = RunCSC(Output=true)
```
In order, the outputs are:
- `T`: time data structure
- `N`: node data structure
- `P`: product data structure
- `Q`: impact data structure
- `A`: arc data structure
- `D`: demand data structure
- `G`: supply data structure
- `V`: environmental stakeholder data structure
- `M`: technology data structure
- `L`: technology mapping data structure
- `Subsets`: Subsets used in the model
- `Pars`: parameters used in the model
- `MOD`: JuMP model
- `ModelStats`: structure containing model statistics
- `SOL`: structure containing the model solution data
- `POST`: structure containing post-solution values calculated following the model solve
Note that most of this data is made available to the user in the solution output, all of which is stored in a folder called "_SolutionData" in the directory with the model data. If the user wants to access, for example, the JuMP model following the solve, this keyword makes this possible. With the exception of `MOD` which is a JuMP model structure (see the JuMP documentation on [Models](https://jump.dev/JuMP.jl/stable/manual/models/)) these outputs are all custom Julia data structures. They are primarily defined for convenient model representation within `CoordinatedSupplyChains.jl` but you may want to have access to them for use in data manipulations or plotting solutions. Each structure has a number of fields containing data, which are accessed with a syntax `[structure_name].[field_name][index]`. The fields are as follows.
Time structure
```
T
ID::Array - time point IDs
dt::Dict - time point durations
```
Node structure
```
N
ID::Array - node IDs
alias::Dict - node names
lon::Dict - node longitudes
lat::Dict - node latitudes
```
Product structure
```
P
ID::Array - product IDs
alias::Dict - product names
transport_cost::Dict - product transportation costs
storage_cost::Dict - product storage costs
```
Impact structure
```
Q
ID::Array - impact IDs
alias::Dict - impact names
transport_coeff::Dict - impact transportation emission coefficients
storage_coeff::Dict - impact storage emission coefficients
```
Arc structure
```
A
ID::Array - arc IDs
n_send::Dict - arc sending node
n_recv::Dict - arc receiving node
t_send::Dict - arc sending time point
t_recv::Dict - arc receiving time point
bid::Dict - arc bid
cap::Dict - arc capacities
len::Dict - arc length
dur::Dict - arc duration
ID_S::Array - array of arc IDs that are purely spatial
ID_T::Array - array of arc IDs that are purely temporal
ID_ST::Array - array of arc IDs that are spatiotemporal
```
Demand structure
```
D
ID::Array - demand IDs
node::Dict - consumer node
time::Dict - demand time point
prod::Dict - demand product
bid::Dict - demand bid
cap::Dict - demand capacity
Impacts::Dict - impacts associated with demand
ImpactYields::Dict - impact coefficients
```
Supply structure
```
G
ID::Array - supply IDs
node::Dict - supplier node
time::Dict - supply time point
prod::Dict - supply product
bid::Dict - supply bid
cap::Dict - supply capacity
Impacts::Dict - impacts associated with supply
ImpactYields::Dict - impact coefficients
```
Environmental stakeholder structure
```
V
ID::Array - e.s. IDs
node::Dict - e.s. node
time::Dict - e.s. time point
impact::Dict - e.s. impact
bid::Dict - e.s. bid
cap::Dict - e.s. capacity
```
Technology structure
```
M
ID::Array - technology ID
Outputs::Dict - technology output products
Inputs::Dict - technology input products
Impacts::Dict - technology impacts
OutputYields::Dict - technology output product yield coefficients
InputYields::Dict - technology input product yield coefficients
ImpactYields::Dict - technology impact yield coefficients
InputRef::Dict - technology reference product
bid::Dict - technology bid
cap::Dict - technology capacity
alias::Dict - technology name
```
Technology mapping structure
```
L
ID::Array - technology mapping ID
node::Dict - technology instance node
time::Dict - technology instance time
tech::Dict - technology instance type
```
Subset structure
```
Subsets
T1::Array - set containing the first time point
Tt::Array - set containing all time points except the last
TT::Array - set containing all time points except the first
Tprior::Dict - maps the prior time point to the current one
Tpost::Dict - maps the subsequent time point to the current one
Ain::Union{Dict, Nothing} - all arcs inbound upon a node
Aout::Union{Dict, Nothing} - all arcs outbound from a node
Dntp::Dict - returns consumers by node, time point, and product indices
Gntp::Dict - returns suppliers by node, time point, and product indices
Dntq::Union{Dict, Nothing} - returns consumers by node, time point, and impact indices
Gntq::Union{Dict, Nothing} - returns suppliers by node, time point, and impact indices
Vntq::Union{Dict, Nothing} - returns environmental stakeholders by node, time point, and impact indices
DQ::Union{Array, Nothing} - returns all consumers with some environmental impact
GQ::Union{Array, Nothing} - returns all suppliers with some environmental impact
NTPgenl::Union{Dict, Nothing} - returns technology instances at a node and time point generating product p
NTPconl::Union{Dict, Nothing} - returns technology instances at a node and time point consuming product p
NTQgenl::Union{Dict, Nothing} - returns technology instances at a node and time point with impact q
```
Parameter structure
```
Pars
gMAX::Dict - supply allocation maxima
dMAX::Dict - demand allocation maxima
eMAX::Union{Dict, Nothing} - environmental stakeholder consumption maxima
Ξ³iq::Union{Dict, Nothing} - environmental impact yield coefficient for suppliers
Ξ³jq::Union{Dict, Nothing} - environmental impact yield coefficient for consumers
Ξ³aq::Union{Dict, Nothing} - environmental impact yield coefficient for transportation
Ξ³mp::Union{Dict, Nothing} - technology product yield coefficients
Ξ³mq::Union{Dict, Nothing} - environmental impact yield coefficient for technologies
ΞΎgenMAX::Union{Dict, Nothing} - technology generation maxima
ΞΎconMAX::Union{Dict, Nothing} - technology consumption maxima
ΞΎenvMAX::Union{Dict, Nothing} - technology impact maxima
```
Model statistics structure
```
ModelStats
Variables::Int - number of model variables
TotalInequalityConstraints::Int - total number of inequality constraints
TotalEqualityConstraints::Int - total number of equality constraints
VariableBounds::Int - number of model variable bounds
ModelInequalityConstrtaints::Int - number of model inequality constraints
ModelEqualityConstraints::Int - number of model equality constraints
```
Model solution data
```
SOL
TermStat::String - termination status
PrimalStat::String - primal solution status
DualStat::String - dual solution status
z::Float64 - objective values
g::JuMP.Containers.DenseAxisArray - supply allocations
d::JuMP.Containers.DenseAxisArray - demand allocations
e::Union{JuMP.Containers.DenseAxisArray,Nothing} - environmental stakeholder allocations
f::Union{JuMP.Containers.DenseAxisArray,Nothing} - transportation allocations
ΞΎ::Union{JuMP.Containers.DenseAxisArray,Nothing} - technology allocations
Οp::JuMP.Containers.DenseAxisArray - product nodal prices
Οq::Union{JuMP.Containers.DenseAxisArray,Nothing} - impact nodal prices
```
Derived solution values
```
POST
gNTP::Dict - nodal supply allocations
dNTP::Dict - nodal demand allocations
eNTQ::Union{Dict,Nothing} - nodal environmental stakeholder allocations
ΞΎgen::Union{Dict,Nothing} - technology allocations, generation
ΞΎcon::Union{Dict,Nothing} - technology allocations, consumption
ΞΎenv::Union{Dict,Nothing} - technology allocations, impact
Ο_iq::Union{Dict,Nothing} - supplier impact prices
Ο_jq::Union{Dict,Nothing} - consumer impact prices
Ο_a::Union{Dict,Nothing} - transportation prices
Ο_aq::Union{Dict,Nothing} - transportation impact prices
Ο_m::Union{Dict,Nothing} - technology prices
Ο_mq::Union{Dict,Nothing} - technology impact prices
Οi::Dict - supplier profits
Οj::Dict - consumer profits
Οv::Union{Dict,Nothing} - environmental stakeholder profits
Οl::Union{Dict,Nothing} - technology profits
Οa::Union{Dict,Nothing} - transportation profits
``` | CoordinatedSupplyChains | https://github.com/Tominapa/CoordinatedSupplyChains.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | code | 128 | module FermionXYModels
using LinearAlgebra
include("fermions.jl")
include("models.jl")
include("montecarlo.jl")
end # module
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | code | 1115 | export FermionBasis
"""
FermionBasis(n_sites; sites=(-1, 1))
Construct a binary fermion basis for a chain of fermions with `n_sites` in form of an interator. The default field is
`(-1, 1)` which can be optionally changed by specifying the keyword argument `states` as a `Tuple`. No allocations are made
during construction. The basis can be materialized using `collect` (not recommended for long chains).
"""
struct FermionBasis
n_sites::Int
states::Tuple{Int,Int}
function FermionBasis(n_sites::Int; states::Tuple=(-1, 1))
if length(states) != 2
throw(ArgumentError("length(states) != 2: Fermion basis requries two states. "))
end
if n_sites <= 0
throw(ArgumentError("n_sites <= 0: Fermion basis requires n_sites > 0. "))
end
new(n_sites, states)
end
end
# Iterate the basis by interpreting the integer `state` as a base-2 encoding of the
# site occupations; each step yields an `n_sites`-tuple of site states.
function Base.iterate(b::FermionBasis, state::Int64=0)
    state < length(b) || return nothing
    bits = digits(state, base=2, pad=b.n_sites)
    config = Tuple(b.states[bit + 1] for bit in bits)
    return config, state + 1
end
# Number of basis configurations: two states per site over n_sites sites.
Base.length(b::FermionBasis) = 2^b.n_sites
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | code | 3940 | export FermionXYChain,
FermionIsingChain,
FermionXXChain,
correlation_matrix,
probability_matrix
"""
The **quantum XY model** projected to spinless fermions with **periodic boundary conditions** (PBC). The sites
consist of the elements from set {-1, 1}, where -1 represents no fermion and 1 represents a fermion at that site.
"""
mutable struct FermionXYChain{N}
sites::Vector{Int}
L::Int
J::Float64
h::Float64
gamma::Float64
corr::Matrix{Float64}
parity::Int
function FermionXYChain(; L::Int, J::Real, h::Real, gamma::Real, start::Symbol=:rand, parity::Int=-1)
if L <= 0
throw(ArgumentError("FermionXYChain requires L > 0."))
end
if !(-1 <= gamma <= 1)
throw(ArgumentError("FermionXYChain requires -1 <= gamma <= 1."))
end
if J == 0
throw(ArgumentError("FermionXYChain requires J to be non-zero."))
end
if parity != -1 && parity != 1
throw(ArgumentError("FermionXYChain requires parity to be -1 or 1."))
end
J = convert(Float64, J)
h = convert(Float64, h)
gamma = convert(Float64, gamma)
if start == :rand
sites = rand([-1, 1], L)
elseif start == :vacuum
sites = ones(L)
elseif start == :filled
sites = fill(-1, L)
else
throw(ArgumentError("FermionXYChain allows start = :rand | :vacuum | :filled. "))
end
corr = correlation_matrix(; L=L, J=J, h=h, gamma=gamma, parity=parity)
new{L}(sites, L, J, h, gamma, corr, parity)
end
end
"""
The **XX model**, also known as the isotropic (Ξ³=0) XY model, projected to spinless fermions with PBC.
"""
FermionXXChain(; L::Int, J::Real, h::Real, start::Symbol=:rand) = FermionXYChain(; L=L, J=J, h=h, gamma=0, start=start)
"""
The **Ising model**, i.e. the XY model with Ξ³=1, projected to spinless fermions with PBC.
"""
FermionIsingChain(; L::Int, J::Real, h::Real, start::Symbol=:rand) = FermionXYChain(; L=L, J=J, h=h, gamma=1, start=start)
"""
Returns the correlation matrix of the XY chain.
"""
correlation_matrix(model::FermionXYChain) = model.corr
"""
Returns the probability matrix of the XY chain.
"""
probability_matrix(model::FermionXYChain) = probability_matrix(model.sites;
L=model.L, J=model.J,
h=model.h, gamma=model.gamma,
parity=model.parity, corr=model.corr)
# Assemble the full LΓ—L correlation matrix of the fermion chain from its elements G_nm.
function correlation_matrix(; L::Int, J::Float64, h::Float64, gamma::Float64, parity::Int=-1, float_type::Type=Float64)
    mat = Matrix{float_type}(undef, L, L)
    for col in 1:L, row in 1:L
        mat[row, col] = G_nm(row, col; L=L, J=J, h=h, gamma=gamma, parity=parity, float_type=float_type)
    end
    return mat
end
# Probability matrix of a configuration `sites`; the determinant of this matrix is the
# probability of observing that configuration.
@inbounds function probability_matrix(sites::Base.AbstractVecOrTuple;
                                      L::Int,
                                      J::Float64,
                                      h::Float64,
                                      gamma::Float64,
                                      parity::Int=-1,
                                      float_type::Type=Float64,
                                      corr::Matrix{<:Union{Float64, BigFloat}}=correlation_matrix(;
                                          L=L, J=J, h=h, gamma=gamma, parity=parity, float_type=float_type))
    # Guard clause: the configuration must match the chain length.
    length(sites) == L ||
        throw(ArgumentError("The number of sites is not equal to the model's length"))
    return [(1 / 2)Ξ΄(row, col) - (1 / 2) * sites[row] * corr[row, col] for row β 1:L, col β 1:L]
end
# Element (n, m) of the fermion-chain correlation matrix, summed over the L momentum modes.
function G_nm(n::Int, m::Int; L::Int, J::Float64, h::Float64, gamma::Float64, parity::Int=-1, float_type::Type=Float64)
    # Promote the couplings to `float_type` once, and start the accumulator at the
    # promoted zero so its type never changes inside the loop (it previously started
    # as the Int literal 0, making the `+=` type-unstable, and the promoted zero was
    # discarded into an unused temporary).
    g_n, J, h, gamma = promote(zero(float_type), J, h, gamma)
    for k in 1:L
        Ο_k = (2k + (parity - 1) // 2) // L # redefinition of Ο_k => Ο_k / Ο
        Ο΅_k = hypot(J * cospi(Ο_k) + h, J * gamma * sinpi(Ο_k)) # single-mode energy
        cos_ΞΈ_k = (J * cospi(Ο_k) + h) / Ο΅_k
        sin_ΞΈ_k = J * gamma * sinpi(Ο_k) / Ο΅_k
        g_n += cos_ΞΈ_k * cospi((n - m) * Ο_k) - sin_ΞΈ_k * sinpi((n - m) * Ο_k)
    end
    return g_n / L
end
# Kronecker delta: true (β‘ 1) exactly when the two indices coincide.
Ξ΄(a, b) = isequal(a, b)
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | code | 654 | export metropolis_update!, config_probability
# Single-site Metropolis step: propose flipping one random site and accept with
# probability min(1, P_new / P_old). Returns `(model, P)` where `P` is the probability
# of the configuration the model is actually left in. (The previous version always
# returned `P_new`, i.e. the probability of the *rejected* configuration after a
# rejection, which corrupted chained updates such as `equilibrate!`.)
@inbounds function metropolis_update!(model::FermionXYChain; P_old::Float64=config_probability(model))
    site = rand(1:model.L)
    model.sites[site] *= -1 # do the flip
    P_new = config_probability(model)
    if rand() > (P_new / P_old)
        model.sites[site] *= -1 # revert the flip
        return model, P_old     # rejected: the retained configuration still has P_old
    end
    return model, P_new
end
# Drive the chain toward equilibrium by applying `steps` Metropolis updates,
# threading the configuration probability through to avoid recomputing it.
function equilibrate!(model::FermionXYChain, steps::Int=1000)
    prob = config_probability(model)
    for _ in 1:steps
        model, prob = metropolis_update!(model; P_old=prob)
    end
    return model
end
# Probability of the model's current site configuration: determinant of its probability matrix.
config_probability(model::FermionXYChain) = det(probability_matrix(model))
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | code | 2007 | using FermionXYModels
using LinearAlgebra
using Test
@testset "FermionBasis" begin
@test length(FermionBasis(3)) == 8
@test collect(FermionBasis(4)) == vec(collect(Iterators.product([-1, 1], [-1, 1], [-1, 1], [-1, 1])))
@test_throws ArgumentError FermionBasis(4; states=(-1, 0, 1))
@test_throws ArgumentError FermionBasis(0)
@test_throws ArgumentError FermionBasis(-1)
end
@testset "Models" begin
L = 20
m1 = FermionXYChain(; L=L, J=1.0, h=1.0, gamma=1.0, start=:vacuum)
m2 = FermionXYChain(; L=L, J=1, h=1, gamma=1, start=:vacuum)
fnames = fieldnames(FermionXYChain)
@test all([getfield(m1, f) for f in fnames] .== [getfield(m2, f) for f in fnames])
@test_throws ArgumentError FermionXYChain(; L=0, J=1.0, h=1.0, gamma=1.0, start=:rand)
@test_throws ArgumentError FermionXYChain(; L=L, J=0, h=1.0, gamma=1.0, start=:rand)
@test_throws ArgumentError FermionXYChain(; L=-20, J=1.0, h=1.0, gamma=1.0, start=:rand)
@test_throws ArgumentError FermionXYChain(; L=L, J=1.0, h=0, gamma=-2.0, start=:rand)
m3 = FermionIsingChain(; L=L, J=1.0, h=1.0, start=:vacuum)
m4 = FermionXYChain(; L=L, J=1.0, h=1.0, gamma=1.0, start=:vacuum)
@test all([getfield(m3, f) for f in fnames] .== [getfield(m4, f) for f in fnames])
m4 = FermionXXChain(; L=L, J=1.0, h=1.0, start=:vacuum)
m5 = FermionXYChain(; L=L, J=1.0, h=1.0, gamma=0, start=:vacuum)
@test all([getfield(m4, f) for f in fnames] .== [getfield(m5, f) for f in fnames])
end
@testset "Correlations and Probabilities" begin
p = 0
L = 10
for sites in FermionBasis(L)
p += det(probability_matrix(sites; L=L, J=1.0, h=1.0, gamma=1.0))
end
@test isapprox(p, 1.0)
model = FermionXYChain(;L=L, J=1.0, h=1.2, gamma=0.5)
G_mat = correlation_matrix(model)
p_mat = probability_matrix(model)
@test G_mat == correlation_matrix(;L=L, J=1.0, h=1.2, gamma=0.5)
@test p_mat == probability_matrix(model.sites; L=L, J=1.0, h=1.2, gamma=0.5)
end
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | docs | 1073 | MIT License
Copyright (c) 2022 Jaydev Singh Rao
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.1.0 | 3cea5c88e4ef2433ab63f663eb47f0880e70f54f | docs | 3409 | # FermionXYModel.jl
[](https://github.com/JaydevSR/FermionXYModels.jl/actions)
[](http://codecov.io/github/JaydevSR/FermionXYModels.jl?branch=main)
_**Quantum XY Model by projecting the spins to spinless fermions.**_
**Reference**: StΓ©phan, J., Misguich, G., & Pasquier, V. (2010). RΓ©nyi entropy of a line in two-dimensional Ising models. Physical Review B, 82(12). doi:10.1103/physrevb.82.125455
## Installation
```julia
using Pkg
Pkg.add("FermionXYModels")
```
## Models
- `FermionXYChain`: The quantum XY model as a chain of fermions.
**Arguments:**
- `L::Int`: The chain length.
- `J::Real`: The coupling constant.
- `h::Real`: The magnetic field.
- `gamma::Real`: The anisotropy constant.
- `start::Symbol`: The initial state. Can be one of `:rand`, `:vacuum`, `:filled`
- `parity::Int=-1`: Can be -1 or 1.
- `FermionXXChain`: The quantum XY model for $\gamma=0$.
- `FermionIsingChain`: The quantum XY model for $\gamma=1$.
## Iterators
- `FermionBasis`: An Iterator over the binary basis of fermion chains. It accepts the following arguments:
**Arguments:**
- `n_sites::Int`: The number of sites in fermion chain.
- `states::Tuple=(-1, 1)`: A keyword argument taking the integer representation of filled and unfilled sites.
## Correlations and Probabilities
- `correlation_matrix`: Calculates the correlation matrix of the chain given by $G_{ij} = \langle a_i^\dagger a_j\rangle$. Has two methods, one takes a `FermionXYChain` as argument. Other takes the agruments:
- `L::Int`: The chain length.
- `J::Real`: The coupling constant.
- `h::Real`: The magnetic field.
- `gamma::Real`: The anisotropy constant.
- `parity::Int=-1`: Can be -1 or 1.
- `float_type::Type=Float64`: The floating point type (default is `Float64`, can be `BigFloat` for greater precision).
- `probability_matrix`: Calculates the probability matrix of the chain. The probability of the particular configuration is then given by $\det(P)$ where $P$ is the said matrix. Has two methods, one takes a `FermionXYChain` as argument. Other takes the agruments:
- `sites::Vector{Int}`: The sites of the chain having value -1 for no fermion and 1 for a fermion.
- `L::Int`: The chain length.
- `J::Real`: The coupling constant.
- `h::Real`: The magnetic field.
- `gamma::Real`: The anisotropy constant.
- `parity::Int=-1`: Can be -1 or 1.
- `float_type::Type=Float64`: The floating point type (default is `Float64`, can be `BigFloat` for greater precision).
## Monte-Carlo Simulation
- `metropolis_update!(model::FermionXYChain)`: Generates a new configuration for the chain by performing single site updates using acceptance rate $A(P'|P) = \cfrac{P'}{P}$, where $P'$ is the probability of new configuration and $P$ is that of old configuration..
- `equilibrate!(model::FermionXYChain, steps::Int)`: Equilibrates the chain by performing $N$ updates.
**Note: The above methods for Monte-Carlo sampling are useless in my experience; this is probably because single-site updates cannot capture the transformation from quantum spins to spinless fermions. If anyone knows more about this, please inform me by opening an issue.**
| FermionXYModels | https://github.com/JaydevSR/FermionXYModels.jl.git |
|
[
"MIT"
] | 0.0.1 | f97919fbde857f05d0e5e9b06cf4528bcda4c6f4 | code | 72 | module MortalityModels
export LC_SVD
include("mort_functions.jl")
end
| MortalityModels | https://github.com/sveekelen/MortalityModels.jl.git |
|
[
"MIT"
] | 0.0.1 | f97919fbde857f05d0e5e9b06cf4528bcda4c6f4 | code | 799 | using Statistics, LinearAlgebra
"""
LC_SVD(mMu)
Input:
mMu = matrix containing log forces of mortality for a given population
Output:
vAx = constant age effects in a LC model
vBx = age effects in a LC model
vKt = period effects in a LC model
Summary: function used to fit a LC model on the log force of mortality with a SVD decomposition
"""
function LC_SVD(mMu)
# Calculate Aβ as the average log ΞΌ over time
vAx = mean(mMu, dims = 2)
# Create time demeaned log ΞΌ matrix
mMu_dm = mMu .- vAx
# Perform SVD on demeaned matrix
objSVD = svd(mMu_dm)
# Get Ξ²β and ΞΊβ
vBx = -objSVD.U[:,1]
vKt = -objSVD.Vt[1,:] * objSVD.S[1]
# return age and period effects
return (Kt = vKt, Ax = vAx, Bx = vBx)
end | MortalityModels | https://github.com/sveekelen/MortalityModels.jl.git |
|
[
"MIT"
] | 0.0.1 | f97919fbde857f05d0e5e9b06cf4528bcda4c6f4 | code | 158 | using MortalityModels
using Test
@testset "MortalityModels.jl" begin
# Perform simple test
LC_SVD([1]) == (Kt = [-0.0], Ax = [1.0], Bx = [-1.0])
end
| MortalityModels | https://github.com/sveekelen/MortalityModels.jl.git |
|
[
"MIT"
] | 0.0.1 | f97919fbde857f05d0e5e9b06cf4528bcda4c6f4 | docs | 779 | # MortalityModels
MortalityModels can be used to fit popular population mortality models in Julia. As of right now, MortalityModels is still under development and can only be used to fit a simple Lee-Carter (LC) model using a SVD decomposition. Other methods are still in development.
The LC model can be fitted in Julia using the following function:
``` julia
LC_SVD(mMu)
```
The function returns age and period effects for the LC model fitted using an SVD methodology. Note that the input here is a log force of mortality matrix with ages on the rows and years on the columns.
[](https://github.com/sveekelen/JuMoMo.jl/actions/workflows/CI.yml?query=branch%3Amaster)
| MortalityModels | https://github.com/sveekelen/MortalityModels.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 2384 | # From https://github.com/JuliaIO/HDF5.jl/blob/master/deps/build.jl
using Libdl
const depsfile = joinpath(@__DIR__, "deps.jl")
libpath = get(ENV, "JULIA_ADIOS2_PATH", nothing)
# We avoid calling Libdl.find_library to avoid possible segfault when calling
# dlclose (#929).
# The only difference with Libdl.find_library is that we allow custom dlopen
# flags via the `flags` argument.
# Like Libdl.find_library, but with user-controllable dlopen flags (see #929).
# Returns the first library path/name that can be dlopen'ed, or "" when none can.
function find_library_alt(libnames, extrapaths=String[]; flags=RTLD_LAZY)
    # Try every candidate name, first inside the extra search paths, then on the
    # default loader search path.
    for name in libnames
        for dir in extrapaths
            candidate = joinpath(dir, name)
            if _gr_loadable(candidate, flags)
                return candidate
            end
        end
        if _gr_loadable(name, flags)
            return name
        end
    end
    return ""
end

# True when `lib` can be dlopen'ed with `flags`; the handle is closed immediately.
function _gr_loadable(lib, flags)
    handle = dlopen(lib, flags; throw_error=false)
    handle === nothing && return false
    dlclose(handle)
    return true
end
##
# Decide what deps/deps.jl should contain: either a stub that loads the prebuilt
# ADIOS2_jll artifact, or bindings to a system-provided ADIOS2 installation
# pointed to by the JULIA_ADIOS2_PATH environment variable.
new_contents = if libpath === nothing
    # By default, use ADIOS2_jll
    """
    # This file is automatically generated
    # Do not edit
    using ADIOS2_jll
    check_deps() = nothing
    """
else
    @info "using system ADIOS2"
    # Search the given prefix as well as its lib/ and lib64/ subdirectories.
    libpaths = [libpath, joinpath(libpath, "lib"), joinpath(libpath, "lib64")]
    flags = RTLD_LAZY | RTLD_NODELETE # RTLD_NODELETE may be needed to avoid segfault (#929)
    libadios2_c = find_library_alt(["libadios2_c"], libpaths; flags=flags)
    libadios2_c_mpi = find_library_alt(["libadios2_c_mpi"], libpaths;
                                       flags=flags)
    isempty(libadios2_c) && error("libadios2_c could not be found")
    isempty(libadios2_c_mpi) &&
        @info "libadios2_c_mpi could not be found, assuming ADIOS2 serial build"
    # Record the library size so the generated check_deps() can detect a changed
    # installation at load time.
    libadios2_c_size = filesize(dlpath(libadios2_c))
    """
    # This file is automatically generated
    # Do not edit
    function check_deps()
        if libadios2_c_size != filesize(Libdl.dlpath(libadios2_c))
            error("ADIOS2 library has changed, re-run Pkg.build(\\\"ADIOS2\\\")")
        end
    end
    $(:(const libadios2_c = $libadios2_c))
    $(:(const libadios2_c_mpi = $libadios2_c_mpi))
    $(:(const libadios2_c_size = $libadios2_c_size))
    """
end
if !isfile(depsfile) || new_contents != read(depsfile, String)
    # only write file if contents have changed to avoid triggering re-precompilation each build
    open(depsfile, "w") do io
        return print(io, new_contents)
    end
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 319 | # Generate documentation with this command:
# (cd docs && julia --color=yes make.jl)
push!(LOAD_PATH, "..")
using Documenter
using ADIOS2
makedocs(; sitename="ADIOS2", format=Documenter.HTML(), modules=[ADIOS2])
deploydocs(; repo="github.com/eschnett/ADIOS2.jl.git", devbranch="main",
push_preview=true)
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 715 | module ADIOS2
using MPI
using Libdl
# Path to the bindings generated by deps/build.jl; loading fails with a helpful
# message if the package was never built.
const depsfile = joinpath(@__DIR__, "..", "deps", "deps.jl")
if isfile(depsfile)
    include(depsfile)
else
    error("ADIOS2 is not properly installed. Please run Pkg.build(\"ADIOS2\") ",
          "and restart Julia.")
end
### Helpers

# A value of type `T` that may be absent (`nothing`).
const Maybe{T} = Union{Nothing,T}

# Return the first argument unless it is `nothing`, in which case return the fallback.
maybe(::Nothing, fallback) = fallback
maybe(value, fallback) = value
# Free memory handed out by the ADIOS2 C library. On Windows, Libc.free is used;
# elsewhere, `free` is called from libadios2_c so allocation and deallocation
# happen in the same C runtime.
function free(ptr::Ptr)
    @static Sys.iswindows() ? Libc.free(ptr) :
            ccall((:free, libadios2_c), Cvoid, (Ptr{Cvoid},), ptr)
end
include("types.jl")
include("adios.jl")
include("io.jl")
include("variable.jl")
include("attribute.jl")
include("engine.jl")
include("highlevel.jl")
# Module initialization: verify the ADIOS2 library recorded at build time is still
# usable (a no-op for the default ADIOS2_jll configuration).
function __init__()
    check_deps()
    return
end
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 3168 | # Adios functions
export Adios
"""
mutable struct Adios
Holds a C pointer `adios2_adios *`.
This value is finalized automatically. It can also be explicitly
finalized by calling `finalize(adios)`.
"""
mutable struct Adios
ptr::Ptr{Cvoid}
Adios(ptr) = finalizer(adios_finalize, new(ptr))
end
Adios() = Adios(C_NULL)
# Multi-line REPL display: show the raw handle value in hexadecimal.
function Base.show(io::IO, ::MIME"text/plain", adios::Adios)
    hex = string(UInt(adios.ptr); base=16)
    print(io, "Adios(0x", hex, ")")
    return
end
export adios_init_mpi
"""
adios = adios_init_mpi(comm::MPI.Comm)
adios = adios_init_mpi(config_file::AbstractString, comm::MPI.Comm)
adios::Union{Adios,Nothing}
Starting point for MPI apps. Creates an ADIOS handler. MPI collective
and it calls `MPI_Comm_dup`.
"""
function adios_init_mpi(comm::MPI.Comm)
ptr = ccall((:adios2_init_mpi, libadios2_c_mpi), Ptr{Cvoid},
(MPI.MPI_Comm,), comm)
return ptr == C_NULL ? nothing : Adios(ptr)
end
function adios_init_mpi(config_file::AbstractString, comm::MPI.Comm)
ptr = ccall((:adios2_init_config_mpi, libadios2_c_mpi), Ptr{Cvoid},
(Cstring, MPI.MPI_Comm), config_file, comm)
return ptr == C_NULL ? nothing : Adios(ptr)
end
export adios_init_serial
"""
adios = adios_init_serial()
adios = adios_init_serial(config_file::AbstractString)
adios::Union{Adios,Nothing}
Initialize an Adios struct in a serial, non-MPI application. Doesnβt
require a runtime config file.
See also the [ADIOS2
documentation](https://adios2.readthedocs.io/en/latest/api_full/api_full.html#_CPPv418adios2_init_serialv).
"""
function adios_init_serial()
ptr = ccall((:adios2_init_serial, libadios2_c), Ptr{Cvoid}, ())
return ptr == C_NULL ? nothing : Adios(ptr)
end
function adios_init_serial(config_file::AbstractString)
ptr = ccall((:adios2_init_config_serial, libadios2_c), Ptr{Cvoid},
(Cstring,), config_file)
return ptr == C_NULL ? nothing : Adios(ptr)
end
export declare_io
"""
    io = declare_io(adios::Adios, name::AbstractString)
    io::Union{AIO,Nothing}

Declare a new IO handler identified by `name`. Returns `nothing` on
failure.

See also the [ADIOS2
documentation](https://adios2.readthedocs.io/en/latest/api_full/api_full.html#_CPPv417adios2_declare_ioP12adios2_adiosPKc).
"""
function declare_io(adios::Adios, name::AbstractString)
    handle = ccall((:adios2_declare_io, libadios2_c), Ptr{Cvoid},
                   (Ptr{Cvoid}, Cstring), adios.ptr, name)
    handle == C_NULL && return nothing
    return AIO(handle, adios)
end
export adios_finalize
"""
    err = adios_finalize(adios::Adios)
    err::Error

Finalize the ADIOS context `adios`. Calling this explicitly is usually
unnecessary: the finalizer registered on `Adios` runs automatically when
the object is garbage collected, and it can also be triggered via
`finalize(adios)`. Safe to call repeatedly; later calls are no-ops.

See also the [ADIOS2
documentation](https://adios2.readthedocs.io/en/latest/api_full/api_full.html#_CPPv415adios2_finalizeP12adios2_adios)
"""
function adios_finalize(adios::Adios)
    # Already finalized (or never initialized): nothing to do.
    if adios.ptr == C_NULL
        return error_none
    end
    status = ccall((:adios2_finalize, libadios2_c), Cint, (Ptr{Cvoid},),
                   adios.ptr)
    # Null the pointer so a second finalize is a no-op.
    adios.ptr = C_NULL
    return Error(status)
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 4584 | # Attributes
export Attribute
"""
    struct Attribute

Holds a C pointer `adios2_attribute *`.
"""
struct Attribute
    ptr::Ptr{Cvoid}
    # Owning Adios context; keeping this reference prevents the context
    # from being finalized while the attribute handle is still in use.
    adios::Adios
    Attribute(ptr::Ptr{Cvoid}, adios::Adios) = new(ptr, adios)
end
function Base.show(io::IO, attribute::Attribute)
    # One-line form: show the attribute's full metadata and contents.
    print(io, "Attribute(name=", name(attribute),
          ",type=", type(attribute),
          ",is_value=", is_value(attribute),
          ",size=", size(attribute),
          ",data=", data(attribute), ")")
    return
end
function Base.show(io::IO, ::MIME"text/plain", attribute::Attribute)
    # Multi-line display form: just the attribute's name.
    return print(io, "Attribute(", name(attribute), ")")
end
export name
"""
    attr_name = name(attribute::Attribute)
    attr_name::Union{Nothing,String}

Retrieve the attribute's name, or `nothing` on failure.
"""
function name(attribute::Attribute)
    # First call queries the length, second call fills the buffer.
    len = Ref{Csize_t}()
    status = ccall((:adios2_attribute_name, libadios2_c), Cint,
                   (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), C_NULL, len,
                   attribute.ptr)
    Error(status) ≢ error_none && return nothing
    buffer = Array{Cchar}(undef, len[])
    status = ccall((:adios2_attribute_name, libadios2_c), Cint,
                   (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), buffer, len,
                   attribute.ptr)
    Error(status) ≢ error_none && return nothing
    return unsafe_string(pointer(buffer), len[])
end
export type
"""
    attr_type = type(attribute::Attribute)
    attr_type::Union{Nothing,Type}

Retrieve the attribute's Julia type, or `nothing` on failure.
"""
function type(attribute::Attribute)
    c_type = Ref{Cint}()
    status = ccall((:adios2_attribute_type, libadios2_c), Cint,
                   (Ref{Cint}, Ptr{Cvoid}), c_type, attribute.ptr)
    Error(status) ≢ error_none && return nothing
    # Translate the ADIOS type enum to the corresponding Julia type.
    return julia_type(AType(c_type[]))
end
export is_value
"""
    attr_is_value = is_value(attribute::Attribute)
    attr_is_value::Union{Nothing,Bool}

Retrieve whether the attribute holds a single value (as opposed to an
array of values), or `nothing` on failure.
"""
# Fix: the docstring previously read "Retrieve attribute type.", a
# copy-paste of `type`'s docstring; this function reports value-ness.
function is_value(attribute::Attribute)
    is_value = Ref{Cint}()
    err = ccall((:adios2_attribute_is_value, libadios2_c), Cint,
                (Ref{Cint}, Ptr{Cvoid}), is_value, attribute.ptr)
    Error(err) ≢ error_none && return nothing
    return Bool(is_value[])
end
"""
attr_size = size(attribute::Attribute)
attr_size::Union{Nothing,Int}
Retrieve attribute size.
"""
function Base.size(attribute::Attribute)
size = Ref{Csize_t}()
err = ccall((:adios2_attribute_size, libadios2_c), Cint,
(Ref{Csize_t}, Ptr{Cvoid}), size, attribute.ptr)
Error(err) β error_none && return nothing
return Int(size[])
end
export data
"""
    attr_data = data(attribute::Attribute)
    attr_data::Union{Nothing,AdiosType,Vector{<:AdiosType}}

Retrieve the attribute's data. Single-value attributes are returned as
scalars, array attributes as `Vector`s. Returns `nothing` on failure.
"""
# Fixes relative to the original:
# - removed a redundant second `type(attribute)` call (`tp` duplicated `T`);
# - the error status is now checked *before* `@assert out_sz[] == sz`, so a
#   failed ccall returns `nothing` instead of asserting on a possibly
#   uninitialized `out_sz`.
function data(attribute::Attribute)
    T = type(attribute)
    T ≡ nothing && return nothing
    @assert T != Union{}
    isval = is_value(attribute)
    isval ≡ nothing && return nothing
    sz = size(attribute)
    sz ≡ nothing && return nothing
    if T ≡ String
        if isval
            # Single string: read into a fixed-size C character buffer.
            buffer = fill(Cchar(0), string_array_element_max_size)
            out_sz = Ref{Csize_t}()
            err = ccall((:adios2_attribute_data, libadios2_c), Cint,
                        (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), buffer, out_sz,
                        attribute.ptr)
            Error(err) ≢ error_none && return nothing
            @assert out_sz[] == sz
            return unsafe_string(pointer(buffer))
        else
            # String array: ADIOS fills an array of C string buffers that
            # we pass in via their pointers.
            arrays = [Array{Cchar}(undef, string_array_element_max_size)
                      for i in 1:sz]
            buffers = [pointer(array) for array in arrays]::Vector{Ptr{Cchar}}
            out_sz = Ref{Csize_t}()
            err = ccall((:adios2_attribute_data, libadios2_c), Cint,
                        (Ptr{Ptr{Cchar}}, Ref{Csize_t}, Ptr{Cvoid}), buffers,
                        out_sz, attribute.ptr)
            Error(err) ≢ error_none && return nothing
            @assert out_sz[] == sz
            data = unsafe_string.(buffers)::Vector{String}
            # Use `arrays` again to ensure it is not GCed too early
            buffers .= pointer.(arrays)
            return data
        end
    else
        data = Array{T}(undef, sz)
        out_sz = Ref{Csize_t}()
        err = ccall((:adios2_attribute_data, libadios2_c), Cint,
                    (Ptr{Cvoid}, Ref{Csize_t}, Ptr{Cvoid}), data, out_sz,
                    attribute.ptr)
        Error(err) ≢ error_none && return nothing
        @assert out_sz[] == sz
        # Single-value attributes are returned as scalars.
        isval && return data[1]
        return data
    end
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 10458 | # Engine functions
export Engine
"""
    struct Engine

Holds a C pointer `adios2_engine *`.
"""
struct Engine
    ptr::Ptr{Cvoid}
    # Owning Adios context; stored so it stays reachable while this
    # engine is in use.
    adios::Adios
    # Buffers scheduled via deferred `put!`; kept reachable until
    # `perform_puts!` empties this list.
    put_sources::Vector{Any}
    # References retained for reads scheduled via deferred `get`;
    # cleared by `perform_gets`/`end_step`.
    get_targets::Vector{Any}
    # Post-processing closures run by `perform_gets`/`end_step` after
    # the reads complete (e.g. converting C string buffers to `String`s).
    get_tasks::Vector{Function}
    function Engine(ptr::Ptr{Cvoid}, adios::Adios)
        return new(ptr, adios, Any[], Any[], Function[])
    end
end
function Base.show(io::IO, ::MIME"text/plain", engine::Engine)
    # Display form: just the engine's name.
    return print(io, "Engine(", name(engine), ")")
end
export name
"""
    engine_name = name(engine::Engine)
    engine_name::Union{Nothing,String}

Retrieve the engine's name, or `nothing` on failure.
"""
function name(engine::Engine)
    # First call queries the length, second call fills the buffer.
    len = Ref{Csize_t}()
    status = ccall((:adios2_engine_name, libadios2_c), Cint,
                   (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), C_NULL, len,
                   engine.ptr)
    Error(status) ≢ error_none && return nothing
    buffer = Array{Cchar}(undef, len[])
    status = ccall((:adios2_engine_name, libadios2_c), Cint,
                   (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), buffer, len,
                   engine.ptr)
    Error(status) ≢ error_none && return nothing
    return unsafe_string(pointer(buffer), len[])
end
export type
"""
    engine_type = type(engine::Engine)
    engine_type::Union{Nothing,String}

Retrieve the engine type as a string, or `nothing` on failure.
"""
function type(engine::Engine)
    # First call queries the length, second call fills the buffer.
    len = Ref{Csize_t}()
    status = ccall((:adios2_engine_get_type, libadios2_c), Cint,
                   (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), C_NULL, len,
                   engine.ptr)
    Error(status) ≢ error_none && return nothing
    buffer = Array{Cchar}(undef, len[])
    status = ccall((:adios2_engine_get_type, libadios2_c), Cint,
                   (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), buffer, len,
                   engine.ptr)
    Error(status) ≢ error_none && return nothing
    return unsafe_string(pointer(buffer), len[])
end
export openmode
"""
    engine_openmode = openmode(engine::Engine)
    engine_openmode::Union{Nothing,Mode}

Retrieve the mode with which the engine was opened, or `nothing` on
failure.
"""
function openmode(engine::Engine)
    c_mode = Ref{Cint}()
    status = ccall((:adios2_engine_openmode, libadios2_c), Cint,
                   (Ptr{Cint}, Ptr{Cvoid}), c_mode, engine.ptr)
    Error(status) ≢ error_none && return nothing
    return Mode(c_mode[])
end
export begin_step
"""
    status = begin_step(engine::Engine, mode::StepMode,
                        timeout_seconds::Union{Integer,AbstractFloat})
    status = begin_step(engine::Engine)
    status::Union{Nothing,StepStatus}

Begin a logical adios2 step stream. Without explicit arguments the step
mode is derived from the engine's open mode and no timeout is applied.
Returns `nothing` on failure.
"""
function begin_step(engine::Engine, mode::StepMode,
                    timeout_seconds::Union{Integer,AbstractFloat})
    step_status = Ref{Cint}()
    err = ccall((:adios2_begin_step, libadios2_c), Cint,
                (Ptr{Cvoid}, Cint, Cfloat, Ptr{Cint}), engine.ptr, mode,
                timeout_seconds, step_status)
    Error(err) ≢ error_none && return nothing
    return StepStatus(step_status[])
end
function begin_step(engine::Engine)
    # Reading engines consume steps; all other modes append to them.
    mode = openmode(engine) == mode_read ? step_mode_read : step_mode_append
    return begin_step(engine, mode, -1)
end
export current_step
"""
    step = current_step(engine::Engine)
    step::Union{Nothing,Int}

Inspect the current logical step, or return `nothing` on failure.
"""
function current_step(engine::Engine)
    step = Ref{Csize_t}()
    err = ccall((:adios2_current_step, libadios2_c), Cint,
                (Ptr{Csize_t}, Ptr{Cvoid}), step, engine.ptr)
    Error(err) ≢ error_none && return nothing
    # Fix: dereference the Ref before converting. The original
    # `Int(step)` passed the `Ref{Csize_t}` itself and threw a
    # MethodError; compare the sibling `steps`, which uses `steps[]`.
    return Int(step[])
end
export steps
"""
    step = steps(engine::Engine)
    step::Union{Nothing,Int}

Inspect the total number of available steps, or return `nothing` on
failure.
"""
function steps(engine::Engine)
    nsteps = Ref{Csize_t}()
    status = ccall((:adios2_steps, libadios2_c), Cint,
                   (Ptr{Csize_t}, Ptr{Cvoid}), nsteps, engine.ptr)
    Error(status) ≢ error_none && return nothing
    return Int(nsteps[])
end
"""
err = Base.put!(engine::Engine, variable::Variable,
data::Union{Ref,DenseArray,SubArray,Ptr},
launch::Mode=mode_deferred)
err = Base.put!(engine::Engine, variable::Variable, data::AdiosType,
launch::Mode=mode_deferred)
err::Error
Schedule writing a variable to file. The buffer `data` must be
contiguous in memory.
Call `perform_puts!` to perform the actual write operations.
The reference/array/pointer target must not be modified before
`perform_puts!` is called. It is most efficient to schedule multiple
`put!` operations before calling `perform_puts!`.
"""
function Base.put!(engine::Engine, variable::Variable,
data::Union{Ref,DenseArray,SubArray,Ptr},
launch::Mode=mode_deferred)
if data isa AbstractArray && length(data) β 0
np = 1
for (str, sz) in zip(strides(data), size(data))
str β np &&
throw(ArgumentError("ADIOS2: `data` argument to `put!` must be contiguous"))
np *= sz
end
end
T = type(variable)
if T β‘ String
eltype(data) <: Union{Cchar,Cuchar} ||
throw(ArgumentError("ADIOS2: `data` element type for string variables must be either `Cchar` or `Cuchar`"))
else
eltype(data) β‘ T ||
throw(ArgumentError("ADIOS2: `data` element type for non-string variables must be the same as the variable type"))
end
co = count(variable)
len = data isa Ptr ? typemax(Int) : length(data)
(co β‘ nothing ? 1 : prod(co)) β€ len ||
throw(ArgumentError("ADIOS2: `data` length must be at least as large as the count of the variable"))
if launch β‘ mode_deferred
push!(engine.put_sources, (engine, variable, data))
end
err = ccall((:adios2_put, libadios2_c), Cint,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Cint), engine.ptr,
variable.ptr, data, Cint(launch))
return Error(err)
end
function Base.put!(engine::Engine, variable::Variable, data::AdiosType,
launch::Mode=mode_deferred)
return put!(engine, variable, Ref(data), launch)
end
function Base.put!(engine::Engine, variable::Variable, data::AbstractString,
launch::Mode=mode_deferred)
if launch β‘ mode_deferred
push!(engine.put_sources, data)
end
return put!(engine, variable, pointer(data), launch)
end
export perform_puts!
"""
    perform_puts!(engine::Engine)

Execute all currently scheduled write operations.
"""
function perform_puts!(engine::Engine)
    status = ccall((:adios2_perform_puts, libadios2_c), Cint, (Ptr{Cvoid},),
                   engine.ptr)
    # The scheduled writes have run; release the retained source buffers.
    empty!(engine.put_sources)
    return Error(status)
end
"""
err = Base.get(engine::Engine, variable::Variable,
data::Union{Ref,DenseArray,SubArray,Ptr},
launch::Mode=mode_deferred)
err::Error
Schedule reading a variable from file into the provided buffer `data`.
`data` must be contiguous in memory.
Call `perform_gets` to perform the actual read operations.
The reference/array/pointer target must not be modified before
`perform_gets` is called. It is most efficient to schedule multiple
`get` operations before calling `perform_gets`.
"""
function Base.get(engine::Engine, variable::Variable,
data::Union{Ref,DenseArray,SubArray,Ptr},
launch::Mode=mode_deferred)
if data isa AbstractArray && !isempty(data)
np = 1
for (str, sz) in zip(strides(data), size(data))
str β np &&
throw(ArgumentError("ADIOS2: `data` argument to `get` must be contiguous"))
np *= sz
end
end
co = count(variable)
len = data isa Ptr ? typemax(Int) : length(data)
(co β‘ nothing ? 1 : prod(co)) β€ len ||
throw(ArgumentError("ADIOS2: `data` length must be at least as large as the count of the variable"))
T = type(variable)
if T β‘ String
eltype(data) <: AbstractString ||
throw(ArgumentError("ADIOS2: `data` element type for string variables must be a subtype of `AbstractString`"))
buffer = fill(Cchar(0), string_array_element_max_size + 1)
err = ccall((:adios2_get, libadios2_c), Cint,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Cint), engine.ptr,
variable.ptr, buffer, Cint(launch))
if launch β‘ mode_deferred
push!(engine.get_targets, (engine, variable))
if data isa Ref
push!(engine.get_tasks,
() -> data[] = unsafe_string(pointer(buffer)))
else
push!(engine.get_tasks,
() -> data[begin] = unsafe_string(pointer(buffer)))
end
else
data[] = unsafe_string(pointer(buffer))
end
else
eltype(data) β‘ T ||
throw(ArgumentError("ADIOS2: `data` element type for non-string variables must be the same as the variable type"))
err = ccall((:adios2_get, libadios2_c), Cint,
(Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, Cint), engine.ptr,
variable.ptr, data, Cint(launch))
if launch β‘ mode_deferred
push!(engine.get_targets, (engine, variable, data))
end
end
return Error(err)
end
export perform_gets
"""
    perform_gets(engine::Engine)

Execute all currently scheduled read operations.
"""
function perform_gets(engine::Engine)
    status = ccall((:adios2_perform_gets, libadios2_c), Cint, (Ptr{Cvoid},),
                   engine.ptr)
    empty!(engine.get_targets)
    # Run the post-processing tasks registered by deferred `get` calls.
    foreach(task -> task(), engine.get_tasks)
    empty!(engine.get_tasks)
    return Error(status)
end
export end_step
"""
    end_step(engine::Engine)

Terminate interaction with the current step.
"""
function end_step(engine::Engine)
    status = ccall((:adios2_end_step, libadios2_c), Cint, (Ptr{Cvoid},),
                   engine.ptr)
    # Ending a step also completes deferred reads, so run the same
    # cleanup as `perform_gets`.
    empty!(engine.get_targets)
    foreach(task -> task(), engine.get_tasks)
    empty!(engine.get_tasks)
    return Error(status)
end
"""
flush(engine::Engine)
Flush all buffered data to file. Call this after `perform_puts!` to
ensure data are actually written to file.
"""
function Base.flush(engine::Engine)
err = ccall((:adios2_flush, libadios2_c), Cint, (Ptr{Cvoid},), engine.ptr)
return Error(err)
end
"""
close(engine::Engine)
Close a file. This implicitly also flushed all buffered data.
"""
function Base.close(engine::Engine)
err = ccall((:adios2_close, libadios2_c), Cint, (Ptr{Cvoid},), engine.ptr)
return Error(err)
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 10816 | # Julia-specific high-level API
################################################################################
export AdiosFile
"""
struct AdiosFile
Context for the high-level API for an ADIOS file
"""
struct AdiosFile
adios::Adios
io::AIO
engine::Engine
AdiosFile(adios::Adios, io::AIO, engine::Engine) = new(adios, io, engine)
end
export adios_open_serial
"""
    adios = adios_open_serial(filename::AbstractString, mode::Mode)
    adios::AdiosFile

Open an ADIOS file. Use `mode = mode_write` for writing and `mode =
mode_read` for reading.
"""
function adios_open_serial(filename::AbstractString, mode::Mode)
    context = adios_init_serial()
    handler = declare_io(context, "IO")
    engine = open(handler, filename, mode)
    return AdiosFile(context, handler, engine)
end
export adios_open_mpi
"""
    adios = adios_open_mpi(comm::MPI.Comm, filename::AbstractString, mode::Mode)
    adios::AdiosFile

Open an ADIOS file for parallel I/O. Use `mode = mode_write` for
writing and `mode = mode_read` for reading.
"""
function adios_open_mpi(comm::MPI.Comm, filename::AbstractString, mode::Mode)
    context = adios_init_mpi(comm)
    handler = declare_io(context, "IO")
    engine = open(handler, filename, mode)
    return AdiosFile(context, handler, engine)
end
"""
flush(file::AdiosFile)
Flush an ADIOS file. When writing, flushing or closing a file is
necesssary to ensure that data are actually written to the file.
"""
function Base.flush(file::AdiosFile)
flush(file.engine)
return nothing
end
"""
close(file::AdiosFile)
Close an ADIOS file. When writing, flushing or closing a file is
necesssary to ensure that data are actually written to the file.
"""
function Base.close(file::AdiosFile)
close(file.engine)
adios_finalize(file.adios)
return nothing
end
################################################################################
export adios_subgroup_names
"""
groups = adios_subgroup_names(file::AdiosFile, groupname::AbstractString)
vars::Vector{String}
List (non-recursively) all subgroups in the group `groupname` in the
file.
"""
function adios_subgroup_names(file::AdiosFile, groupname::AbstractString)
vars = inquire_subgroups(file.io, groupname)
vars β‘ nothing && return String[]
return vars
end
export adios_define_attribute
"""
    adios_define_attribute(file::AdiosFile, name::AbstractString,
                           value::AdiosType)

Write a scalar attribute.
"""
function adios_define_attribute(file::AdiosFile, name::AbstractString,
                                value::AdiosType)
    define_attribute(file.io, name, value)
    return nothing
end

"""
    adios_define_attribute(file::AdiosFile, name::AbstractString,
                           value::AbstractArray{<:AdiosType})

Write an array-valued attribute.
"""
function adios_define_attribute(file::AdiosFile, name::AbstractString,
                                value::AbstractArray{<:AdiosType})
    define_attribute_array(file.io, name, value)
    return nothing
end

"""
    adios_define_attribute(file::AdiosFile, path::AbstractString,
                           name::AbstractString, value::AdiosType)

Write a scalar attribute into the path `path` in the file.
"""
function adios_define_attribute(file::AdiosFile, path::AbstractString,
                                name::AbstractString, value::AdiosType)
    define_variable_attribute(file.io, name, value, path)
    return nothing
end

"""
    adios_define_attribute(file::AdiosFile, path::AbstractString,
                           name::AbstractString,
                           value::AbstractArray{<:AdiosType})

Write an array-valued attribute into the path `path` in the file.
"""
function adios_define_attribute(file::AdiosFile, path::AbstractString,
                                name::AbstractString,
                                value::AbstractArray{<:AdiosType})
    define_variable_attribute_array(file.io, name, value, path)
    return nothing
end
export adios_all_attribute_names
"""
    attrs = adios_all_attribute_names(file::AdiosFile)
    attrs::Vector{String}

List (recursively) all attributes in the file.
"""
function adios_all_attribute_names(file::AdiosFile)
    attrs = inquire_all_attributes(file.io)
    return attrs ≡ nothing ? String[] : name.(attrs)
end

export adios_group_attribute_names
"""
    vars = adios_group_attribute_names(file::AdiosFile, groupname::AbstractString)
    vars::Vector{String}

List (non-recursively) all attributes in the group `groupname` in the
file.
"""
function adios_group_attribute_names(file::AdiosFile, groupname::AbstractString)
    attrs = inquire_group_attributes(file.io, groupname)
    return attrs ≡ nothing ? String[] : name.(attrs)
end
export adios_attribute_data
"""
    attr_data = adios_attribute_data(file::AdiosFile, name::AbstractString)
    attr_data::Union{Nothing,AdiosType}

Read an attribute from a file. Return `nothing` if the attribute is
not found.
"""
function adios_attribute_data(file::AdiosFile, name::AbstractString)
    attr = inquire_attribute(file.io, name)
    return attr ≡ nothing ? nothing : data(attr)
end

"""
    attr_data = adios_attribute_data(file::AdiosFile, path::AbstractString,
                                     name::AbstractString)
    attr_data::Union{Nothing,AdiosType}

Read an attribute from a file in path `path`. Return `nothing` if the
attribute is not found.
"""
function adios_attribute_data(file::AdiosFile, path::AbstractString,
                              name::AbstractString)
    attr = inquire_variable_attribute(file.io, name, path)
    return attr ≡ nothing ? nothing : data(attr)
end
################################################################################
export adios_put!
"""
adios_put!(file::AdiosFile, name::AbstractString, scalar::AdiosType)
Schedule writing a scalar variable to a file.
The variable is not written until `adios_perform_puts!` is called and
the file is flushed or closed.
"""
function adios_put!(file::AdiosFile, name::AbstractString, scalar::AdiosType)
var = define_variable(file.io, name, scalar)
put!(file.engine, var, scalar)
return var
end
"""
adios_put!(file::AdiosFile, name::AbstractString,
array::AbstractArray{<:AdiosType}; make_copy::Bool=false)
Schedule writing an array-valued variable to a file.
`make_copy` determines whether to make a copy of the array, which is
expensive for large arrays. When no copy is made, then the array must
not be modified before `adios_perform_puts!` is called.
The variable is not written until `adios_perform_puts!` is called and
the file is flushed or closed.
"""
function adios_put!(file::AdiosFile, name::AbstractString,
array::AbstractArray{<:AdiosType}; make_copy::Bool=false)
# 0-dimensional arrays need to be passed as scalars
ndims(array) == 0 &&
return adios_put!(file, name, make_copy ? copy(array)[] : array[])
var = define_variable(file.io, name, array)
put!(file.engine, var, make_copy ? copy(array) : array)
return var
end
export adios_perform_puts!
"""
    adios_perform_puts!(file::AdiosFile)

Execute all scheduled `adios_put!` operations.

The data might not be in the file yet; they might be buffered. Call
`adios_flush` or `adios_close` to ensure all data are written to file.
"""
function adios_perform_puts!(file::AdiosFile)
    perform_puts!(file.engine)
    return nothing
end
export adios_all_variable_names
"""
    vars = adios_all_variable_names(file::AdiosFile)
    vars::Vector{String}

List (recursively) all variables in the file.
"""
function adios_all_variable_names(file::AdiosFile)
    vars = inquire_all_variables(file.io)
    return vars ≡ nothing ? String[] : name.(vars)
end

export adios_group_variable_names
"""
    vars = adios_group_variable_names(file::AdiosFile, groupname::AbstractString)
    vars::Vector{String}

List (non-recursively) all variables in the group `groupname` in the
file.
"""
function adios_group_variable_names(file::AdiosFile, groupname::AbstractString)
    vars = inquire_group_variables(file.io, groupname)
    return vars ≡ nothing ? String[] : name.(vars)
end
export IORef
"""
    mutable struct IORef{T,D}

A reference to the value of a variable that has been scheduled to be
read from disk. This value cannot be accessed before the read
operations have actually been executed.

Use `fetch(ioref::IORef)` to access the value. `fetch` will trigger
the actual reading from file if necessary. It is most efficient to
schedule multiple read operations at once.

Use `adios_perform_gets` to trigger reading all currently scheduled
variables.
"""
mutable struct IORef{T,D}
    # Engine on which the read was scheduled; set to `nothing` once the
    # read has completed (see `isready`).
    engine::Union{Nothing,Engine}
    # Destination buffer the read fills.
    array::Array{T,D}
    function IORef{T,D}(engine::Engine, array::Array{T,D}) where {T,D}
        return new{T,D}(engine, array)
    end
end
"""
isready(ioref::IORef)::Bool
Check whether an `IORef` has already been read from file.
"""
Base.isready(ioref::IORef) = ioref.engine β‘ nothing
"""
value = fetch(ioref::IORef{T,D}) where {T,D}
value::Array{T,D}
Access an `IORef`. If necessary, the variable is read from file and
then cached. (Each `IORef` is read at most once.)
Scalars are handled as zero-dimensional arrays. To access the value of
a zero-dimensional array, write `array[]` (i.e. use array indexing,
but without any indices).
"""
function Base.fetch(ioref::IORef)
isready(ioref) || perform_gets(ioref.engine)
@assert isready(ioref)
# return 0-D arrays as scalars
# D == 0 && return ioref.array[]
return ioref.array
end
export adios_get
"""
    ioref = adios_get(file::AdiosFile, name::AbstractString)
    ioref::Union{Nothing,IORef}

Schedule reading a variable from a file. Returns `nothing` when the
variable does not exist or its metadata cannot be queried.

The variable is not read until `adios_perform_gets` is called. This
happens automatically when the `IORef` is accessed (via `fetch`). It
is most efficient to first schedule multiple variables for reading,
and then executing the reads together.
"""
function adios_get(file::AdiosFile, name::AbstractString)
    var = inquire_variable(file.io, name)
    var ≡ nothing && return nothing
    T = type(var)
    T ≡ nothing && return nothing
    D = ndims(var)
    D ≡ nothing && return nothing
    sh = count(var)
    sh ≡ nothing && return nothing
    # Allocate the destination buffer and schedule the deferred read.
    ioref = IORef{T,D}(file.engine, Array{T,D}(undef, Tuple(sh)))
    get(file.engine, var, ioref.array)
    # When the reads execute, mark this IORef as ready by clearing its
    # engine reference (see `isready`).
    push!(file.engine.get_tasks, () -> (ioref.engine = nothing))
    return ioref
end
export adios_perform_gets
"""
    adios_perform_gets(file::AdiosFile)

Execute all currently scheduled read opertions. This makes all pending
`IORef`s ready.
"""
function adios_perform_gets(file::AdiosFile)
    perform_gets(file.engine)
    return nothing
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 16231 | # IO functions
export AIO
"""
    struct AIO

Holds a C pointer `adios2_io *`.
"""
struct AIO
    ptr::Ptr{Cvoid}
    # Owning Adios context, kept reachable so it is not finalized while
    # this IO handler is alive.
    adios::Adios
    AIO(ptr::Ptr{Cvoid}, adios::Adios) = new(ptr, adios)
end
function Base.show(io::IO, ::MIME"text/plain", aio::AIO)
    # Render the wrapped C handle as a hexadecimal address.
    hex = string(UInt(aio.ptr); base=16)
    return print(io, "AIO(0x", hex, ")")
end
export set_engine
"""
    err = set_engine(io::AIO, engine_type::AbstractString)
    err::Error

Set the engine type for the current io handler.

# Arguments
- `io`: handler
- `engine_type`: predefined engine type, default is bpfile
"""
function set_engine(io::AIO, engine_type::AbstractString)
    status = ccall((:adios2_set_engine, libadios2_c), Cint,
                   (Ptr{Cvoid}, Cstring), io.ptr, engine_type)
    return Error(status)
end
export define_variable
"""
    variable = define_variable(io::AIO, name::AbstractString, type::Type,
        shape::Union{Nothing,NTuple{N,Int} where N,CartesianIndex}=nothing,
        start::Union{Nothing,NTuple{N,Int} where N,CartesianIndex}=nothing,
        count::Union{Nothing,NTuple{N,Int} where N,CartesianIndex}=nothing,
        constant_dims::Bool=false)
    variable::Union{Nothing,Variable}

Define a variable within `io`.

# Arguments
- `io`: handler that owns the variable
- `name`: unique variable identifier
- `type`: primitive type
- `ndims`: number of dimensions
- `shape`: global dimension
- `start`: local offset
- `count`: local dimension
- `constant_dims`: `true`: shape, start, count won't change; `false`:
  shape, start, count will change after definition
"""
function define_variable(io::AIO, name::AbstractString, type::Type,
                         shape::LocalValue)
    # Local (per-process) scalar value: one dimension carrying the
    # special `local_value_dim` marker, no shape/start/count arrays.
    ptr = ccall((:adios2_define_variable, libadios2_c), Ptr{Cvoid},
                (Ptr{Cvoid}, Cstring, Cint, Csize_t, Ptr{Csize_t}, Ptr{Csize_t},
                 Ptr{Csize_t}, Cint), io.ptr, name, adios_type(type), 1,
                Ref(local_value_dim), C_NULL, C_NULL, true)
    return ptr == C_NULL ? nothing : Variable(ptr, io.adios)
end
function define_variable(io::AIO, name::AbstractString, type::Type,
                         shape::Union{Nothing,NTuple{N,Int} where N,
                                      CartesianIndex}=nothing,
                         start::Union{Nothing,NTuple{N,Int} where N,
                                      CartesianIndex}=nothing,
                         count::Union{Nothing,NTuple{N,Int} where N,
                                      CartesianIndex}=nothing;
                         constant_dims::Bool=false)
    # The rank is taken from whichever of shape/start/count is given; all
    # provided ones must agree.
    ndims = max(length(maybe(shape, ())), length(maybe(start, ())),
                length(maybe(count, ())))
    @assert all(x -> x ≡ nothing || length(x) == ndims, [shape, start, count])
    # Dimensions are reversed — presumably to map Julia's column-major
    # axis order onto ADIOS's C-style dimension order; confirm against
    # the read path if changing this.
    ptr = ccall((:adios2_define_variable, libadios2_c), Ptr{Cvoid},
                (Ptr{Cvoid}, Cstring, Cint, Csize_t, Ptr{Csize_t}, Ptr{Csize_t},
                 Ptr{Csize_t}, Cint), io.ptr, name, adios_type(type), ndims,
                shape ≡ nothing ? C_NULL : Csize_t[reverse(Tuple(shape))...],
                start ≡ nothing ? C_NULL : Csize_t[reverse(Tuple(start))...],
                count ≡ nothing ? C_NULL : Csize_t[reverse(Tuple(count))...],
                constant_dims)
    return ptr == C_NULL ? nothing : Variable(ptr, io.adios)
end
# Convenience: define a local scalar variable from a value.
function define_variable(io::AIO, name::AbstractString, var::AdiosType)
    return define_variable(io, name, typeof(var), LocalValue())
end
# Convenience: define a local array variable with fixed dimensions
# matching `arr`.
function define_variable(io::AIO, name::AbstractString,
                         arr::AbstractArray{<:AdiosType})
    return define_variable(io, name, eltype(arr), nothing, nothing, size(arr);
                           constant_dims=true)
end
export inquire_variable
"""
    variable = inquire_variable(io::AIO, name::AbstractString)
    variable::Union{Nothing,Variable}

Retrieve a variable handler within the current `io` handler, or
`nothing` if the variable does not exist.

# Arguments
- `io`: handler to variable `io` owner
- `name`: unique variable identifier within `io` handler
"""
function inquire_variable(io::AIO, name::AbstractString)
    handle = ccall((:adios2_inquire_variable, libadios2_c), Ptr{Cvoid},
                   (Ptr{Cvoid}, Cstring), io.ptr, name)
    handle == C_NULL && return nothing
    return Variable(handle, io.adios)
end
export inquire_all_variables
"""
    variables = inquire_all_variables(io::AIO)
    variables::Union{Nothing,Vector{Variable}}

Return an array of variable handlers for all variables present in the
`io` group, or `nothing` on failure.

# Arguments
- `io`: handler to variables io owner
"""
function inquire_all_variables(io::AIO)
    c_variables = Ref{Ptr{Ptr{Cvoid}}}()
    nvars = Ref{Csize_t}()
    status = ccall((:adios2_inquire_all_variables, libadios2_c), Cint,
                   (Ref{Ptr{Ptr{Cvoid}}}, Ref{Csize_t}, Ptr{Cvoid}),
                   c_variables, nvars, io.ptr)
    Error(status) ≢ error_none && return nothing
    variables = Array{Variable}(undef, nvars[])
    for n in eachindex(variables)
        ptr = unsafe_load(c_variables[], n)
        @assert ptr ≠ C_NULL
        variables[n] = Variable(ptr, io.adios)
    end
    # Release the C-allocated array of pointers.
    free(c_variables[])
    return variables
end
export inquire_group_variables
"""
    variables = inquire_group_variables(io::AIO, full_prefix::AbstractString)
    variables::Union{Nothing,Vector{Variable}}

Return variable handlers for all variables in the group `full_prefix`,
or `nothing` on failure.
"""
# Fix: the docstring previously claimed a `Vector{String}` return; the
# function returns `Variable` handlers (callers such as
# `adios_group_variable_names` apply `name.()` to the result).
function inquire_group_variables(io::AIO, full_prefix::AbstractString)
    c_variables = Ref{Ptr{Ptr{Cvoid}}}(C_NULL)
    size = Ref{Csize_t}()
    err = ccall((:adios2_inquire_group_variables, libadios2_c), Cint,
                (Ref{Ptr{Ptr{Cvoid}}}, Cstring, Ref{Csize_t}, Ptr{Cvoid}),
                c_variables, full_prefix, size, io.ptr)
    Error(err) ≢ error_none && return nothing
    variables = Array{Variable}(undef, size[])
    for n in 1:length(variables)
        ptr = unsafe_load(c_variables[], n)
        @assert ptr ≠ C_NULL
        variables[n] = Variable(ptr, io.adios)
    end
    # Release the C-allocated array of pointers.
    free(c_variables[])
    return variables
end
export define_attribute
"""
    attribute = define_attribute(io::AIO, name::AbstractString, value)
    attribute::Union{Nothing,Attribute}

Define an attribute value inside `io`.
"""
function define_attribute(io::AIO, name::AbstractString, value::AdiosType)
    T = typeof(value)
    # Strings are passed as C strings; everything else by reference.
    ptr = if T <: AbstractString
        ccall((:adios2_define_attribute, libadios2_c), Ptr{Cvoid},
              (Ptr{Cvoid}, Cstring, Cint, Cstring), io.ptr, name,
              adios_type(T), value)
    else
        ccall((:adios2_define_attribute, libadios2_c), Ptr{Cvoid},
              (Ptr{Cvoid}, Cstring, Cint, Ptr{Cvoid}), io.ptr, name,
              adios_type(T), Ref(value))
    end
    return ptr == C_NULL ? nothing : Attribute(ptr, io.adios)
end
export define_attribute_array
"""
    attribute = define_attribute_array(io::AIO, name::AbstractString,
                                       values::AbstractVector)
    attribute::Union{Nothing,Attribute}

Define an attribute array inside `io`.
"""
function define_attribute_array(io::AIO, name::AbstractString,
                                values::AbstractVector{<:AdiosType})
    T = eltype(values)
    if T <: AbstractString
        # String arrays are passed as an array of C string pointers.
        cvalues = pointer.(values)
        ptr = ccall((:adios2_define_attribute_array, libadios2_c), Ptr{Cvoid},
                    (Ptr{Cvoid}, Cstring, Cint, Ptr{Ptr{Cchar}}, Csize_t),
                    io.ptr, name, adios_type(T), cvalues, length(values))
        # Use `values` again to ensure it is not GCed too early
        cvalues = pointer.(values)
    else
        ptr = ccall((:adios2_define_attribute_array, libadios2_c), Ptr{Cvoid},
                    (Ptr{Cvoid}, Cstring, Cint, Ptr{Cvoid}, Csize_t), io.ptr,
                    name, adios_type(T), values, length(values))
    end
    return ptr == C_NULL ? nothing : Attribute(ptr, io.adios)
end
export define_variable_attribute
"""
    attribute = define_variable_attribute(io::AIO, name::AbstractString, value,
                                          variable_name::AbstractString,
                                          separator::AbstractString="/")
    attribute::Union{Nothing,Attribute}

Define an attribute single value associated to an existing variable by
its name.
"""
function define_variable_attribute(io::AIO, name::AbstractString,
                                   value::AdiosType,
                                   variable_name::AbstractString,
                                   separator::AbstractString="/")
    T = typeof(value)
    # Strings are passed as C strings; everything else by reference.
    ptr = if T <: AbstractString
        ccall((:adios2_define_variable_attribute, libadios2_c), Ptr{Cvoid},
              (Ptr{Cvoid}, Cstring, Cint, Cstring, Cstring, Cstring),
              io.ptr, name, adios_type(T), value, variable_name, separator)
    else
        ccall((:adios2_define_variable_attribute, libadios2_c), Ptr{Cvoid},
              (Ptr{Cvoid}, Cstring, Cint, Ptr{Cvoid}, Cstring, Cstring),
              io.ptr, name, adios_type(T), Ref(value), variable_name,
              separator)
    end
    return ptr == C_NULL ? nothing : Attribute(ptr, io.adios)
end
export define_variable_attribute_array
"""
    attribute = define_variable_attribute_array(io::AIO, name::AbstractString,
                                                values::AbstractVector,
                                                variable_name::AbstractString,
                                                separator::AbstractString="/")
    attribute::Union{Nothing,Attribute}

Define an attribute array associated to an existing variable by its
name.
"""
function define_variable_attribute_array(io::AIO, name::AbstractString,
                                         values::AbstractVector{<:AdiosType},
                                         variable_name::AbstractString,
                                         separator::AbstractString="/")
    T = eltype(values)
    if T <: AbstractString
        # String arrays are passed as an array of C string pointers.
        cvalues = pointer.(values)
        ptr = ccall((:adios2_define_variable_attribute_array, libadios2_c),
                    Ptr{Cvoid},
                    (Ptr{Cvoid}, Cstring, Cint, Ptr{Ptr{Cchar}}, Csize_t,
                     Cstring, Cstring), io.ptr, name, adios_type(T), cvalues,
                    length(values), variable_name, separator)
        # Use `values` again to ensure it is not GCed too early
        cvalues = pointer.(values)
    else
        ptr = ccall((:adios2_define_variable_attribute_array, libadios2_c),
                    Ptr{Cvoid},
                    (Ptr{Cvoid}, Cstring, Cint, Ptr{Cvoid}, Csize_t, Cstring,
                     Cstring), io.ptr, name, adios_type(T), values,
                    length(values), variable_name, separator)
    end
    return ptr == C_NULL ? nothing : Attribute(ptr, io.adios)
end
export inquire_attribute
"""
attribute = inquire_attribute(io::AIO, name::AbstractString)
attribute::Union{Nothing,Attribute}
Return a handler to a previously defined attribute by name.
"""
function inquire_attribute(io::AIO, name::AbstractString)
ptr = ccall((:adios2_inquire_attribute, libadios2_c), Ptr{Cvoid},
(Ptr{Cvoid}, Cstring), io.ptr, name)
return ptr == C_NULL ? nothing : Attribute(ptr, io.adios)
end
export inquire_variable_attribute
"""
attribute = inquire_variable_attribute(io::AIO, name::AbstractString,
variable_name::AbstractString,
separator::AbstractString="/")
attribute::Union{Nothing,Attribute}
Return a handler to a previously defined attribute by name.
"""
function inquire_variable_attribute(io::AIO, name::AbstractString,
variable_name::AbstractString,
separator::AbstractString="/")
ptr = ccall((:adios2_inquire_variable_attribute, libadios2_c), Ptr{Cvoid},
(Ptr{Cvoid}, Cstring, Cstring, Cstring), io.ptr, name,
variable_name, separator)
return ptr == C_NULL ? nothing : Attribute(ptr, io.adios)
end
export inquire_all_attributes
"""
attributes = inquire_all_attributes(io::AIO)
attributes::Union{Nothing,Vector{Attribute}}
Return an array of attribute handlers for all attribute present in the
io group.
"""
function inquire_all_attributes(io::AIO)
c_attributes = Ref{Ptr{Ptr{Cvoid}}}()
size = Ref{Csize_t}()
err = ccall((:adios2_inquire_all_attributes, libadios2_c), Cint,
(Ref{Ptr{Ptr{Cvoid}}}, Ref{Csize_t}, Ptr{Cvoid}), c_attributes,
size, io.ptr)
Error(err) β error_none && return nothing
attributes = Array{Attribute}(undef, size[])
for n in 1:length(attributes)
ptr = unsafe_load(c_attributes[], n)
@assert ptr β C_NULL
attributes[n] = Attribute(ptr, io.adios)
end
free(c_attributes[])
return attributes
end
export inquire_group_attributes
"""
vars = inquire_group_attributes(io::AIO, full_prefix::AbstractString)
vars::Vector{String}
List all attributes in the group `full_prefix`.
"""
function inquire_group_attributes(io::AIO, full_prefix::AbstractString)
c_attributes = Ref{Ptr{Ptr{Cvoid}}}(C_NULL)
size = Ref{Csize_t}()
err = ccall((:adios2_inquire_group_attributes, libadios2_c), Cint,
(Ref{Ptr{Ptr{Cvoid}}}, Cstring, Ref{Csize_t}, Ptr{Cvoid}),
c_attributes, full_prefix, size, io.ptr)
Error(err) β error_none && return nothing
attributes = Array{Attribute}(undef, size[])
for n in 1:length(attributes)
ptr = unsafe_load(c_attributes[], n)
@assert ptr β C_NULL
attributes[n] = Attribute(ptr, io.adios)
end
free(c_attributes[])
return attributes
end
export inquire_subgroups
"""
groups = inquire_subgroups(io::AIO, full_prefix::AbstractString)
groups::Vector{String}
List all subgroups in the group `full_prefix`.
"""
function inquire_subgroups(io::AIO, full_prefix::AbstractString)
c_subgroups = Ref{Ptr{Ptr{Cchar}}}()
size = Ref{Csize_t}()
err = ccall((:adios2_inquire_subgroups, libadios2_c), Cint,
(Ref{Ptr{Ptr{Cchar}}}, Cstring, Ref{Csize_t}, Ptr{Cvoid}),
c_subgroups, full_prefix, size, io.ptr)
Error(err) β error_none && return nothing
subgroups = Array{String}(undef, size[])
for n in 1:length(subgroups)
ptr = unsafe_load(c_subgroups[], n)
@assert ptr β C_NULL
subgroups[n] = unsafe_string(ptr)
free(ptr)
end
free(c_subgroups[])
return subgroups
end
"""
engine = open(io::AIO, name::AbstractString, mode::Mode)
engine::Union{Nothing,Engine}
Open an Engine to start heavy-weight input/output operations.
In MPI version reuses the communicator from [`adios_init_mpi`](@ref).
MPI Collective function as it calls `MPI_Comm_dup`.
# Arguments
- `io`: engine owner
- `name`: unique engine identifier
- `mode`: `mode_write`, `mode_read`, `mode_append` (not yet supported)
"""
function Base.open(io::AIO, name::AbstractString, mode::Mode)
ptr = ccall((:adios2_open, libadios2_c), Ptr{Cvoid},
(Ptr{Cvoid}, Cstring, Cint), io.ptr, name, mode)
return ptr == C_NULL ? nothing : Engine(ptr, io.adios)
end
export engine_type
"""
type = engine_type(io::AIO)
type::Union{Nothing,String}
Return engine type string. See [`set_engine`](@ref).
"""
function engine_type(io::AIO)
size = Ref{Csize_t}()
err = ccall((:adios2_engine_type, libadios2_c), Cint,
(Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), C_NULL, size, io.ptr)
Error(err) β error_none && return nothing
type = '\0'^size[]
err = ccall((:adios2_engine_type, libadios2_c), Cint,
(Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), type, size, io.ptr)
Error(err) β error_none && return nothing
return type
end
export get_engine
"""
engine = get_engine(io::AIO, name::AbstractString)
engine::Union{Nothing,Engine}
"""
function get_engine(io::AIO, name::AbstractString)
ptr = ccall((:adios2_get_engin, libadios2_c), Ptr{Cvoid},
(Ptr{Cvoid}, Cstring), io.ptr, name)
return ptr == C_NULL ? nothing : Engine(ptr, io.adios)
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 5225 | # Types
export Error
export error_none, error_invalid_argument, error_system_error,
       error_runtime_error, error_exception
"""
    @enum Error begin
        error_none
        error_invalid_argument
        error_system_error
        error_runtime_error
        error_exception
    end

`Error` return types for all ADIOS2 functions

Based on the [library C++ standardized
exceptions](https://en.cppreference.com/w/cpp/error/exception). Each
error will issue a more detailed description in the standard error
output, stderr
"""
# Numeric values mirror `adios2_error` in the ADIOS2 C API.
@enum Error begin
    error_none = 0
    error_invalid_argument = 1
    error_system_error = 2
    error_runtime_error = 3
    error_exception = 4
end
# `AType` mirrors `adios2_type` in the ADIOS2 C API; it enumerates the
# element types ADIOS can store.
@enum AType begin
    type_unknown = -1
    type_string = 0
    type_float = 1
    type_double = 2
    type_float_complex = 3
    type_double_complex = 4
    type_int8_t = 5
    type_int16_t = 6
    type_int32_t = 7
    type_int64_t = 8
    type_uint8_t = 9
    type_uint16_t = 10
    type_uint32_t = 11
    type_uint64_t = 12
    type_long_double = 13
end
# We omit `type_long_double` that we cannot handle
const adios_types = AType[type_string, type_float, type_double,
                          type_float_complex, type_double_complex, type_int8_t,
                          type_int16_t, type_int32_t, type_int64_t,
                          type_uint8_t, type_uint16_t, type_uint32_t,
                          type_uint64_t]
# Map a Julia type onto the corresponding ADIOS element type.
adios_type(::Type{String}) = type_string
adios_type(::Type{Float32}) = type_float
adios_type(::Type{Float64}) = type_double
adios_type(::Type{Complex{Float32}}) = type_float_complex
adios_type(::Type{Complex{Float64}}) = type_double_complex
adios_type(::Type{Int8}) = type_int8_t
adios_type(::Type{Int16}) = type_int16_t
adios_type(::Type{Int32}) = type_int32_t
adios_type(::Type{Int64}) = type_int64_t
adios_type(::Type{UInt8}) = type_uint8_t
adios_type(::Type{UInt16}) = type_uint16_t
adios_type(::Type{UInt32}) = type_uint32_t
adios_type(::Type{UInt64}) = type_uint64_t
# Inverse mapping, ordered so that `type_string == 0` lands at index 1.
const julia_types = Type[String, Float32, Float64, Complex{Float32},
                         Complex{Float64}, Int8, Int16, Int32, Int64, UInt8,
                         UInt16, UInt32, UInt64]
# Note: `type_unknown` (-1) and `type_long_double` (13) have no entry in
# `julia_types` and would raise a BoundsError here.
julia_type(type::AType) = julia_types[Int(type) + 1]
export AdiosType
"""
    const AdiosType = Union{AbstractString,
                            Float32,Float64,
                            Complex{Float32},Complex{Float64},
                            Int8,Int16,Int32,Int64,
                            UInt8,UInt16,UInt32,UInt64}

A Union of all scalar types supported in ADIOS files.
"""
# `type_long_double` has no Julia counterpart and is deliberately absent.
const AdiosType = Union{AbstractString,Float32,Float64,Complex{Float32},
                        Complex{Float64},Int8,Int16,Int32,Int64,UInt8,UInt16,
                        UInt32,UInt64}
export Mode
export mode_undefined, mode_write, mode_read, mode_append,
       mode_readRandomAccess, mode_deferred, mode_sync
"""
    @enum Mode begin
        mode_undefined
        mode_write
        mode_read
        mode_append
        mode_readRandomAccess
        mode_deferred
        mode_sync
    end

Mode specifies for various functions. `write`, `read`, `append`, and
`readRandomAccess` are used for file operations. `deferred` and `sync` are used
for get and put operations.
"""
# Numeric values mirror `adios2_mode`; `mode_readRandomAccess` (6) was added
# in ADIOS2 2.9, after `mode_deferred`/`mode_sync`, hence the out-of-order
# value.
@enum Mode begin
    mode_undefined = 0
    mode_write = 1
    mode_read = 2
    mode_append = 3
    mode_readRandomAccess = 6 # ADIOS2 2.9
    mode_deferred = 4
    mode_sync = 5
end
export StepMode
export step_mode_append, step_mode_update, step_mode_read
"""
    @enum StepMode begin
        step_mode_append
        step_mode_update
        step_mode_read
    end
"""
# Numeric values mirror `adios2_step_mode` in the ADIOS2 C API.
@enum StepMode begin
    step_mode_append = 0
    step_mode_update = 1
    step_mode_read = 2
end
export StepStatus
export step_status_other_error, step_status_ok, step_status_not_ready,
       step_status_end_of_stream
"""
    @enum StepStatus begin
        step_status_other_error
        step_status_ok
        step_status_not_ready
        step_status_end_of_stream
    end
"""
# Numeric values mirror `adios2_step_status`; only `step_status_other_error`
# is negative.
@enum StepStatus begin
    step_status_other_error = -1
    step_status_ok = 0
    step_status_not_ready = 1
    step_status_end_of_stream = 2
end
export ShapeId
export shapeid_unknown, shapeid_global_value, shapeid_global_array,
       shapeid_joined_array, shapeid_local_value, shapeid_local_array
"""
    @enum ShapeId begin
        shapeid_unknown
        shapeid_global_value
        shapeid_global_array
        shapeid_joined_array
        shapeid_local_value
        shapeid_local_array
    end
"""
# Numeric values mirror `adios2_shapeid`; note the -1 starting value.
@enum ShapeId begin
    shapeid_unknown = -1
    shapeid_global_value = 0
    shapeid_global_array = 1
    shapeid_joined_array = 2
    shapeid_local_value = 3
    shapeid_local_array = 4
end
const shapeid_strings = String["shapeid_unknown", "shapeid_global_value",
                               "shapeid_global_array", "shapeid_joined_array",
                               "shapeid_local_value", "shapeid_local_array"]
# `+ 2` because `ShapeId` starts at `shapeid_unknown = -1`.
shapeid_string(shapeid::ShapeId) = shapeid_strings[Int(shapeid) + 2]
Base.show(io::IO, shapeid::ShapeId) = print(io, shapeid_string(shapeid))
# Constants mirrored from the ADIOS2 C API.
const string_array_element_max_size = 4096
const local_value_dim = typemax(Csize_t) - 2
export LocalValue
"""
    struct LocalValue

Marker singleton type; presumably used to declare local (per-writer) values
when defining variables — confirm against `define_variable` callers.
"""
struct LocalValue end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 9582 | # Variable functions
export Variable
"""
    struct Variable

Holds a C pointer `adios2_variable *`.
"""
struct Variable
    ptr::Ptr{Cvoid}    # adios2_variable *
    adios::Adios       # owning Adios object, kept to tie lifetimes together
    Variable(ptr::Ptr{Cvoid}, adios::Adios) = new(ptr, adios)
end
# Verbose representation. (Note: the compact form below is used for
# MIME"text/plain"; the test suite relies on this arrangement.)
function Base.show(io::IO, variable::Variable)
    print(io, "Variable(name=", name(variable), ",type=", type(variable),
          ",shapeid=", shapeid(variable), ",shape=", shape(variable), ")")
    return
end
# Compact representation showing only the variable name.
function Base.show(io::IO, ::MIME"text/plain", variable::Variable)
    print(io, "Variable(", name(variable), ")")
    return
end
export set_block_selection
"""
    set_block_selection(variable::Variable, block_id::Int)

Select block `block_id` of the variable. Returns `()` on success and
`nothing` on error.
"""
function set_block_selection(variable::Variable, block_id::Int)
    status = ccall((:adios2_set_block_selection, libadios2_c), Cint,
                   (Ptr{Cvoid}, Csize_t), variable.ptr, block_id)
    Error(status) == error_none || return nothing
    return ()
end
export set_selection
"""
    set_selection(variable::Variable,
                  start::Union{Nothing,NTuple{N,Int} where N,CartesianIndex}=nothing,
                  count::Union{Nothing,NTuple{N,Int} where N,CartesianIndex}=nothing)

Select a region (`start` offset and `count` extent per dimension) of the
variable. Indices are given in Julia (column-major) order and are reversed
before being handed to the row-major C API; `nothing` passes a null pointer
for the corresponding argument. Returns `()` on success and `nothing` on
error.
"""
function set_selection(variable::Variable,
                       start::Union{Nothing,NTuple{N,Int} where N,
                                    CartesianIndex}=nothing,
                       count::Union{Nothing,NTuple{N,Int} where N,
                                    CartesianIndex}=nothing)
    D = ndims(variable)
    err = ccall((:adios2_set_selection, libadios2_c), Cint,
                (Ptr{Cvoid}, Csize_t, Ptr{Csize_t}, Ptr{Csize_t}), variable.ptr,
                D,
                start ≡ nothing ? C_NULL : Csize_t[reverse(Tuple(start))...],
                count ≡ nothing ? C_NULL : Csize_t[reverse(Tuple(count))...])
    Error(err) ≠ error_none && return nothing
    # Return `()` on success, consistent with `set_block_selection` and
    # `set_step_selection` (previously this fell through and returned the
    # value `false` of the error check above).
    return ()
end
export set_step_selection
"""
    set_step_selection(variable::Variable, step_start::Int, step_count::Int)

Only works with file-based engines, not streaming engines (e.g. does not
work with SST).
"""
function set_step_selection(variable::Variable, step_start::Int,
                            step_count::Int)
    status = ccall((:adios2_set_step_selection, libadios2_c), Cint,
                   (Ptr{Cvoid}, Csize_t, Csize_t), variable.ptr, step_start,
                   step_count)
    Error(status) == error_none || return nothing
    return ()
end
export name
"""
    var_name = name(variable::Variable)
    var_name::Union{Nothing,String}

Retrieve variable name.
"""
function name(variable::Variable)
    # Two-phase query: first call (NULL buffer) reports the length, second
    # call fills a mutable byte buffer.
    len = Ref{Csize_t}()
    err = ccall((:adios2_variable_name, libadios2_c), Cint,
                (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), C_NULL, len,
                variable.ptr)
    Error(err) == error_none || return nothing
    buffer = Array{Cchar}(undef, len[])
    err = ccall((:adios2_variable_name, libadios2_c), Cint,
                (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), buffer, len,
                variable.ptr)
    Error(err) == error_none || return nothing
    return unsafe_string(pointer(buffer), len[])
end
export type
"""
    var_type = type(variable::Variable)
    var_type::Union{Nothing,Type}

Retrieve variable type (as the corresponding Julia type).
"""
function type(variable::Variable)
    atype = Ref{Cint}()
    err = ccall((:adios2_variable_type, libadios2_c), Cint,
                (Ref{Cint}, Ptr{Cvoid}), atype, variable.ptr)
    Error(err) == error_none || return nothing
    return julia_type(AType(atype[]))
end
# export type_string
# """
# type_string = variable_type_string(variable::Variable)
# type_string::Union{Nothing,String}
#
# Retrieve variable type in string form "char", "unsigned long", etc.
# This reports C type names.
# """
# function type_string(variable::Variable)
# size = Ref{Csize_t}()
# err = ccall((:adios2_variable_type_string, libadios2_c), Cint,
# (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), C_NULL, size,
# variable.ptr)
# Error(err) β error_none && return nothing
# type = '\0'^size[]
# err = ccall((:adios2_variable_type_string, libadios2_c), Cint,
# (Ptr{Cchar}, Ref{Csize_t}, Ptr{Cvoid}), name, size,
# variable.ptr)
# Error(err) β error_none && return nothing
# return type
# end
export shapeid
"""
    var_shapeid = shapeid(variable::Variable)
    var_shapeid::Union{Nothing,ShapeId}

Retrieve variable shapeid.
"""
function shapeid(variable::Variable)
    sid = Ref{Cint}()
    err = ccall((:adios2_variable_shapeid, libadios2_c), Cint,
                (Ref{Cint}, Ptr{Cvoid}), sid, variable.ptr)
    Error(err) == error_none || return nothing
    return ShapeId(sid[])
end
"""
var_ndims = ndims(variable::Variable)
var_ndims::Union{Nothing,Int}
Retrieve current variable number of dimensions.
"""
function Base.ndims(variable::Variable)
var_shapeid = shapeid(variable)
var_shapeid β‘ nothing && return nothing
var_shapeid == shapeid_local_value && return 0
ndims = Ref{Csize_t}()
err = ccall((:adios2_variable_ndims, libadios2_c), Cint,
(Ref{Csize_t}, Ptr{Cvoid}), ndims, variable.ptr)
Error(err) β error_none && return nothing
return Int(ndims[])
end
export shape
"""
    var_shape = shape(variable::Variable)
    var_shape::Union{Nothing,NTuple{N,Int} where N}

Retrieve current variable shape. Local variables have no global shape and
yield `nothing`.
"""
function shape(variable::Variable)
    sid = shapeid(variable)
    sid ≡ nothing && return nothing
    sid ∈ (shapeid_local_value, shapeid_local_array) && return nothing
    D = ndims(variable)
    buffer = Array{Csize_t}(undef, D)
    err = ccall((:adios2_variable_shape, libadios2_c), Cint,
                (Ptr{Csize_t}, Ptr{Cvoid}), buffer, variable.ptr)
    Error(err) == error_none || return nothing
    # Reverse row-major (C) dimension order into column-major (Julia) order.
    return Tuple(reverse!(buffer))
end
export start
"""
    var_start = start(variable::Variable)
    var_start::Union{Nothing,NTuple{N,Int} where N}

Retrieve current variable start. Local variables have no global start and
yield `nothing`.
"""
function start(variable::Variable)
    sid = shapeid(variable)
    sid ≡ nothing && return nothing
    sid ∈ (shapeid_local_value, shapeid_local_array) && return nothing
    D = ndims(variable)
    buffer = Array{Csize_t}(undef, D)
    err = ccall((:adios2_variable_start, libadios2_c), Cint,
                (Ptr{Csize_t}, Ptr{Cvoid}), buffer, variable.ptr)
    Error(err) == error_none || return nothing
    # Reverse row-major (C) dimension order into column-major (Julia) order.
    return Tuple(reverse!(buffer))
end
"""
var_count = count(variable::Variable)
var_count::Union{Nothing,NTuple{N,Int} where N}
Retrieve current variable count.
"""
function Base.count(variable::Variable)
var_shapeid = shapeid(variable)
var_shapeid β‘ nothing && return nothing
var_shapeid == shapeid_local_value && return ()
D = ndims(variable)
count = Array{Csize_t}(undef, D)
err = ccall((:adios2_variable_count, libadios2_c), Cint,
(Ptr{Csize_t}, Ptr{Cvoid}), count, variable.ptr)
Error(err) β error_none && return nothing
return Tuple(reverse!(count))
end
export steps_start
"""
    var_steps_start = steps_start(variable::Variable)
    var_steps_start::Union{Nothing,Int}

Read API, get available steps start from available steps count (e.g.
in a file for a variable).

This returns the absolute first available step, don't use with
`adios2_set_step_selection` as inputs are relative, use `0` instead.
"""
function steps_start(variable::Variable)
    result = Ref{Csize_t}()
    err = ccall((:adios2_variable_steps_start, libadios2_c), Cint,
                (Ref{Csize_t}, Ptr{Cvoid}), result, variable.ptr)
    Error(err) == error_none || return nothing
    return Int(result[])
end
export steps
"""
    var_steps = steps(variable::Variable)
    var_steps::Union{Nothing,Int}

Read API, get available steps count from available steps count (e.g.
in a file for a variable). Not necessarily contiguous.
"""
function steps(variable::Variable)
    result = Ref{Csize_t}()
    err = ccall((:adios2_variable_steps, libadios2_c), Cint,
                (Ref{Csize_t}, Ptr{Cvoid}), result, variable.ptr)
    Error(err) == error_none || return nothing
    return Int(result[])
end
export selection_size
"""
    var_selection_size = selection_size(variable::Variable)
    var_selection_size::Union{Nothing,Int}

Return the minimum required allocation (in number of elements of a
certain type, not bytes) for the current selection.
"""
function selection_size(variable::Variable)
    result = Ref{Csize_t}()
    err = ccall((:adios2_selection_size, libadios2_c), Cint,
                (Ref{Csize_t}, Ptr{Cvoid}), result, variable.ptr)
    Error(err) == error_none || return nothing
    return Int(result[])
end
"""
var_min = minimum(variable::Variable)
var_min::Union{Nothing,T}
Read mode only: return the absolute minimum for variable.
"""
function Base.minimum(variable::Variable)
T = type(variable)
T β‘ nothing && return nothing
varmin = Ref{T}()
err = ccall((:adios2_variable_min, libadios2_c), Cint,
(Ptr{Cvoid}, Ptr{Cvoid}), varmin, variable.ptr)
Error(err) β error_none && return nothing
return varmin[]
end
"""
var_max = maximum(variable::Variable)
var_max::Union{Nothing,T}
Read mode only: return the absolute maximum for variable.
"""
function Base.maximum(variable::Variable)
T = type(variable)
T β‘ nothing && return nothing
varmax = Ref{T}()
err = ccall((:adios2_variable_max, libadios2_c), Cint,
(Ptr{Cvoid}, Ptr{Cvoid}), varmax, variable.ptr)
Error(err) β error_none && return nothing
return varmax[]
end
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
|
[
"MIT"
] | 1.2.1 | d0675b5245d2b15fa971ef0dc03c7dfe2cf6ffe3 | code | 20661 | # Test basic API
# Zero-padded rank label used to give each MPI rank unique variable names.
const rankstr = @sprintf "%06d" comm_rank
# The root rank creates the temporary directory ...
if comm_rank == comm_root
    const dirname = Filesystem.mktempdir()
end
# ... and, under MPI, broadcasts its path so all ranks share one directory.
if use_mpi
    if comm_rank == comm_root
        MPI.bcast(dirname, comm_root, comm)
    else
        const dirname = MPI.bcast(nothing, comm_root, comm)
    end
end
const filename = "$dirname/test.bp"
# "BP3", "BP4", "BP5", "HDF5", "SST", "SSC", "DataMan", "Inline", "Null"
const ENGINE_TYPE = "BP4"
@testset "File write tests" begin
# Set up ADIOS
if use_mpi
adios = adios_init_mpi(comm)
else
adios = adios_init_serial()
end
@test adios isa Adios
@test match(r"Adios\(.+\)", string(adios)) β’ nothing
@test match(r"Adios\(0x[0-9a-f]+\)", showmime(adios)) β’ nothing
io = declare_io(adios, "IO")
@test io isa AIO
@test match(r"AIO\(.+\)", string(io)) β’ nothing
@test match(r"AIO\(0x[0-9a-f]+\)", showmime(io)) β’ nothing
# Define some variables
variables = Dict()
for T in
Type[String, Float32, Float64, Complex{Float32}, Complex{Float64}, Int8,
Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64]
val = T β‘ String ? "42" : T(42)
val::T
# Global value
nm = "gvalue.p$rankstr.$T"
gval = define_variable(io, nm, T)
@test gval isa Variable
@test match(r"Variable\(name=.+,type=.+,shapeid=.+,shape=.+\)",
string(gval)) β’ nothing
@test match(r"Variable\(.+\)", showmime(gval)) β’ nothing
variables[(shapeid_global_value, -1, -1, T)] = (nm, gval)
# Local value
nm = "lvalue.p$rankstr.$T"
lval = define_variable(io, nm, val)
@test lval isa Variable
@test match(r"Variable\(name=.+,type=.+,shapeid=.+,shape=.+\)",
string(lval)) β’ nothing
@test match(r"Variable\(.+\)", showmime(lval)) β’ nothing
variables[(shapeid_local_value, -1, -1, T)] = (nm, lval)
# String arrays are not supported
if T β’ String
for D in 1:3, len in 0:2
# size
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
arr = fill(val, sz)
# shape, start, count of global array
cs = ntuple(d -> d < D ? 1 : comm_size, D)
cr = ntuple(d -> d < D ? 0 : comm_rank, D)
sh = cs .* sz
st = cr .* sz
co = sz
# Global array
nm = "garray.$T.$D.$len"
garr = define_variable(io, nm, T, sh, st, co)
@test garr isa Variable
@test match(r"Variable\(name=.+,type=.+,shapeid=.+,shape=.+\)",
string(garr)) β’ nothing
@test match(r"Variable\(.+\)", showmime(garr)) β’ nothing
variables[(shapeid_global_array, D, len, T)] = (nm, garr)
# Local array
nm = "larray.p$rankstr.$T.$D.$len"
larr = define_variable(io, nm, arr)
@test larr isa Variable
@test match(r"Variable\(name=.+,type=.+,shapeid=.+,shape=.+\)",
string(larr)) β’ nothing
@test match(r"Variable\(.+\)", showmime(larr)) β’ nothing
variables[(shapeid_local_array, D, len, T)] = (nm, larr)
end
end
end
# Check variables
allvars = inquire_all_variables(io)
@test Set(allvars) == Set([var for (name, var) in values(variables)])
var0 = inquire_variable(io, "not a variable")
@test var0 isa Nothing
for ((si, D, len, T), (nm, var)) in variables
var1 = inquire_variable(io, nm)
@test var1 isa Variable
@test nm == name(var1)
@test type(var1) == T
@test shapeid(var1) == si
if si == shapeid_global_value
@test ndims(var1) == 0
@test shape(var1) == ()
@test start(var1) == ()
@test count(var1) == ()
elseif si == shapeid_local_value
@test ndims(var1) == 0
@test shape(var1) β‘ nothing
@test start(var1) β‘ nothing
@test count(var1) β‘ ()
elseif si == shapeid_global_array
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
cs = ntuple(d -> d < D ? 1 : comm_size, D)
cr = ntuple(d -> d < D ? 0 : comm_rank, D)
sh = cs .* sz
st = cr .* sz
co = sz
@test ndims(var1) == D
@test shape(var1) == sh
@test start(var1) == st
@test count(var1) == co
elseif si == shapeid_local_array
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
@test ndims(var1) == D
@test shape(var1) β‘ nothing
@test start(var1) β‘ nothing
@test count(var1) == sz
else
error("internal error")
end
end
# Define some attributes
attributes = Dict()
for T in Type[String, Float32, Float64,
# Currently broken, see
# <https://github.com/ornladios/ADIOS2/issues/2734>
# Complex{Float32}, Complex{Float64},
Int8, Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64]
val = T β‘ String ? "42" : T(42)
val::T
# Attribute
nm = "value.p$rankstr.$T"
attr = define_attribute(io, nm, val)
@test attr isa Attribute
@test match(r"Attribute\(name=.+,type=.+,is_value=(false|true),size=.+,data=.+\)",
string(attr)) β’ nothing
@test match(r"Attribute\(.+\)", showmime(attr)) β’ nothing
attributes[(-1, -1, "", T)] = (nm, attr)
# Variable attribute
nm = "value.p$rankstr.$T"
varname = variables[(shapeid_global_value, -1, -1, T)][1]
attr = define_variable_attribute(io, nm, val, varname)
@test attr isa Attribute
@test match(r"Attribute\(name=.+,type=.+,is_value=(false|true),size=.+,data=.+\)",
string(attr)) β’ nothing
@test match(r"Attribute\(.+\)", showmime(attr)) β’ nothing
attributes[(-1, -1, varname, T)] = ("$varname/$nm", attr)
# Attribute arrays need to have at least one element (why?)
for len in 1:2:3
vals = (T β‘ String ? String["42", "", "44"] : T[42, 0, 44])[1:len]
# Attribute array
nm = "array.p$rankstr.$T.$len"
arr = define_attribute_array(io, nm, vals)
@test arr isa Attribute
@test match(r"Attribute\(name=.+,type=.+,is_value=(false|true),size=.+,data=.+\)",
string(arr)) β’ nothing
@test match(r"Attribute\(.+\)", showmime(arr)) β’ nothing
attributes[(1, len, "", T)] = (nm, arr)
# Variable attribute array
nm = "array.p$rankstr.$T.$len"
varname = variables[(shapeid_global_value, -1, -1, T)][1]
arr = define_variable_attribute_array(io, nm, vals, varname)
@test match(r"Attribute\(name=.+,type=.+,is_value=(false|true),size=.+,data=.+\)",
string(arr)) β’ nothing
@test match(r"Attribute\(.+\)", showmime(arr)) β’ nothing
@test arr isa Attribute
attributes[(1, len, varname, T)] = ("$varname/$nm", arr)
end
end
# Check attributes
allattrs = inquire_all_attributes(io)
@test Set(allattrs) == Set([attr for (name, attr) in values(attributes)])
attr0 = inquire_attribute(io, "not an attribute")
@test attr0 isa Nothing
attr0 = inquire_variable_attribute(io, "not an attribute", "not a variable")
@test attr0 isa Nothing
for ((D, len, varname, T), (nm, attr)) in attributes
attr1 = inquire_attribute(io, nm)
@test attr1 isa Attribute
@test nm == name(attr1)
@test type(attr1) == T
if D == -1
@test is_value(attr)
@test size(attr) == 1
val = T β‘ String ? "42" : T(42)
@test data(attr) == val
else
@test !is_value(attr)
@test size(attr) == len
vals = (T β‘ String ? String["42", "", "44"] : T[42, 0, 44])[1:len]
@test data(attr) == vals
end
end
err = set_engine(io, ENGINE_TYPE)
@test err β‘ error_none
etype = engine_type(io)
# @test etype == "File"
@test etype == ENGINE_TYPE
# Write the file
engine = open(io, filename, mode_write)
@test engine isa Engine
# Schedule variables for writing
for ((si, D, len, T), (nm, var)) in variables
val = T β‘ String ? "42" : T(42)
if si β (shapeid_global_value, shapeid_local_value)
err = put!(engine, var, val)
@test err β‘ error_none
elseif si β (shapeid_global_array, shapeid_local_array)
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
# Construct a single number from a tuple
ten(i, s) = i == () ? s : ten(i[2:end], 10s + i[1])
# Convert to type `T`
function mkT(T, s)
return (T β‘ String ? "$s" :
T <: Union{AbstractFloat,Complex} ? T(s) : s % T)::T
end
arr = T[mkT(T, ten(Tuple(i), 0)) for i in CartesianIndices(sz)]
arr1 = rand(Bool) ? arr : @view arr[:]
err = put!(engine, var, arr1)
@test err β‘ error_none
else
error("internal error")
end
end
# Write the variables
err = perform_puts!(engine)
@test err β‘ error_none
# Minima and maxima are only available when reading a file
# for ((si, D, T), (nm, var)) in variables
# val = T β‘ String ? "42" : T(42)
# @test minimum(var) == val
# @test maximum(var) == val
# end
err = close(engine)
@test err β‘ error_none
finalize(adios)
end
# Call gc to test finalizing the Adios object (the write-test `adios` went
# out of scope when its testset ended above).
GC.gc(true)
@testset "File read tests" begin
# Set up ADIOS
if use_mpi
adios = adios_init_mpi(comm)
else
adios = adios_init_serial()
end
@test adios isa Adios
io = declare_io(adios, "IO")
@test io isa AIO
# Open the file
if ADIOS2_VERSION < v"2.9.0"
# We need to use `mode_read` for ADIOS2 <2.9, and `mode_readRandomAccess` for ADIOS2 β₯2.9
engine = open(io, filename, mode_read)
else
engine = open(io, filename, mode_readRandomAccess)
end
@test engine isa Engine
err = set_engine(io, ENGINE_TYPE)
@test err β‘ error_none
etype = engine_type(io)
# @test etype == "File"
@test etype == ENGINE_TYPE
# Inquire about all variables
variables = Dict()
for T in
Type[String, Float32, Float64, Complex{Float32}, Complex{Float64}, Int8,
Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64]
# Global value
nm = "gvalue.p$rankstr.$T"
gval = inquire_variable(io, nm)
@test gval isa Variable
variables[(shapeid_global_value, -1, -1, T)] = (nm, gval)
# Local value
nm = "lvalue.p$rankstr.$T"
lval = inquire_variable(io, nm)
@test lval isa Variable
variables[(shapeid_local_value, -1, -1, T)] = (nm, lval)
# String arrays are not supported
if T β’ String
for D in 1:3, len in 0:2
# Global array
nm = "garray.$T.$D.$len"
garr = inquire_variable(io, nm)
@test garr isa Variable
variables[(shapeid_global_array, D, len, T)] = (nm, garr)
# Local array
nm = "larray.p$rankstr.$T.$D.$len"
larr = inquire_variable(io, nm)
@test larr isa Variable
variables[(shapeid_local_array, D, len, T)] = (nm, larr)
end
end
end
# Check variables
allvars = inquire_all_variables(io)
if comm_size == 1
@test Set(allvars) == Set([var for (name, var) in values(variables)])
else
@test Set(allvars) β Set([var for (name, var) in values(variables)])
end
var0 = inquire_variable(io, "not a variable")
@test var0 isa Nothing
for ((si, D, len, T), (nm, var)) in variables
var1 = inquire_variable(io, nm)
@test var1 isa Variable
@test nm == name(var1)
@test type(var1) == T
if si == shapeid_global_value
# Global values are re-interpreted as global arrays
@test shapeid(var1) == si
@test ndims(var1) == 0
@test shape(var1) == ()
@test start(var1) == ()
@test count(var1) == ()
elseif si == shapeid_local_value
# Local values are re-interpreted as global arrays
@test shapeid(var1) == shapeid_global_array
@test ndims(var1) == 1
if ENGINE_TYPE == "BP4"
@test shape(var1) == (1,)
else
@test shape(var1) == (comm_size,)
end
@test start(var1) == (0,)
if ENGINE_TYPE == "BP4"
@test count(var1) == (1,)
else
@test count(var1) == (comm_size,)
end
elseif si == shapeid_global_array
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
cs = ntuple(d -> d < D ? 1 : comm_size, D)
cr = ntuple(d -> d < D ? 0 : comm_rank, D)
sh = cs .* sz
st = cr .* sz
co = sz
if ENGINE_TYPE == "BP4" && len == 0
# Empty global arrays are mis-interpreted as global values
@test shapeid(var1) == shapeid_global_value
@test ndims(var1) == 0
@test shape(var1) == ()
@test start(var1) == ()
@test count(var1) == ()
else
@test shapeid(var1) == si
@test ndims(var1) == D
@test shape(var1) == sh
if comm_size == 1
# With multiple processes, there are multiple
# blocks, and they each have a different starting
# offset
@test start(var1) == st
@test count(var1) == co
else
@test_broken false
end
#TODO "need to select block to access variable"
end
elseif si == shapeid_local_array
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
if ENGINE_TYPE == "BP4" && len == 0
# Empty local arrays are mis-interpreted as global values
@test shapeid(var1) == shapeid_global_value
@test ndims(var1) == 0
@test shape(var1) == ()
@test start(var1) == ()
@test count(var1) == ()
else
@test shapeid(var1) == si
@test ndims(var1) == D
@test shape(var1) β‘ nothing
@test start(var1) β‘ nothing
@test count(var1) == sz
end
else
error("internal error")
end
end
# Read attributes
attributes = Dict()
for T in
Type[String, Float32, Float64, Complex{Float32}, Complex{Float64}, Int8,
Int16, Int32, Int64, UInt8, UInt16, UInt32, UInt64]
# Complex attributes cannot be read via the C API (see
# <https://github.com/ornladios/ADIOS2/issues/2734>)
T <: Complex && continue
# Attribute
nm = "value.p$rankstr.$T"
attr = inquire_attribute(io, nm)
@test attr isa Attribute
attributes[(-1, -1, "", T)] = (nm, attr)
# Variable attribute
nm = "value.p$rankstr.$T"
varname = variables[(shapeid_global_value, -1, -1, T)][1]
attr = inquire_variable_attribute(io, nm, varname)
@test attr isa Attribute
attributes[(-1, -1, varname, T)] = (nm, attr)
# Attribute arrays need to have at least one element (why?)
for len in 1:2:3
# Attribute array
nm = "array.p$rankstr.$T.$len"
arr = inquire_attribute(io, nm)
@test arr isa Attribute
attributes[(1, len, "", T)] = (nm, arr)
# Variable attribute array
nm = "array.p$rankstr.$T.$len"
varname = variables[(shapeid_global_value, -1, -1, T)][1]
arr = inquire_variable_attribute(io, nm, varname)
@test arr isa Attribute
attributes[(1, len, varname, T)] = (nm, arr)
end
end
# Check attributes
# With a single process the inquired set must match exactly; with
# multiple processes other ranks' attributes are also visible, so we
# only require a superset.
allattrs = inquire_all_attributes(io)
if comm_size == 1
@test Set(allattrs) ==
Set([attr for (name, attr) in values(attributes)])
else
@test Set(allattrs) ⊇ Set([attr for (name, attr) in values(attributes)])
end
# Inquiring nonexistent attributes must return `nothing`, not throw
attr0 = inquire_attribute(io, "not an attribute")
@test attr0 isa Nothing
attr0 = inquire_variable_attribute(io, "not an attribute", "not a variable")
@test attr0 isa Nothing
for ((D, len, varname, T), (nm, attr)) in attributes
# The producer wrote the value 42 (as a string for T ≡ String)
val = T ≡ String ? "42" : T(42)
val::T
# Re-inquiring by name must yield a handle with matching metadata
attr1 = inquire_attribute(io, nm)
@test attr1 isa Attribute
@test nm == name(attr1)
@test type(attr1) == T
if D == -1
# Scalar attribute: a single value
@test is_value(attr)
@test size(attr) == 1
@test data(attr) == val
else
if ENGINE_TYPE == "BP4" && T ≢ String && len == 1
# Length-1 non-string attribute arrays are mis-interpreted as values
@test is_value(attr)
@test size(attr) == len
vals = (T ≡ String ? String["42", "", "44"] : T[42, 0, 44])[1:len]
@test [data(attr)] == vals
else
# Array attribute: compare against the first `len` written values
@test !is_value(attr)
@test size(attr) == len
vals = (T ≡ String ? String["42", "", "44"] : T[42, 0, 44])[1:len]
@test data(attr) == vals
end
end
end
# Schedule variables for reading
# `get` only registers the destination buffer; the actual reads happen
# in `perform_gets` below, so every buffer is kept alive in `buffers`.
buffers = Dict()
for ((si, D, len, T), (nm, var)) in variables
if si ∈ (shapeid_global_value, shapeid_local_value)
# Local values are re-interpreted as global arrays
if ENGINE_TYPE == "BP4" || comm_size == 1
ref = Ref{T}()
else
ref = Array{T}(undef, comm_size)
end
err = get(engine, var, ref)
@test err ≡ error_none
buffers[(si, D, len, T)] = ref
elseif si ∈ (shapeid_global_array, shapeid_local_array)
# Allocate the destination from the variable's own element count;
# randomly pass either the array or a view of it to exercise both
# code paths of `get`
co = count(var)
arr = Array{T}(undef, co)
arr1 = rand(Bool) ? arr : @view arr[:]
err = get(engine, var, arr1)
@test err ≡ error_none
buffers[(si, D, len, T)] = arr
else
error("internal error")
end
end
# Read variables
# Execute all scheduled gets; this fills the buffers registered above
err = perform_gets(engine)
@test err ≡ error_none
# Check variables
# Compare each filled buffer against the values the producer wrote
for ((si, D, len, T), (nm, var)) in variables
# String variables cannot be read (compare
# <https://github.com/ornladios/ADIOS2/issues/2735>)
T ≡ String && continue
if si ∈ (shapeid_global_value, shapeid_local_value)
# Scalar values were written as 42
val = T ≡ String ? "42" : T(42)
@test all(buffers[(si, D, len, T)] .== val)
elseif si ∈ (shapeid_global_array, shapeid_local_array)
# Reconstruct the expected extent: len == 0 → empty, len == 1 →
# singleton, otherwise dimension d has extent d
sz = ntuple(d -> len == 0 ? 0 : len == 1 ? 1 : d, D)
# Fold a tuple of decimal digits into one integer, seeded with `s`,
# e.g. ten((1, 2, 3), 0) == 123 and ten((), s) == s
ten(digits, s) = foldl((acc, d) -> 10 * acc + d, digits; init=s)
# Convert the integer `s` to type `T`: its decimal text for `String`,
# an exact numeric conversion for floating-point and complex types,
# and modular wrap-around (`s % T`) for fixed-width integer types
function mkT(T, s)
    if T ≡ String
        converted = string(s)
    elseif T <: Union{AbstractFloat,Complex}
        converted = T(s)
    else
        converted = s % T
    end
    return converted::T
end
# Expected contents: each element encodes its Cartesian index as a
# decimal number (e.g. index (1,2,3) → 123), converted to T
arr = T[mkT(T, ten(Tuple(i), 0)) for i in CartesianIndices(sz)]
if ENGINE_TYPE == "BP4" && len == 0
# Empty global arrays are mis-interpreted as global values
# There is a spurious value `0`
@test buffers[(si, D, len, T)] == fill(T(0))
else
if comm_size == 1
@test buffers[(si, D, len, T)] == arr
else
# Multi-process array readback is known not to match yet
@test_broken false
end
end
else
error("internal error")
end
end
# Shut down cleanly: both close and finalize report an error code
err = close(engine)
@test err ≡ error_none
err = adios_finalize(adios)
@test err ≡ error_none
end
# Call gc to test finalizing the Adios object
GC.gc(true)
| ADIOS2 | https://github.com/eschnett/ADIOS2.jl.git |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.