## NODES
"""
Node{T<:Union{Machine,Nothing}}
Type for nodes in a learning network that are not `Source` nodes.
The key components of a Node are:
- An *operation*, which will either be static (a fixed function) or
dynamic (such as `predict` or `transform`).
- A `Machine` object, on which to dispatch the operation (`nothing` if the
operation is static). The training arguments of the machine are
generally other nodes, including `Source` nodes.
- Upstream connections to other nodes, called its *arguments*,
possibly including `Source` nodes, one for each data argument of the
operation (typically there's just one).
When a node `N` is called, as in `N()`, it applies the operation on
the machine (if there is one) together with the outcome of calls to
its node arguments, to compute the return value. For details on a
node's calling behavior, see [`node`](@ref).
See also [`node`](@ref), [`Source`](@ref), [`origins`](@ref),
[`sources`](@ref), [`fit!`](@ref).
"""
struct Node{T<:Union{Machine, Nothing},Oper} <: AbstractNode
operation::Oper # eg, `predict` or a static operation, such as `exp`
machine::T # is `nothing` for static operations
# nodes called to get args for `operation(model, ...) ` or
# `operation(...)`:
args::Tuple{Vararg{AbstractNode}}
# sources of ancestor graph (training edges excluded)
origins::Vector{Source}
# all ancestors (training edges included) listed in
# order consistent with extended graph, excluding self
nodes::Vector{AbstractNode}
function Node(
operation::Oper,
machine::T,
args::AbstractNode...,
) where {T<:Union{Machine, Nothing}, Oper}
# check the number of arguments:
# if machine === nothing && isempty(args)
# error("`args` in `Node(::Function, args...)` must be non-empty. ")
# end
origins_ = unique(vcat([origins(arg) for arg in args]...))
# length(origins_) == 1 ||
# @warn "A node referencing multiple origins when called " *
# "has been defined:\n$(origins_). "
# initialize the list of upstream nodes:
nodes_ = AbstractNode[]
# merge the lists from arguments:
nodes_ =
vcat(AbstractNode[], (nodes(n) for n in args)...) |> unique
# merge the lists from training arguments:
if machine !== nothing
nodes_ =
vcat(nodes_, (nodes(n) for n in machine.args)...) |> unique
end
return new{T,Oper}(operation, machine, args, origins_, nodes_)
end
end
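# Example (illustrative sketch only, not part of the original source; data and names
# below are hypothetical). `Node`s are normally built indirectly, via `source` and
# `node`:
#
#     Xs = source([1, 2, 3])      # a `Source` node wrapping some data
#     N  = node(sum, Xs)          # a static `Node`; `N.machine === nothing`
#     N()                         # == sum(Xs()) == 6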
"""
nrows_at_source(N::Node)
Return the number of rows of data wrapped at the source of `N`,
assuming this is unique.
Not to be confused with `J = nrows(N)`, which is a new node such that
`J() = nrows(N())`.
See also [`nrows`](@ref)
"""
function nrows_at_source(X::Node)
ss = sources(X)
length(ss) == 1 ||
error("Node does not have a unique source. ")
return nrows_at_source(first(ss))
end
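# Illustrative sketch (hypothetical data): for a node with a unique source wrapping a
# 3-row table, one would expect:
#
#     Xs = source((x=[1, 2, 3],))
#     N  = node(identity, Xs)
#     nrows_at_source(N)          # expected: 3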
"""
origins(N)
Return a list of all origins of a node `N` accessed by a call `N()`.
These are the source nodes of the ancestor graph of `N` if edges
corresponding to training arguments are excluded. A `Node` object
cannot be called on new data unless it has a unique origin.
Not to be confused with `sources(N)` which refers to the same graph
but without the training edge deletions.
See also: [`node`](@ref), [`source`](@ref).
"""
origins(X::Node) = X.origins
"""
nodes(N)
Return all nodes upstream of a node `N`, including `N` itself, in an
order consistent with the extended directed acyclic graph of the
network. Here "extended" means edges corresponding to training
arguments are included.
*Warning.* Not the same as `N.nodes`, which may not include `N`
itself.
"""
nodes(X::Node) = AbstractNode[X.nodes..., X]
color(N::Node{Nothing}) = :green
color(N::Node) = (N.machine.frozen ? :red : :green)
# constructor for static operations:
Node(operation, args::AbstractNode...) = Node(operation, nothing, args...)
_check(y::Node) = nothing
_check(y::Node{Nothing}) = length(y.origins) == 1 ? nothing :
error("Node $y has multiple origins and cannot be called "*
"on new data. ")
# make nodes callable:
(y::Node)(; rows=:) = _apply((y, y.machine); rows=rows)
(y::Node)(Xnew) = (_check(y); _apply((y, y.machine), Xnew))
(y::Node{Nothing})(; rows=:) = _apply((y, ); rows=rows)
(y::Node{Nothing})(Xnew)= (_check(y); _apply((y, ), Xnew))
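# Illustrative sketch of the three calling modes defined above (hypothetical data):
#
#     Xs = source([10, 20, 30])
#     N  = node(v -> 2 .* v, Xs)
#     N()                         # == [20, 40, 60]  (operation applied to Xs())
#     N(rows=2:3)                 # == [40, 60]      (`rows` is passed on to the source)
#     N([1, 2])                   # == [2, 4]        (new data replaces the unique origin)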
function _apply(y_plus, input...; kwargs...)
y = y_plus[1]
mach = y_plus[2:end] # in static case this is ()
raw_args = map(y.args) do arg
arg(input...; kwargs...)
end
try
(y.operation)(mach..., raw_args...)
catch exception
diagnostics = MLJBase.diagnostics(y, input...; kwargs...) # defined in sources.jl
if !isempty(mach)
@error "Failed "*
"to apply the operation `$(y.operation)` to the machine "*
"$(y.machine), which receives it's data arguments from one or more "*
"nodes in a learning network. Possibly, one of these nodes "*
"is delivering data that is incompatible "*
"with the machine's model.\n"*diagnostics
else
@error "Failed "*
"to apply the operation `$(y.operation)`."*diagnostics
end
rethrow(exception)
end
end
ScientificTypes.elscitype(N::Node) = Unknown
function ScientificTypes.elscitype(
N::Node{<:Machine{<:Union{Deterministic,Unsupervised}}})
if N.operation == MLJBase.predict
return target_scitype(N.machine.model)
elseif N.operation == MLJBase.transform
return output_scitype(N.machine.model)
elseif N.operation == MLJBase.inverse_transform
return input_scitype(N.machine.model)
end
return Unknown
end
# TODO after
# https://github.com/JuliaAI/ScientificTypesBase.jl/issues/102 :
# Add Probabilistic case to above
ScientificTypes.scitype(N::Node) = CallableReturning{elscitype(N)}
## FITTING A NODE
# flush a (possibly remote) channel
GenericChannel{T} = Union{Channel{T}, Distributed.RemoteChannel{<:Channel{T}}}
function flush!(c::GenericChannel{T}) where T
ret = T[]
while isready(c)
push!(ret, take!(c))
end
return ret
end
"""
fit!(N::Node;
rows=nothing,
verbosity=1,
force=false,
acceleration=CPU1())
Train all machines required to call the node `N`, in an appropriate
order, but parallelizing where possible using specified `acceleration`
mode. These machines are those returned by `machines(N)`.
Supported modes of `acceleration`: `CPU1()`, `CPUThreads()`.
"""
fit!(y::Node; acceleration=CPU1(), kwargs...) =
fit!(y::Node, acceleration; kwargs...)
fit!(y::Node, ::AbstractResource; kwargs...) =
error("Only `acceleration=CPU1()` and `acceleration=CPUThreads()` currently supported")
function fit!(y::Node, ::CPU1; kwargs...)
_machines = machines(y)
# flush the fit_okay channels:
@sync begin
for mach in _machines
@async flush!(mach.fit_okay)
end
end
# fit the machines asynchronously;
@sync begin
for mach in _machines
@async fit_only!(mach, true; kwargs...)
end
end
return y
end
function fit!(y::Node, ::CPUThreads; kwargs...)
_machines = machines(y)
# flush the fit_okay channels:
for mach in _machines
flush!(mach.fit_okay)
end
# fit the machines in Multithreading mode
@sync for mach in _machines
Threads.@spawn fit_only!(mach, true; kwargs...)
end
return y
end
fit!(S::Source; args...) = S
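# Illustrative sketch of training a learning network at a node (hypothetical; `model`
# stands for any MLJ supervised model instance, and `X`, `y` for training data):
#
#     Xs, ys = source(X), source(y)
#     mach   = machine(model, Xs, ys)
#     yhat   = predict(mach, Xs)                 # a `Node`
#     fit!(yhat)                                 # trains `mach` and any upstream machines
#     fit!(yhat, acceleration=CPUThreads())      # ditto, fitting machines on separate threads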
# allow arguments of `Nodes` and `Machine`s to appear
# at REPL:
istoobig(d::Tuple{AbstractNode}) = length(d) > 10
# # DISPLAY
_formula(stream::IO, X::AbstractNode, indent) =
(print(stream, repeat(' ', indent));_formula(stream, X, 0, indent))
_formula(stream::IO, X::Source, depth, indent) = show(stream, X)
function _formula(stream, X::Node, depth, indent)
operation_name = string(typeof(X.operation).name.mt.name)
anti = max(length(operation_name) - INDENT)
print(stream, operation_name, "(")
n_args = length(X.args)
if X.machine !== nothing
print(stream, crind(indent + length(operation_name) - anti))
printstyled(IOContext(stream, :color=>SHOW_COLOR[]),
#handle(X.machine),
X.machine,
bold=SHOW_COLOR[])
n_args == 0 || print(stream, ", ")
end
for k in 1:n_args
print(stream, crind(indent + length(operation_name) - anti))
_formula(stream, X.args[k],
depth + 1,
indent + length(operation_name) - anti )
k == n_args || print(stream, ",")
end
print(stream, ")")
end
function Base.show(io::IO, ::MIME"text/plain", X::Node)
println(io, "$X")
println(io, " args:")
for i in eachindex(X.args)
arg = X.args[i]
println(io, " $i:\t$arg")
end
print(io, " formula:\n")
_formula(io, X, 4)
end
# for displaying within other objects:
function Base.show(stream::IO, object::Node)
str = simple_repr(typeof(object)) * " $(handle(object))"
mach = object.machine
extra = isnothing(mach) ? "" :
mach.model isa Symbol ? " → :$(mach.model)" :
" → $(simple_repr(typeof(mach.model)))(…)"
str *= extra
print(stream, str)
return nothing
end
## REPORTS AND FITRESULTS FOR NODES
# Both of these exposed but not intended for public use
# here `f` is `report` or `fitted_params`; returns a named tuple:
function item_given_machine(f, N)
machs = machines(N) |> reverse
items = map(machs) do m
try
f(m)
catch exception
if exception isa UndefRefError
error("UndefRefError intercepted. Perhaps "*
"you forgot to `fit!` a machine or node?")
else
throw(exception)
end
end
end
key = f isa typeof(MLJBase.report) ?
:report_given_machine :
:fitted_params_given_machine
dict = LittleDict(machs[j] => items[j] for j in eachindex(machs))
return NamedTuple{(:machines, key)}((machs, dict))
end
report(N::Node) = item_given_machine(MLJBase.report, N)
report(::Source) = NamedTuple()
MLJModelInterface.fitted_params(N::Node) =
item_given_machine(fitted_params, N)
MLJModelInterface.fitted_params(S::Source) = NamedTuple()
## SYNTACTIC SUGAR FOR LEARNING NETWORKS
"""
N = node(f::Function, args...)
Defines a `Node` object `N` wrapping a static operation `f` and arguments
`args`. Each of the `n` elements of `args` must be a `Node` or `Source`
object. The node `N` has the following calling behaviour:
N() = f(args[1](), args[2](), ..., args[n]())
N(rows=r) = f(args[1](rows=r), args[2](rows=r), ..., args[n](rows=r))
N(X) = f(args[1](X), args[2](X), ..., args[n](X))
"""
node(args...) = Node(args...)
"""
J = node(f, mach::Machine, args...)
Defines a dynamic `Node` object `J` wrapping a dynamic operation `f`
(`predict`, `predict_mean`, `transform`, etc), a nodal machine `mach` and
arguments `args`. Its calling behaviour, which depends on the outcome of
training `mach` (and, implicitly, on training outcomes affecting its
arguments) is this:
J() = f(mach, args[1](), args[2](), ..., args[n]())
J(rows=r) = f(mach, args[1](rows=r), args[2](rows=r), ..., args[n](rows=r))
J(X) = f(mach, args[1](X), args[2](X), ..., args[n](X))
Generally `n=1` or `n=2` in this latter case.
predict(mach, X::AbstractNode, y::AbstractNode)
predict_mean(mach, X::AbstractNode, y::AbstractNode)
predict_median(mach, X::AbstractNode, y::AbstractNode)
predict_mode(mach, X::AbstractNode, y::AbstractNode)
transform(mach, X::AbstractNode)
inverse_transform(mach, X::AbstractNode)
Shortcuts for `J = node(predict, mach, X, y)`, etc.
Calling a node is a recursive operation which terminates in the call
to a source node (or nodes). Calling a node on *new* data `X` fails unless the
node has a unique origin.
See also: [`Node`](@ref), [`@node`](@ref), [`source`](@ref), [`origins`](@ref).
"""
node
"""
@node f(...)
Construct a new node that applies the function `f` to some combination
of nodes, sources and other arguments.
*Important.* An argument not in global scope is assumed to be a node
or source.
### Examples
```julia-repl
julia> X = source(π)
julia> W = @node sin(X)
julia> W()
1.2246467991473532e-16
julia> X = source(1:10)
julia> Y = @node selectrows(X, 3:4)
julia> Y()
3:4
julia> Y(["one", "two", "three", "four"])
2-element Vector{String}:
"three"
"four"
julia> X1 = source(4)
julia> X2 = source(5)
julia> add(a, b, c) = a + b + c
julia> N = @node add(X1, 1, X2)
julia> N()
10
```
See also [`node`](@ref)
"""
macro node(ex)
ex.head == :call || error("@node syntax error")
exs = ex.args
f_ex = first(exs)
arg_exs = exs[2:end]
# build lambda expression lambda_left -> lambda_right
stuff =
first.(map(arg_exs) do ex
pair = (:nothing, false)
try
evaluated = __module__.eval(ex)
if evaluated isa AbstractNode
pair = gensym("node"), true
else
pair = ex, false
end
catch e
if e isa UndefVarError
pair = gensym("node"), true
else
error()
end
end
end |> zip)
right = first.(stuff)
mask = last.(stuff)
left = right[mask]
lambda_left = Expr(:tuple, left...)
lambda_right = Expr(:call, f_ex, right...)
lambda_ex = Expr(:->, lambda_left, lambda_right)
# the node-only arguments:
node_arg_exs = arg_exs[mask]
esc(quote
node($lambda_ex, $(node_arg_exs...))
end)
end
"""
glb(N1, N2, ...)
Given nodes `N1`, `N2`, ... , construct a node `N` with the behaviour
`N() = (N1(), N2(), ...)`. That is, `glb` is `tuple` overloaded for
nodes.
Equivalent to `@tuple N1 N2 ...`
"""
glb(X::AbstractNode...) = node(tuple, X...)
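# Illustrative sketch (hypothetical sources):
#
#     N1, N2 = source(1), source(2)
#     N = glb(N1, N2)             # equivalent to `@tuple N1 N2`
#     N()                         # == (1, 2)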
"""
@tuple N1 N2 ...
Construct a node `N` whose calling behaviour is `N() = (N1(), N2(), ...)`.
"""
macro tuple(ex...)
esc(quote
glb($(ex...))
end)
end
"""
nrows(X::AbstractNode)
Return a new node `N` such that `N() = nrows(X())` and `N(rows=rows) =
nrows(X(rows=rows))`. To obtain the number of rows of data at the
source of `X`, use `nrows_at_source(X)`.
"""
MLJModelInterface.nrows(X::AbstractNode) = node(nrows, X)
MMI.matrix(X::AbstractNode) = node(matrix, X)
MMI.table(X::AbstractNode) = node(table, X)
Base.vcat(args::AbstractNode...) = node(vcat, args...)
Base.hcat(args::AbstractNode...) = node(hcat, args...)
Statistics.mean(X::AbstractNode) = node(v->mean.(v), X)
Statistics.median(X::AbstractNode) = node(v->median.(v), X)
StatsBase.mode(X::AbstractNode) = node(v->mode.(v), X)
Base.log(X::AbstractNode) = node(v->log.(v), X)
Base.exp(X::AbstractNode) = node(v->exp.(v), X)
Base.first(X::AbstractNode) = node(first, X)
Base.last(X::AbstractNode) = node(last, X)
+(y1::AbstractNode, y2::AbstractNode) = node(+, y1, y2)
+(x, y::AbstractNode) = node(y->x + y, y)
+(y::AbstractNode, x) = node(y->y + x, y)
*(y1::AbstractNode, y2::AbstractNode) = node(*, y1, y2)
*(x, y::AbstractNode) = node(y->x*y, y)
*(y::AbstractNode, x) = node(y->y*x, y)
/(y1::AbstractNode, y2::AbstractNode) = node(/, y1, y2)
/(x, y::AbstractNode) = node(y->x/y, y)
/(y::AbstractNode, x) = node(y->y/x, y)
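# Illustrative sketch of node arithmetic (hypothetical data):
#
#     Xs = source(3.0)
#     N  = 2*Xs + 1               # nested `Node`s built from the overloadings above
#     N()                         # == 7.0
#     M  = exp(source([0.0, 1.0]))
#     M()                         # == [1.0, 2.718281828459045]   (broadcasted `exp`)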
"""
selectcols(X::AbstractNode, c)
Returns `Node` object `N` such that `N() = selectcols(X(), c)`.
"""
MMI.selectcols(X::AbstractNode, r) = node(XX->selectcols(XX, r), X)
"""
selectrows(X::AbstractNode, r)
Returns a `Node` object `N` such that `N() = selectrows(X(), r)` (and
`N(rows=s) = selectrows(X(rows=s), r)`).
"""
MMI.selectrows(X::AbstractNode, r) = node(XX->selectrows(XX, r), X)
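# Illustrative sketch (hypothetical column table):
#
#     Xs = source((a=[1, 2, 3], b=[4, 5, 6]))
#     N  = selectrows(selectcols(Xs, :a), 1:2)
#     N()                         # expected: [1, 2]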
# for accessing and setting model hyperparameters at node:
getindex(n::Node{<:Machine{<:Model}}, s::Symbol) =
getproperty(n.machine.model, s)
setindex!(n::Node{<:Machine{<:Model}}, v, s::Symbol) =
setproperty!(n.machine.model, s, v)
# `Base.replace` overloadings for use in learning networks
# # HELPERS
"""
machine_replacement(node, newmodel_given_old, newnode_given_old, serializable)
**Private method** only called by [`update_mappings_with_node!`](@ref).
If `serializable=false`, return a new machine instance by copying `node.machine` and changing
the `model` and `args` field values as derived from the provided dictionaries. In this way
the returned machine is hooked into the new learning network defined by the values of
`newnode_given_old`.
If `serializable=true`, return a serializable copy instead, but make no model replacement.
The `newmodel_given_old` dictionary is still used, but now to look up the concrete model
corresponding to the symbolic one stored in `node`'s machine.
See also [`serializable`](@ref).
"""
function machine_replacement(
N::AbstractNode,
newmodel_given_old,
newnode_given_old,
serializable
)
# the `replace` called below is defined in src/machines.jl.
newmodel = newmodel_given_old[N.machine.model]
mach = serializable ? MLJBase.serializable(N.machine, newmodel) :
replace(N.machine, :model => newmodel)
mach.args = Tuple(newnode_given_old[arg] for arg in N.machine.args)
return mach
end
"""
update_mappings_with_node!(
newnode_given_old,
newmach_given_old,
newmodel_given_old,
serializable,
node::AbstractNode)
**Private method.**
This is a method called, in appropriate sequence, over each `node` in a learning network
being duplicated. If `node` is not a `Source`, it updates the three dictionary arguments
which link the new network to the old one, and otherwise does nothing.
Only `_replace` calls this method.
"""
function update_mappings_with_node!(
newnode_given_old,
newmach_given_old,
newmodel_given_old,
serializable,
N::AbstractNode,
)
args = [newnode_given_old[arg] for arg in N.args]
if isnothing(N.machine)
newnode_given_old[N] = node(N.operation, args...)
else
if N.machine in keys(newmach_given_old)
m = newmach_given_old[N.machine]
else
m = machine_replacement(N, newmodel_given_old, newnode_given_old, serializable)
newmach_given_old[N.machine] = m
end
newnode_given_old[N] = Node(N.operation, m, args...)
end
end
update_mappings_with_node!(::Any, ::Any, ::Any, ::Any, N::Source) = nothing
# # REPLACE METHODS
const DOC_REPLACE_OPTIONS =
"""
# Options
- `empty_unspecified_sources=false`: If `true`, any source nodes not specified are
replaced with empty source nodes.
- `copy_unspecified_deeply=true`: If `false`, models or sources not listed for
replacement are identically equal in the original and returned node.
- `serializable=false`: If `true`, all machines in the new network are made
serializable and the specified model replacements are only used for serialization
purposes: for each pair `s => model` (`s` assumed to be a symbolic model) each
machine with model `s` is replaced with `serializable(mach, model)`. All unspecified
sources are always replaced with empty ones.
"""
"""
replace(node, a1=>b1, a2=>b2, ...; options...)
Recursively copy `node` and all nodes in the learning network for which it is a least
upper bound, but replacing any specified sources and models `a1, a2, ...` of that network
with `b1, b2, ...`.
$DOC_REPLACE_OPTIONS
"""
function Base.replace(W::AbstractNode, pairs::Pair...; kwargs...)
newnode_given_old = _replace(W, pairs...; kwargs...)
return newnode_given_old[W]
end
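# Illustrative sketch of `replace` on a node (hypothetical data):
#
#     Xs = source([1, 2, 3])
#     N  = node(sum, Xs)
#     N2 = replace(N, Xs => source([4, 5, 6]))
#     N2()                        # == 15; the original `N` is untouched, so N() == 6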
"""
replace(signature, a1=>b1, a2=>b2, ...; options...)
Copy the provided learning network signature, including the complete underlying learning
network, but replacing any specified sources and models `a1, a2, ...` of the original
underlying network with `b1, b2, ...`.
$DOC_REPLACE_OPTIONS
See also [`MLJBase.Signature`](@ref).
"""
function Base.replace(signature::Signature, pairs::Pair...; node_dict=false, kwargs...)
# If `node_dict` is true, then we additionally return `newnode_given_old` computed
# below.
operation_nodes = values(MLJBase.operation_nodes(signature))
report_nodes = values(MLJBase.report_nodes(signature))
fitted_params_nodes = values(MLJBase.fitted_params_nodes(signature))
W = glb(operation_nodes..., fitted_params_nodes..., report_nodes...)
newnode_given_old = _replace(W, pairs...; kwargs...)
# instantiate special node dictionaries:
newoperation_node_given_old =
IdDict{AbstractNode,AbstractNode}()
newfitted_params_node_given_old =
IdDict{AbstractNode,AbstractNode}()
newreport_node_given_old =
IdDict{AbstractNode,AbstractNode}()
# update those dictionaries based on the output of `_replace`:
for N in Set(operation_nodes) ∪ Set(report_nodes) ∪ Set(fitted_params_nodes)
if N in operation_nodes # could be `Source`
newoperation_node_given_old[N] = newnode_given_old[N]
elseif N in fitted_params_nodes
newfitted_params_node_given_old[N] = newnode_given_old[N]
elseif N in report_nodes
newreport_node_given_old[N] = newnode_given_old[N]
end
end
# assemble the new signature:
newoperation_nodes = Tuple(newoperation_node_given_old[N] for N in
operation_nodes)
newfitted_params_nodes =
Tuple(newfitted_params_node_given_old[N] for N in fitted_params_nodes)
newreport_nodes =
Tuple(newreport_node_given_old[N] for N in report_nodes)
fitted_params_tuple =
NamedTuple{keys(MLJBase.fitted_params_nodes(signature))}(newfitted_params_nodes)
report_tuple =
NamedTuple{keys(MLJBase.report_nodes(signature))}(newreport_nodes)
operation_tuple =
NamedTuple{MLJBase.operations(signature)}(newoperation_nodes)
_clean(named_tuple) = isempty(first(named_tuple)) ? NamedTuple() : named_tuple
newsignature = merge(
operation_tuple,
(fitted_params=fitted_params_tuple,) |> _clean,
(report=report_tuple,) |> _clean,
) |> MLJBase.Signature
node_dict || return newsignature
return newsignature, newnode_given_old
end
# Copy the complete learning network having `W` as a greatest lower bound, executing the
# specified replacements, and return the dictionary mapping old nodes to new nodes.
function _replace(
W::AbstractNode,
pairs::Pair...;
empty_unspecified_sources=false,
copy_unspecified_deeply=true,
serializable=false,
)
serializable && (empty_unspecified_sources = true)
clone(item) = copy_unspecified_deeply ? deepcopy(item) : item
# Instantiate model dictionary:
model_pairs = filter(collect(pairs)) do pair
first(pair) isa Model || first(pair) isa Symbol
end
models_ = models(W)
models_to_copy = setdiff(models_, first.(model_pairs))
model_copy_pairs = [model=>clone(model) for model in models_to_copy]
newmodel_given_old = IdDict(vcat(model_pairs, model_copy_pairs))
# build complete source replacement pairs:
sources_ = sources(W)
specified_source_pairs = filter(collect(pairs)) do pair
first(pair) isa Source
end
unspecified_sources = setdiff(sources_, first.(specified_source_pairs))
unspecified_sources_wrapping_something =
filter(s -> !isempty(s), unspecified_sources)
if !isempty(unspecified_sources_wrapping_something) &&
!empty_unspecified_sources && copy_unspecified_deeply
@warn "No replacement specified for one or more non-empty source "*
"nodes. Contents will be duplicated. Perhaps you want to specify "*
"`copy_unspecified_deeply=false` to prevent copying of sources "*
"and models, or `empty_unspecified_sources=true`."
end
if empty_unspecified_sources
unspecified_source_pairs = [s => source() for
s in unspecified_sources]
else
unspecified_source_pairs = [s => clone(s) for
s in unspecified_sources]
end
all_source_pairs = vcat(specified_source_pairs, unspecified_source_pairs)
# initialization:
newnode_given_old = IdDict{AbstractNode,AbstractNode}(all_source_pairs)
newmach_given_old = IdDict{Machine,Machine}()
# build the new network:
for N in nodes(W)
MLJBase.update_mappings_with_node!(
newnode_given_old,
newmach_given_old,
newmodel_given_old,
serializable,
N
)
end
return newnode_given_old
end
# a signature is just a thin wrapper for what the user knows as a "learning network
# interface"; see constant DOC_NETWORK_INTERFACES below for details.
# # HELPERS
"""
machines_given_model(node::AbstractNode)
**Private method.**
Return a dictionary of machines, keyed on model, for all machines in the
completed learning network for which `node` is the greatest lower bound. Only
machines bound to symbolic models are included. Values are always vectors,
even if they contain only a single machine.
"""
function machines_given_model(node::AbstractNode)
ret = LittleDict{Symbol,Any}()
for mach in machines(node)
model = mach.model
model isa Symbol || continue
if !haskey(ret, model)
ret[model] = Any[mach,]
else
push!(ret[model], mach)
end
end
return ret
end
attempt_scalarize(v) = length(v) == 1 ? v[1] : v
"""
tuple_keyed_on_model(f, machines_given_model; scalarize=true, drop_nothings=true)
**Private method.**
Given a dictionary of machine vectors, keyed on model names (symbols), broadcast
`f` over each vector, and make the result, in the returned named tuple, the
value associated with the corresponding model name as key.
Singleton vector values are scalarized, unless `scalarize = false`.
If a value in the computed named tuple is `nothing`, or a vector of `nothing`s,
then the entry is dropped from the tuple, unless `drop_nothings=false`.
"""
function tuple_keyed_on_model(f, machines_given_model; scalarize=true, drop_nothings=true)
models = keys(machines_given_model) |> collect
named_tuple_values = map(models) do model
value = [f(m) for m in machines_given_model[model]]
scalarize && return attempt_scalarize(value)
return value
end
if drop_nothings
mask = map(named_tuple_values) do v
!(isnothing(v) || (v isa AbstractVector && eltype(v) === Nothing))
end |> collect
models = models[mask]
named_tuple_values = named_tuple_values[mask]
end
return NamedTuple{tuple(models...)}(tuple(named_tuple_values...))
end
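# Illustrative sketch (hypothetical; in practice the dictionary values are vectors of
# machines, but any values will do):
#
#     d = LittleDict(:one => Any[1, 2], :two => Any[3])
#     tuple_keyed_on_model(x -> 10x, d)   # == (one = [10, 20], two = 30)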
const ERR_CALL_AND_COPY = ArgumentError(
"Expected something of `AbstractNode` type in a learning network interface "*
"but got something else. "
)
"""
call_and_copy(x)
**Private method.**
If `x` is an `AbstractNode`, then return a deep copy of `x()`. If `x` is a named tuple
`(k1=n1, k2=n2, ...)`, then "broadcast" `call_and_copy` over the values `n1`, `n2`, ...,
to get a new named tuple with the same keys.
"""
call_and_copy(::Any) = throw(ERR_CALL_AND_COPY)
call_and_copy(n::AbstractNode) = deepcopy(n())
function call_and_copy(nt::NamedTuple)
_keys = keys(nt)
_values = deepcopy(values(nt))
return NamedTuple{_keys}(call_and_copy.(_values))
end
# # DOC STRING
const DOC_NETWORK_INTERFACES =
"""
A *learning network interface* is a named tuple declaring certain interface points in
a learning network, to be used when "exporting" the network as a new stand-alone model
type. Examples are
(predict=yhat,)
(transform=Xsmall, acceleration=CPUThreads())
(predict=yhat, transform=W, report=(loss=loss_node,))
Here `yhat`, `Xsmall`, `W` and `loss_node` are nodes in the network.
The keys of the learning network interface are always one of the following:
- The name of an operation, such as `:predict`, `:predict_mode`, `:transform`,
`:inverse_transform`. See "Operation keys" below.
- `:report`, for exposing results of calling a node *with no arguments* in the
composite model report. See "Including report nodes" below.
- `:fitted_params`, for exposing results of calling a node *with no arguments* as
fitted parameters of the composite model. See "Including fitted parameter nodes"
below.
- `:acceleration`, for articulating acceleration mode for training the network, e.g.,
`CPUThreads()`. Corresponding value must be an `AbstractResource`. If not included,
`CPU1()` is used.
### Operation keys
If the key is an operation, then the value must be a node `n` in the network with a
unique origin (`length(origins(n)) === 1`). The intention of a declaration such as
`predict=yhat` is that the exported model type implements `predict`, which, when
applied to new data `Xnew`, should return `yhat(Xnew)`.
#### Including report nodes
If the key is `:report`, then the corresponding value must be a named tuple
(k1=n1, k2=n2, ...)
whose values are all nodes. For each `k=n` pair, the key `k` will appear as a key in
the composite model report, with a corresponding value of `deepcopy(n())`, called
immediately after training or updating the network. For examples, refer to the
"Learning Networks" section of the MLJ manual.
#### Including fitted parameter nodes
If the key is `:fitted_params`, then the behaviour is as for report nodes but results
are exposed as fitted parameters of the composite model instead of the report.
"""
# # SIGNATURES
"""
Signature(interface::NamedTuple)
**Private type.**
Return a thinly wrapped version of a learning network interface (defined below). Unwrap
with `MLJBase.unwrap`:
```julia
interface = (predict=source(), report=(loss=source(),))
signature = MLJBase.Signature(interface)
@assert MLJBase.unwrap(signature) === interface
```
$DOC_NETWORK_INTERFACES
"""
struct Signature{S<:NamedTuple}
interface::S
end
unwrap(signature::Signature) = signature.interface
# # METHODS
"""
operation_nodes(signature)
**Private method.**
Return the operation nodes of `signature`, as a named tuple keyed on operation names.
See also [`MLJBase.Signature`](@ref).
"""
function operation_nodes(signature::Signature)
interface = unwrap(signature)
ops = filter(in(OPERATIONS), keys(interface))
return NamedTuple{ops}(map(op->getproperty(interface, op), ops))
end
"""
report_nodes(signature)
**Private method.**
Return the report nodes of `signature`, as a named tuple.
See also [`MLJBase.Signature`](@ref).
"""
function report_nodes(signature::Signature)
interface = unwrap(signature)
:report in keys(interface) || return NamedTuple()
return interface.report
end
"""
fitted_params_nodes(signature)
**Private method.**
Return the fitted parameter nodes of `signature`, as a named tuple.
See also [`MLJBase.Signature`](@ref).
"""
function fitted_params_nodes(signature::Signature)
interface = unwrap(signature)
:fitted_params in keys(interface) || return NamedTuple()
return interface.fitted_params
end
"""
acceleration(signature)
**Private method.**
Return the acceleration mode of `signature`.
See also [`MLJBase.Signature`](@ref).
"""
function acceleration(signature::Signature)
interface = unwrap(signature)
:acceleration in keys(interface) || return CPU1()
return interface.acceleration
end
"""
operations(signature)
**Private method.**
Return the names of all operations in `signature`.
See also [`MLJBase.Signature`](@ref).
"""
operations(signature::Signature) = keys(operation_nodes(signature))
"""
glb(signature::Signature)
**Private method.**
Return the greatest lower bound of all operation nodes, report nodes and fitted parameter
nodes associated with `signature`.
See also [`MLJBase.Signature`](@ref).
"""
function glb(signature::Signature)
grab(f) = values(f(signature)) |> collect
nodes = vcat(
grab(operation_nodes),
grab(report_nodes),
grab(fitted_params_nodes),
)
return glb(nodes...)
end
"""
age(signature::Signature)
**Private method.**
Return the sum of the ages of all machines in the underlying network of `signature`.
See also [`MLJBase.Signature`](@ref).
"""
age(signature::Signature) = sum(age, machines(glb(signature)))
"""
report_supplement(signature)
**Private method.**
Generate a deep copy of the supplementary report defined by the signature (that part of
the composite model report coming from report nodes in the signature). This is a named
tuple.
See also [`MLJBase.Signature`](@ref).
"""
report_supplement(signature::Signature) = call_and_copy(report_nodes(signature))
"""
fitted_params_supplement(signature)
**Private method.**
Generate a deep copy of the supplementary fitted parameters defined by the signature (that
part of the composite model fitted parameters coming from fitted parameter nodes in the
signature). This is a named tuple.
See also [`MLJBase.Signature`](@ref).
"""
fitted_params_supplement(signature::Signature) = call_and_copy(fitted_params_nodes(signature))
"""
report(signature; supplement=true)
**Private method.**
Generate a report for the learning network associated with `signature`, including the
supplementary report.
Suppress calling of the report nodes of `signature`, and exclude their contribution to
the output, by specifying `supplement=false`.
See also [`MLJBase.report_supplement`](@ref).
See also [`MLJBase.Signature`](@ref).
"""
function report(signature::Signature; supplement=true)
greatest_lower_bound = glb(signature)
supplement_report = supplement ? MLJBase.report_supplement(signature) : NamedTuple()
d = MLJBase.machines_given_model(greatest_lower_bound)
internal_report = MLJBase.tuple_keyed_on_model(report, d)
merge(internal_report, supplement_report)
end
"""
fitted_params(signature; supplement=true)
**Private method.**
Generate a fitted_params for the learning network associated with `signature`, including
the supplementary fitted_params.
Suppress calling of the fitted_params nodes of `signature`, and exclude their
contribution to the output, by specifying `supplement=false`.
See also [`MLJBase.fitted_params_supplement`](@ref).
See also [`MLJBase.Signature`](@ref).
"""
function fitted_params(signature::Signature; supplement=true)
greatest_lower_bound = glb(signature)
supplement_fitted_params =
supplement ? MLJBase.fitted_params_supplement(signature) : NamedTuple()
d = MLJBase.machines_given_model(greatest_lower_bound)
internal_fitted_params = MLJBase.tuple_keyed_on_model(fitted_params, d)
merge(internal_fitted_params, supplement_fitted_params)
end
"""
output_and_report(signature, operation, Xnew...)
**Private method.**
Duplicate `signature` and return appropriate output for the specified `operation` (a key
of `signature`) applied to the duplicate, together with the operational report. Report
nodes of `signature` are not called, and they make no contribution to that report.
Return value has the form `(output, report)`.
See also [`MLJBase.Signature`](@ref).
"""
function output_and_report(signature, operation, Xnew)
signature_clone = replace(signature, copy_unspecified_deeply=false)
output = getproperty(MLJBase.unwrap(signature_clone), operation)(Xnew)
report = MLJBase.report(signature_clone; supplement=false)
return output, report
end
# special case for static transformers with multiple inputs:
output_and_report(signature, operation, Xnew...) =
output_and_report(signature, operation, Xnew)
# See network_composite_types.jl for type definitions
caches_data_by_default(::Type{<:NetworkComposite}) = false
# # PREFIT STUB
"""
MLJBase.prefit(model, verbosity, data...)
Returns a learning network interface (see below) for a learning network with source nodes
that wrap `data`.
A user overloads `MLJBase.prefit` when exporting a learning network as a new stand-alone
model type, of which `model` above will be an instance. See the MLJ reference manual for
details.
$DOC_NETWORK_INTERFACES
"""
function prefit end
function MLJModelInterface.fit(composite::NetworkComposite, verbosity, data...)
# fitresult is the signature of a learning network:
fitresult = prefit(composite, verbosity, data...) |> MLJBase.Signature
# train the network:
greatest_lower_bound = MLJBase.glb(fitresult)
acceleration = MLJBase.acceleration(fitresult)
fit!(greatest_lower_bound; verbosity, composite, acceleration)
report = MLJBase.report(fitresult)
# for passing to `update` so changes in `composite` can be detected:
cache = deepcopy(composite)
return fitresult, cache, report
end
"""
start_over(composite, old_composite, greatest_lower_bound)
**Private method.**
Return `true` if and only if `old_composite` and `composite` differ in the value of a
property that is *not* also the name of a (symbolic) model in the network with specified
`greatest_lower_bound` (a "non-model" hyperparameter).
"""
function start_over(composite, old_composite, greatest_lower_bound)
model_fields = MLJBase.models(greatest_lower_bound)
any(propertynames(composite)) do field
field in model_fields && return false
old_value = getproperty(old_composite, field)
value = getproperty(composite, field)
value != old_value
end
end
function MLJModelInterface.update(
composite::NetworkComposite,
verbosity,
fitresult,
old_composite,
data...,
)
greatest_lower_bound = MLJBase.glb(fitresult)
start_over = MLJBase.start_over(composite, old_composite, greatest_lower_bound)
start_over && return MLJModelInterface.fit(composite, verbosity, data...)
# retrain the network:
fit!(greatest_lower_bound; verbosity, composite)
report = MLJBase.report(fitresult)
# for passing to `update` so changes in `composite` can be detected:
cache = deepcopy(composite)
return fitresult, cache, report
end
MLJModelInterface.fitted_params(composite::NetworkComposite, signature) =
fitted_params(signature)
MLJModelInterface.reporting_operations(::Type{<:NetworkComposite}) = OPERATIONS
# here `fitresult` has type `Signature`.
function save(model::NetworkComposite, fitresult)
# The network includes machines with symbolic models. These machines need to be
# replaced by serializable versions, but we cannot naively use `serializable(mach)`,
# because the absence of the concrete model means this just returns `mach` (because
# `save(::Symbol, fitresult)` returns `fitresult`). We need to use the special
# `serializable(mach, model)` instead. This is what `replace` below does, because we
# pass it the flag `serializable=true` but we must also pass `symbol =>
# concrete_model` replacements, which we calculate first:
greatest_lower_bound = MLJBase.glb(fitresult)
machines_given_model = MLJBase.machines_given_model(greatest_lower_bound)
atomic_models = keys(machines_given_model)
pairs = [atom => getproperty(model, atom) for atom in atomic_models]
replace(fitresult, pairs...; serializable=true)
end
function MLJModelInterface.restore(model::NetworkComposite, serializable_fitresult)
greatest_lower_bound = MLJBase.glb(serializable_fitresult)
machines_given_model = MLJBase.machines_given_model(greatest_lower_bound)
atomic_models = keys(machines_given_model)
# the following indirectly mutates `serializable_fitresult`, returning it to
# usefulness:
for atom in atomic_models
for mach in machines_given_model[atom]
mach.fitresult = MLJBase.restore(getproperty(model, atom), mach.fitresult)
mach.state = 1
end
end
return serializable_fitresult
end
# For example, we want to define
# abstract type ProbabilisticNetworkComposite <: Probabilistic end
# but also want this for all the abstract `Model` subtypes:
const NETWORK_COMPOSITE_TYPES = Symbol[]
const network_composite_types = Any[]
for T in MLJModelInterface.ABSTRACT_MODEL_SUBTYPES
network_composite_type_name = string(T, "NetworkComposite") |> Symbol
@eval(abstract type $network_composite_type_name <: $T end)
push!(NETWORK_COMPOSITE_TYPES, network_composite_type_name)
push!(network_composite_types, @eval($network_composite_type_name))
end
const NetworkComposite = Union{network_composite_types...}
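# Illustrative sketch: the loop above effectively generates definitions such as
#
#     abstract type ProbabilisticNetworkComposite <: Probabilistic end
#     abstract type DeterministicNetworkComposite <: Deterministic end
#
# with `NetworkComposite` the union of all such generated types.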
MLJModelInterface.is_wrapper(::Type{<:NetworkComposite}) = true
MLJModelInterface.package_name(::Type{<:NetworkComposite}) = "MLJBase"
# Code to construct pipelines without macros
# ## Note on mutability.
# The components in a pipeline, as defined here, can be replaced so
# long as their "abstract supertype" (eg, `Probabilistic`) remains the
# same. This is the type returned by `abstract_type()`; in the present
# code it will always be one of the types listed in
# `SUPPORTED_TYPES_FOR_PIPELINES` below, or `Any`, if `component` is
# not a model (which, by assumption, means it is callable).
# # HELPERS
# modify collection of symbols to guarantee uniqueness. For example,
# individuate([:x, :y, :x, :x]) = [:x, :y, :x2, :x3]
function individuate(v)
isempty(v) && return v
ret = Symbol[first(v),]
for s in v[2:end]
s in ret || (push!(ret, s); continue)
n = 2
candidate = s
while true
candidate = string(s, n) |> Symbol
candidate in ret || break
n += 1
end
push!(ret, candidate)
end
return ret
end
function as_type(prediction_type::Symbol)
if prediction_type == :deterministic
return Deterministic
elseif prediction_type == :probabilistic
return Probabilistic
elseif prediction_type == :interval
return Interval
else
return Unsupervised
end
end
_instance(x) = x
_instance(T::Type{<:Model}) = T()
# # TYPES
const SUPPORTED_TYPES_FOR_PIPELINES = [
:Deterministic,
:Probabilistic,
:Interval,
:Unsupervised,
:Static]
const PIPELINE_TYPE_GIVEN_TYPE = Dict(
:Deterministic => :DeterministicPipeline,
:Probabilistic => :ProbabilisticPipeline,
:Interval => :IntervalPipeline,
:Unsupervised => :UnsupervisedPipeline,
:Static => :StaticPipeline)
const COMPOSITE_TYPE_GIVEN_TYPE = Dict(
:Deterministic => :DeterministicNetworkComposite,
:Probabilistic => :ProbabilisticNetworkComposite,
:Interval => :IntervalNetworkComposite,
:Unsupervised => :UnsupervisedNetworkComposite,
:Static => :StaticNetworkComposite)
const PREDICTION_TYPE_OPTIONS = [:deterministic,
:probabilistic,
:interval]
for T_ex in SUPPORTED_TYPES_FOR_PIPELINES
P_ex = PIPELINE_TYPE_GIVEN_TYPE[T_ex]
C_ex = COMPOSITE_TYPE_GIVEN_TYPE[T_ex]
quote
mutable struct $P_ex{N<:NamedTuple,operation} <: $C_ex
named_components::N
cache::Bool
$P_ex(operation, named_components::N, cache) where N =
new{N,operation}(named_components, cache)
end
end |> eval
end
# hack an alias for the union type, `SomePipeline{N,operation}` (not
# exported):
const _TYPE_EXS = map(values(PIPELINE_TYPE_GIVEN_TYPE)) do P_ex
:($P_ex{N,operation})
end
quote
const SomePipeline{N,operation} =
Union{$(_TYPE_EXS...)}
end |> eval
# not exported:
const SupervisedPipeline{N,operation} =
Union{DeterministicPipeline{N,operation},
ProbabilisticPipeline{N,operation},
IntervalPipeline{N,operation}}
components(p::SomePipeline) = values(getfield(p, :named_components))
names(p::SomePipeline) = keys(getfield(p, :named_components))
operation(p::SomePipeline{N,O}) where {N,O} = O
component_name_pairs(p::SomePipeline) = broadcast(Pair, components(p), names(p))
# # GENERIC CONSTRUCTOR
const PRETTY_PREDICTION_OPTIONS =
join([string("`:", opt, "`") for opt in PREDICTION_TYPE_OPTIONS],
", ",
", and ")
const ERR_TOO_MANY_SUPERVISED = ArgumentError(
"More than one supervised model in a pipeline is not permitted")
const ERR_EMPTY_PIPELINE = ArgumentError(
"Cannot create an empty pipeline. ")
err_prediction_type_conflict(supervised_model, prediction_type) =
ArgumentError("The pipeline's last component model has type "*
"`$(typeof(supervised_model))`, which conflicts "*
"with the declaration "*
"`prediction_type=$prediction_type`. ")
const INFO_TREATING_AS_DETERMINISTIC =
"Treating pipeline as a `Deterministic` predictor.\n"*
"To override, use `Pipeline` constructor with `prediction_type=...`. "*
"Options are $PRETTY_PREDICTION_OPTIONS. "
const ERR_INVALID_PREDICTION_TYPE = ArgumentError(
"Invalid `prediction_type`. Options are $PRETTY_PREDICTION_OPTIONS. ")
const WARN_IGNORING_PREDICTION_TYPE =
"Pipeline appears to have no supervised "*
"component models. Ignoring declaration "*
"`prediction_type=$(prediction_type)`. "
const ERR_MIXED_PIPELINE_SPEC = ArgumentError(
"Either specify all pipeline components without names, as in "*
"`Pipeline(model1, model2)` or specify names for all "*
"components, as in `Pipeline(myfirstmodel=model1, mysecondmodel=model2)`. ")
# The following combines its arguments into a named tuple, performing
# a number of checks and modifications. Specifically, it checks
# `components` is a valid sequence, modifies `names` to make them
# unique, and replaces the types appearing in the named tuple type
# parameters with their abstract supertypes. See the "Note on
# mutability" above.
function pipe_named_tuple(names, components)
isempty(names) && throw(ERR_EMPTY_PIPELINE)
# make keys unique:
names = names |> individuate |> Tuple
# check sequence:
supervised_components = filter(components) do c
c isa Supervised
end
length(supervised_components) < 2 ||
throw(ERR_TOO_MANY_SUPERVISED)
# return the named tuple:
types = abstract_type.(components)
NamedTuple{names,Tuple{types...}}(components)
end
"""
Pipeline(component1, component2, ... , componentk; options...)
Pipeline(name1=component1, name2=component2, ..., namek=componentk; options...)
component1 |> component2 |> ... |> componentk
Create an instance of a composite model type which sequentially composes
the specified components in order. This means `component1` receives
inputs, whose output is passed to `component2`, and so forth. A
"component" is either a `Model` instance, a model type (converted
immediately to its default instance) or any callable object. Here the
"output" of a model is what `predict` returns if it is `Supervised`,
or what `transform` returns if it is `Unsupervised`.
Names for the component fields are automatically generated unless
explicitly specified, as in
```julia
Pipeline(encoder=ContinuousEncoder(drop_last=false),
stand=Standardizer())
```
The `Pipeline` constructor accepts keyword `options` discussed further
below.
Ordinary functions (and other callables) may be inserted in the
pipeline as shown in the following example:
Pipeline(X->coerce(X, :age=>Continuous), OneHotEncoder, ConstantClassifier)
### Syntactic sugar
The `|>` operator is overloaded to construct pipelines out of models,
callables, and existing pipelines:
```julia
LinearRegressor = @load LinearRegressor pkg=MLJLinearModels add=true
PCA = @load PCA pkg=MultivariateStats add=true
pipe1 = MLJBase.table |> ContinuousEncoder |> Standardizer
pipe2 = PCA |> LinearRegressor
pipe1 |> pipe2
```
At most one of the components may be a supervised model, but this
model can appear in any position. A pipeline with a `Supervised`
component is itself `Supervised` and implements the `predict`
operation. It is otherwise `Unsupervised` (possibly `Static`) and
implements `transform`.
### Special operations
If all the `components` are invertible unsupervised models (ie,
implement `inverse_transform`) then `inverse_transform` is implemented
for the pipeline. If there are no supervised models, then `predict` is
nevertheless implemented, assuming the last component is a model that
implements it (some clustering models). Similarly, calling `transform`
on a supervised pipeline calls `transform` on the supervised
component.
### Transformers that need a target in training
Some transformers that have type `Unsupervised` (so that the output of `transform` is
propagated in pipelines) may require a target variable for training. An example are
so-called target encoders (which transform categorical input features, based on some
target observations). Provided they appear before any `Supervised` component in the
pipelines, such models are supported. Of course a target must be provided whenever
training such a pipeline, whether or not it contains a `Supervised` component.
### Optional key-word arguments
- `prediction_type` -
prediction type of the pipeline; possible values: `:deterministic`,
`:probabilistic`, `:interval` (default=`:deterministic` if not inferable)
- `operation` - operation applied to the supervised component model,
when present; possible values: `predict`, `predict_mean`,
`predict_median`, `predict_mode` (default=`predict`)
- `cache` - whether the internal machines created for component models
should cache model-specific representations of data (see
[`machine`](@ref)) (default=`true`)
!!! warning
Set `cache=false` to guarantee data anonymization.
To build more complicated non-branching pipelines, refer to the MLJ
manual sections on composing models.
"""
function Pipeline(args...; prediction_type=nothing,
operation=predict,
cache=true,
kwargs...)
# Components appear either as `args` (with names to be
# automatically generated) or in `kwargs`, but not both.
# This public constructor does checks and constructs a valid named
# tuple, `named_components`, to be passed onto a secondary
# constructor.
isempty(args) || isempty(kwargs) ||
throw(ERR_MIXED_PIPELINE_SPEC)
operation in eval.(PREDICT_OPERATIONS) ||
throw(ERR_INVALID_OPERATION)
prediction_type in PREDICTION_TYPE_OPTIONS || prediction_type === nothing ||
throw(ERR_INVALID_PREDICTION_TYPE)
# construct the named tuple of components:
if isempty(args)
_names = keys(kwargs)
_components = values(values(kwargs))
else
_names = Symbol[]
for c in args
generate_name!(c, _names, only=Model)
end
_components = args
end
# in case some components are specified as model *types* instead
# of instances:
components = _instance.(_components)
named_components = pipe_named_tuple(_names, components)
pipe = _pipeline(named_components, prediction_type, operation, cache)
message = clean!(pipe)
isempty(message) || @warn message
return pipe
end
function _pipeline(named_components::NamedTuple,
prediction_type,
operation,
cache)
# This method assumes all arguments are valid and includes the
# logic that determines which concrete pipeline's constructor
# needs calling.
components = values(named_components)
# Is this a supervised pipeline?
idx = findfirst(components) do c
c isa Supervised
end
is_supervised = idx !== nothing
is_supervised && @inbounds supervised_model = components[idx]
# Is this a static pipeline? A component is *static* if it is an
# instance of `Static <: Unsupervised` *or* a callable (anything
# that is not a model, by assumption). When all the components are
# static, the pipeline will be a `StaticPipeline`.
static_components = filter(components) do m
!(m isa Model) || m isa Static
end
is_static = length(static_components) == length(components)
# To make final pipeline type determination, we need to determine
# the corresponding abstract type (eg, `Probabilistic`) here called
# `super_type`:
if is_supervised
supervised_is_last = last(components) === supervised_model
if prediction_type !== nothing
super_type = as_type(prediction_type)
supervised_is_last && !(supervised_model isa super_type) &&
throw(err_prediction_type_conflict(supervised_model,
prediction_type))
elseif supervised_is_last
if operation != predict
super_type = Deterministic
else
super_type = abstract_type(supervised_model)
end
else
A = abstract_type(supervised_model)
A == Deterministic || operation !== predict ||
@info INFO_TREATING_AS_DETERMINISTIC
super_type = Deterministic
end
else
prediction_type === nothing ||
@warn WARN_IGNORING_PREDICTION_TYPE
super_type = is_static ? Static : Unsupervised
end
# dispatch on `super_type` to construct the appropriate type:
_pipeline(super_type, operation, named_components, cache)
end
# where the method called in the last line will be one of these:
for T_ex in SUPPORTED_TYPES_FOR_PIPELINES
P_ex = PIPELINE_TYPE_GIVEN_TYPE[T_ex]
quote
_pipeline(::Type{<:$T_ex}, args...) =
$P_ex(args...)
end |> eval
end
# # CLEAN METHOD
clean!(pipe::SomePipeline) = ""
# # PROPERTY ACCESS
err_pipeline_bad_property(p, name) = ErrorException(
"pipeline has no component `$name`")
Base.propertynames(p::SomePipeline{<:NamedTuple{names}}) where names =
(names..., :cache)
function Base.getproperty(p::SomePipeline{<:NamedTuple{names}},
name::Symbol) where names
name === :cache && return getfield(p, :cache)
name in names && return getproperty(getfield(p, :named_components), name)
throw(err_pipeline_bad_property(p, name))
end
function Base.setproperty!(p::SomePipeline{<:NamedTuple{names,types}},
name::Symbol, value) where {names,types}
name === :cache && return setfield!(p, :cache, value)
idx = findfirst(==(name), names)
idx === nothing && throw(err_pipeline_bad_property(p, name))
components = getfield(p, :named_components) |> values |> collect
@inbounds components[idx] = value
named_components = NamedTuple{names,types}(Tuple(components))
setfield!(p, :named_components, named_components)
end
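# Illustrative sketch of property access (hypothetical components, as in the
# `Pipeline` docstring above):
#
#     pipe = Pipeline(encoder=ContinuousEncoder(), stand=Standardizer())
#     pipe.stand                                          # the Standardizer component
#     pipe.encoder = ContinuousEncoder(drop_last=true)    # same abstract supertype, so allowed
#     pipe.cache = false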
# # LEARNING NETWORK INTERFACE
# https://JuliaAI.github.io/MLJ.jl/dev/composing_models/#Learning-network-machines
# ## Methods to extend a pipeline learning network
# The "front" of a pipeline network, as we grow it, consists of a "predict" and a
# "transform" node. Once the pipeline is complete (after a series of `extend` operations -
# see below) the "transform" node is what is used to deliver the output of
# `transform(pipe, ...)` in the exported model, and the "predict" node is what will be
# used to deliver the output of `predict(pipe, ...)`. Both nodes can be changed by `extend`
# but only the "active" node is propagated. Initially "transform" is active and "predict"
# only becomes active when a supervised model is encountered; this change is permanent.
# https://github.com/JuliaAI/MLJClusteringInterface.jl/issues/10
abstract type ActiveNodeOperation end
struct Trans <: ActiveNodeOperation end
struct Pred <: ActiveNodeOperation end
struct Front{A<:ActiveNodeOperation,P<:AbstractNode,N<:AbstractNode}
predict::P
transform::N
Front(p::P, t::N, a::A) where {P,N,A<:ActiveNodeOperation} = new{A,P,N}(p, t)
end
active(f::Front{Trans}) = f.transform
active(f::Front{Pred}) = f.predict
function extend(front::Front{Trans},
::Supervised,
name,
cache,
op,
sources...)
a = active(front)
mach = machine(name, a, sources...; cache=cache)
Front(op(mach, a), transform(mach, a), Pred())
end
function extend(front::Front{Trans}, ::Static, name, cache, args...)
mach = machine(name; cache=cache)
Front(front.predict, transform(mach, active(front)), Trans())
end
function extend(front::Front{Pred}, ::Static, name, cache, args...)
mach = machine(name; cache=cache)
Front(transform(mach, active(front)), front.transform, Pred())
end
function extend(front::Front{Trans}, component::Unsupervised, name, cache, ::Any, sources...)
a = active(front)
if target_in_fit(component)
mach = machine(name, a, first(sources); cache=cache)
else
mach = machine(name, a; cache=cache)
end
Front(predict(mach, a), transform(mach, a), Trans())
end
function extend(front::Front{Pred}, ::Unsupervised, name, cache, args...)
a = active(front)
mach = machine(name, a; cache=cache)
Front(transform(mach, a), front.transform, Pred())
end
# fallback assumes `component` is a callable object:
extend(front::Front{Trans}, component, ::Any, args...) =
Front(front.predict, node(component, active(front)), Trans())
extend(front::Front{Pred}, component, ::Any, args...) =
Front(node(component, active(front)), front.transform, Pred())
# ## The learning network interface
function pipeline_network_interface(
cache,
operation,
component_name_pairs,
source0,
sources...,
)
components = first.(component_name_pairs)
# initialize the network front:
front = Front(source0, source0, Trans())
# closure to use in reduction:
_extend(front, pair) =
extend(front, first(pair), last(pair), cache, operation, sources...)
# reduce to get the `predict` and `transform` nodes:
final_front = foldl(_extend, component_name_pairs, init=front)
pnode, tnode = final_front.predict, final_front.transform
interface = (; predict=pnode, transform=tnode)
# `inverse_transform` node (constructed even if not supported for some component):
if all(c -> c isa Unsupervised, components)
inode = source0
for mach in machines(tnode)
inode = inverse_transform(mach, inode)
end
interface = merge(interface, (; inverse_transform=inode))
end
return interface
end
# # PREFIT METHOD
function prefit(
pipe::SomePipeline{N,operation},
verbosity::Integer,
arg0=source(),
args...,
) where {N,operation}
source0 = source(arg0)
sources = source.(args)
component_name_pairs = MLJBase.component_name_pairs(pipe)
return pipeline_network_interface(
pipe.cache,
operation,
component_name_pairs,
source0,
sources...,
)
end
# # SYNTACTIC SUGAR
const INFO_AMBIGUOUS_CACHE =
"Joining pipelines with conflicting `cache` values. Using `cache=false`. "
import Base.(|>)
const compose = (|>)
Pipeline(p::SomePipeline) = p
const FuzzyModel = Union{Model,Type{<:Model}}
function compose(m1::FuzzyModel, m2::FuzzyModel)
# no-ops for pipelines:
p1 = Pipeline(m1)
p2 = Pipeline(m2)
_components = (components(p1)..., components(p2)...)
_names = (names(p1)..., names(p2)...)
named_components = pipe_named_tuple(_names, _components)
# `cache` is only `true` if `true` for both pipelines:
cache = false
if p1.cache && p2.cache
cache = true
elseif p1.cache ⊻ p2.cache
@info INFO_AMBIGUOUS_CACHE
end
_pipeline(named_components, nothing, operation(p2), cache)
end
compose(p1, p2::FuzzyModel) = compose(Pipeline(p1), p2)
compose(p1::FuzzyModel, p2) = compose(p1, Pipeline(p2))
# # TRAINING LOSSES
# ## Helpers
function supervised_component_name(pipe::SupervisedPipeline)
idx = findfirst(model -> model isa Supervised, components(pipe))
return names(pipe)[idx]
end
function supervised_component(pipe::SupervisedPipeline)
name = supervised_component_name(pipe)
named_components = getfield(pipe, :named_components)
return getproperty(named_components, name)
end
# ## Traits
# We cannot provide the following traits at the level of types because
# the pipeline type does not know the precise type of the supervised
# component, only its `abstract_type`. See comment at top of page.
MMI.supports_training_losses(pipe::SupervisedPipeline) =
MMI.supports_training_losses(supervised_component(pipe))
MMI.reports_feature_importances(pipe::SupervisedPipeline) =
MMI.reports_feature_importances(supervised_component(pipe))
# This trait cannot be defined at the level of types (see previous comment):
function MMI.iteration_parameter(pipe::SupervisedPipeline)
model = supervised_component(pipe)
name = supervised_component_name(pipe)
MLJBase.prepend(name, iteration_parameter(model))
end
MMI.target_scitype(p::SupervisedPipeline) = target_scitype(supervised_component(p))
MMI.target_in_fit(p::SomePipeline) = any(target_in_fit, components(p))
MMI.package_name(::Type{<:SomePipeline}) = "MLJBase"
MMI.load_path(::Type{<:SomePipeline}) = "MLJBase.Pipeline"
MMI.constructor(::Type{<:SomePipeline}) = Pipeline
# ## Training losses
# If supervised model does not support training losses, we won't find an entry in the
# report and so we need to return `nothing` (and not throw an error).
function MMI.training_losses(pipe::SupervisedPipeline, pipe_report)
supervised = MLJBase.supervised_component(pipe)
supervised_name = MLJBase.supervised_component_name(pipe)
supervised_name in propertynames(pipe_report) || return nothing
report = getproperty(pipe_report, supervised_name)
return training_losses(supervised, report)
end
# ## Feature importances
function feature_importances(pipe::SupervisedPipeline, fitresult, report)
# locate the machine associated with the supervised component:
supervised_name = MLJBase.supervised_component_name(pipe)
predict_node = fitresult.interface.predict
mach = only(MLJBase.machines_given_model(predict_node)[supervised_name])
# To extract the feature_importances, we can't do `feature_importances(mach)` because
# `mach.model` is just a symbol; instead we do:
supervised = MLJBase.supervised_component(pipe)
return feature_importances(supervised, mach.fitresult, mach.report[:fit])
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 21451 | ############################################
################ Helpers ###################
############################################
function _glb(types...)
    # If one of the types is a subtype of all the others, then it is the greatest
    # lower bound; otherwise we just return Unknown for now
for type in types
all(type <: t_ for t_ in types) && return type
end
return Unknown
end
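# For example (scitypes shown are illustrative):
#
#     _glb(AbstractVector{Continuous}, AbstractVector{<:Union{Missing,Continuous}})
#     # returns AbstractVector{Continuous}, a subtype of every argument
#     _glb(AbstractVector{Continuous}, AbstractVector{<:Finite})
#     # returns Unknown, since neither argument is a subtype of the other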
function input_target_scitypes(models, metalearner)
# The target scitype is defined as the greatest lower bound of the
# metalearner and the base models in the library
all_tg_scitypes = [target_scitype(m) for m in models]
tg_scitype = _glb(target_scitype(metalearner), all_tg_scitypes...)
# The input scitype is defined as the greatest lower bound of the
# base models in the library
inp_scitype = _glb([input_scitype(m) for m in models]...)
return inp_scitype, tg_scitype
end
############################################
################ Structures ################
############################################
const ERR_BAD_METALEARNER = ArgumentError(
"The metalearner should be a subtype "*
"of $(Union{Deterministic, Probabilistic})"
)
ERR_BAD_BASEMODEL(model) = ArgumentError(
"The base model $model is not supported as it appears to "*
"be a classifier predicting point values. Ordinarily, the "*
"the model must either be a "*
"probabilistic classifier (output of `predict` is a vector of "*
"`UnivariateFinite`) "*
"or a regressor (`target_scitype(model) <: "*
"AbstractVector{<:Union{Continuous,Missing}}`). "
)
const ERR_NO_METALEARNER = ArgumentError(
"No metalearner specified. Use Stack(metalearner=..., model1=..., model2=...)"
)
# checks `model` is either a probabilistic classifier or a regressor:
function check_valid_basemodel(model)
problem = prediction_type(model) === :deterministic &&
target_scitype(model) <: AbstractVector{<:Union{Finite,Missing}}
problem && throw(ERR_BAD_BASEMODEL(model))
return nothing
end
mutable struct DeterministicStack{
modelnames,
inp_scitype,
tg_scitype
} <: DeterministicNetworkComposite
models::Vector{Supervised}
metalearner::Deterministic
resampling
measures::Union{Nothing,AbstractVector}
cache::Bool
acceleration::AbstractResource
function DeterministicStack(
modelnames,
models,
metalearner,
resampling,
measures,
cache,
acceleration
)
map(models) do m
check_ismodel(m, spelling=true)
end
inp_scitype, tg_scitype = input_target_scitypes(models, metalearner)
return new{modelnames, inp_scitype, tg_scitype}(
models,
metalearner,
resampling,
measures,
cache,
acceleration
)
end
end
mutable struct ProbabilisticStack{
modelnames,
inp_scitype,
tg_scitype
} <: ProbabilisticNetworkComposite
models::Vector{Supervised}
metalearner::Probabilistic
resampling
measures::Union{Nothing,AbstractVector}
cache::Bool
acceleration::AbstractResource
function ProbabilisticStack(
modelnames,
models,
metalearner,
resampling,
measures,
cache,
acceleration
)
map(models) do m
check_ismodel(m, spelling=true)
end
inp_scitype, tg_scitype = input_target_scitypes(models, metalearner)
return new{modelnames, inp_scitype, tg_scitype}(
models,
metalearner,
resampling,
measures,
cache,
acceleration
)
end
end
const Stack{modelnames, inp_scitype, tg_scitype} = Union{
DeterministicStack{modelnames, inp_scitype, tg_scitype},
ProbabilisticStack{modelnames, inp_scitype, tg_scitype}
}
function Stack(
;metalearner=nothing,
resampling=CV(),
measure=nothing,
measures=measure,
cache=true,
acceleration=CPU1(),
named_models...
)
metalearner === nothing && throw(ERR_NO_METALEARNER)
nt = NamedTuple(named_models)
modelnames = keys(nt)
models = collect(nt)
if (measures !== nothing) && !(measures isa AbstractVector)
measures = [measures, ]
end
check_ismodel(metalearner)
if metalearner isa Deterministic
stack = DeterministicStack(
modelnames,
models,
metalearner,
resampling,
measures,
cache,
acceleration
)
elseif metalearner isa Probabilistic
stack = ProbabilisticStack(
modelnames,
models,
metalearner,
resampling,
measures,
cache,
acceleration,
)
else
throw(ERR_BAD_METALEARNER)
end
# Issuing clean! statement
message = MMI.clean!(stack)
isempty(message) || @warn message
# Warning if either input_scitype/target_scitype is
# Unknown at construction time
params = typeof(stack).parameters
params[end-1] == Unknown && @warn "Could not infer input_scitype of the stack"
params[end] == Unknown && @warn "Could not infer target_scitype of the stack"
return stack
end
function MMI.clean!(stack::Stack{modelnames, inp_scitype, tg_scitype}) where {
modelnames,
inp_scitype,
tg_scitype
}
# We only carry out checks and don't try to correct the arguments here
message = ""
# check basemodels:
basemodels = map(name -> getproperty(stack, name), modelnames)
check_valid_basemodel.(basemodels)
# Checking target_scitype and input_scitype have not been changed from the original
# stack:
glb_inp_scitype, glb_tg_scitype =
input_target_scitypes(getfield(stack, :models), stack.metalearner)
    glb_inp_scitype == inp_scitype || throw(DomainError(
inp_scitype,
"The newly inferred input_scitype of the stack doesn't "*
"match its original one. You have probably changed one of "*
"the base models or the metalearner to a non compatible type."
))
glb_tg_scitype == tg_scitype || throw(DomainError(
tg_scitype,
"The newly inferred target_scitype of the stack doesn't "*
"match its original one. You have probably changed one of "*
"the base model or the metalearner to a non compatible type."
))
# Checking the target scitype is consistent with either Probabilistic/Deterministic
# Stack:
target_scitype(stack.metalearner) <: Union{
AbstractArray{<:Union{Missing,Continuous}},
AbstractArray{<:Union{Missing,Finite}},
} || throw(ArgumentError(
"The metalearner should have target_scitype: "*
"$(Union{AbstractArray{<:Continuous}, AbstractArray{<:Finite}})"
))
return message
end
Base.propertynames(::Stack{modelnames}) where modelnames =
tuple(:metalearner, :resampling, :measures, :cache, :acceleration, modelnames...)
function Base.getproperty(stack::Stack{modelnames}, name::Symbol) where modelnames
name === :metalearner && return getfield(stack, :metalearner)
name === :resampling && return getfield(stack, :resampling)
name == :measures && return getfield(stack, :measures)
name === :cache && return getfield(stack, :cache)
name == :acceleration && return getfield(stack, :acceleration)
models = getfield(stack, :models)
for j in eachindex(modelnames)
name === modelnames[j] && return models[j]
end
error("type Stack has no property $name")
end
function Base.setproperty!(stack::Stack{modelnames}, _name::Symbol, val) where modelnames
_name === :metalearner && return setfield!(stack, :metalearner, val)
_name === :resampling && return setfield!(stack, :resampling, val)
_name === :measures && return setfield!(stack, :measures, val)
_name === :cache && return setfield!(stack, :cache, val)
_name === :acceleration && return setfield!(stack, :acceleration, val)
idx = findfirst(==(_name), modelnames)
idx isa Nothing || return getfield(stack, :models)[idx] = val
error("type Stack has no property $name")
end
# # TRAITS
MMI.target_scitype(::Type{<:Stack{modelnames, input_scitype, target_scitype}}) where
{modelnames, input_scitype, target_scitype} = target_scitype
MMI.input_scitype(::Type{<:Stack{modelnames, input_scitype, target_scitype}}) where
{modelnames, input_scitype, target_scitype} = input_scitype
MMI.constructor(::Type{<:Stack}) = Stack
MLJBase.load_path(::Type{<:Stack}) = "MLJBase.Stack"
MLJBase.package_name(::Type{<:Stack}) = "MLJBase"
MLJBase.package_uuid(::Type{<:Stack}) = "a7f614a8-145f-11e9-1d2a-a57a1082229d"
MLJBase.package_url(::Type{<:Stack}) = "https://github.com/JuliaAI/MLJBase.jl"
MLJBase.package_license(::Type{<:Stack}) = "MIT"
###########################################################
################# Node operations Methods #################
###########################################################
pre_judge_transform(
ŷ::Node,
::Type{<:Probabilistic},
::Type{<:AbstractArray{<:Union{Missing,Finite}}},
) = node(ŷ -> pdf(ŷ, levels(first(ŷ))), ŷ)
pre_judge_transform(
ŷ::Node,
::Type{<:Probabilistic},
::Type{<:AbstractArray{<:Union{Missing,Continuous}}},
) = node(ŷ->mean.(ŷ), ŷ)
pre_judge_transform(
ŷ::Node,
::Type{<:Deterministic},
::Type{<:AbstractArray{<:Union{Missing,Continuous}}},
) = ŷ
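# Illustrative sketch: if `ŷ` is the prediction node of a probabilistic classifier
# `model`, then `pre_judge_transform(ŷ, typeof(model), target_scitype(model))` returns a
# node evaluating to `pdf(ŷ(), levels(first(ŷ())))`, a matrix of class probabilities
# suitable as metalearner input; probabilistic regressor predictions are reduced to
# their means, and deterministic regressor predictions are passed through unchanged.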
store_for_evaluation(
mach::Machine,
Xtest::AbstractNode,
ytest::AbstractNode,
measures::Nothing,
) = nothing
store_for_evaluation(
mach::Machine,
Xtest::AbstractNode,
ytest::AbstractNode,
measures,
) = node((ytest, Xtest) -> [mach, Xtest, ytest], ytest, Xtest)
"""
internal_stack_report(
m::Stack,
verbosity::Int,
        tt_pairs,
folds_evaluations::Vararg{Nothing},
)
When measure/measures is `nothing`, the folds_evaluations won't have been filled by
`store_for_evaluation` and we thus return an empty `NamedTuple`.
"""
internal_stack_report(
m::Stack,
verbosity::Int,
tt_pairs,
folds_evaluations::Vararg{Nothing},
) = NamedTuple{}()
"""
internal_stack_report(
m::Stack,
verbosity::Int,
        tt_pairs,
folds_evaluations::Vararg{AbstractNode},
)
When measure/measures is provided, the folds_evaluations will have been filled by
`store_for_evaluation`. This function does no heavy work (it does not construct nodes
corresponding to measures); it just unpacks all the folds_evaluations into a single node
that can be evaluated later.
"""
function internal_stack_report(
m::Stack,
verbosity::Int,
tt_pairs,
folds_evaluations::Vararg{AbstractNode}
)
_internal_stack_report(folds_evaluations...) =
internal_stack_report(m, verbosity, tt_pairs, folds_evaluations...)
return (report=(cv_report=node(_internal_stack_report, folds_evaluations...),),)
end
"""
internal_stack_report(
stack::Stack{modelnames,},
verbosity::Int,
        tt_pairs,
folds_evaluations...
) where modelnames
Returns a `NamedTuple` of `PerformanceEvaluation` objects, one for each model. The
folds_evaluations are built in a flattened array respecting the order given by:
(fold_1:(model_1:[mach, Xtest, ytest], model_2:[mach, Xtest, ytest], ...), fold_2:(model_1,
model_2, ...), ...)
"""
function internal_stack_report(
stack::Stack{modelnames,},
verbosity::Int,
tt_pairs, # train_test_pairs
folds_evaluations...
) where modelnames
n_measures = length(stack.measures)
nfolds = length(tt_pairs)
test_fold_sizes = map(tt_pairs) do train_test_pair
test = last(train_test_pair)
length(test)
end
# weights to be used to aggregate per-fold measurements (averaging to 1):
fold_weights(mode) = nfolds .* test_fold_sizes ./ sum(test_fold_sizes)
fold_weights(::StatisticalMeasuresBase.Sum) = nothing
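    # For example, with 3 folds of test sizes [2, 3, 5], the weights are
    # 3 .* [2, 3, 5] ./ 10 == [0.6, 0.9, 1.5], which indeed average to 1; for measures
    # aggregated by summing, no weights are applied.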
# For each model we record the results mimicking the fields of PerformanceEvaluation
results = NamedTuple{modelnames}(
[(
model = model,
measure = stack.measures,
measurement = Vector{Any}(undef, n_measures),
operation = _actual_operations(nothing, stack.measures, model, verbosity),
per_fold = [Vector{Any}(undef, nfolds) for _ in 1:n_measures],
per_observation = [Vector{Vector{Any}}(undef, nfolds) for _ in 1:n_measures],
fitted_params_per_fold = [],
report_per_fold = [],
train_test_pairs = tt_pairs,
resampling = stack.resampling,
repeats = 1
)
for model in getfield(stack, :models)
]
)
# Update the results
index = 1
for foldid in 1:nfolds
for modelname in modelnames
model_results = results[modelname]
mach, Xtest, ytest = folds_evaluations[index]
# Update report and fitted_params per fold
push!(model_results.fitted_params_per_fold, fitted_params(mach))
push!(model_results.report_per_fold, report(mach))
# Loop over measures to update per_observation and per_fold
for (i, (measure, operation)) in enumerate(zip(
stack.measures,
model_results.operation,
))
ypred = operation(mach, Xtest)
measurements = StatisticalMeasuresBase.measurements(measure, ypred, ytest)
# Update per observation:
model_results.per_observation[i][foldid] = measurements
# Update per_fold
model_results.per_fold[i][foldid] = measure(ypred, ytest)
end
index += 1
end
end
# Update measurement field by aggregating per-fold measurements
for modelname in modelnames
for (i, measure) in enumerate(stack.measures)
model_results = results[modelname]
mode = StatisticalMeasuresBase.external_aggregation_mode(measure)
model_results.measurement[i] =
StatisticalMeasuresBase.aggregate(
model_results.per_fold[i];
mode,
weights=fold_weights(mode),
)
end
end
return NamedTuple{modelnames}([PerformanceEvaluation(r...) for r in results])
end
check_stack_measures(stack, verbosity::Int, measures::Nothing, y) = nothing
"""
    check_stack_measures(stack, verbosity, measures, y)
Check that the measures are compatible with each model in the Stack.
"""
function check_stack_measures(stack, verbosity::Int, measures, y)
for model in getfield(stack, :models)
operations = _actual_operations(nothing, measures, model, verbosity)
_check_measures(measures, operations, model, y)
end
end
"""
    oos_set(m::Stack, Xs::Source, ys::Source, tt_pairs)
This function builds the out-of-sample dataset that is later used by the metalearner
(the `judge`) for its own training. It also returns the folds_evaluations objects if
internal cross-validation results are requested.
"""
function oos_set(m::Stack{modelnames}, Xs::Source, ys::Source, tt_pairs) where modelnames
Zval = []
yval = []
folds_evaluations = []
# Loop over the cross validation folds to build a training set for the metalearner.
for (training_rows, test_rows) in tt_pairs
Xtrain = selectrows(Xs, training_rows)
ytrain = selectrows(ys, training_rows)
Xtest = selectrows(Xs, test_rows)
ytest = selectrows(ys, test_rows)
# Train each model on the train fold and predict on the validation fold
# predictions are subsequently used as an input to the metalearner
Zfold = []
for symbolic_model in modelnames
model = getproperty(m, symbolic_model)
mach = machine(symbolic_model, Xtrain, ytrain, cache=m.cache)
ypred = predict(mach, Xtest)
# Internal evaluation on the fold if required
push!(folds_evaluations, store_for_evaluation(mach, Xtest, ytest, m.measures))
# Dispatch the computation of the expected mean based on
            # the model type and target_scitype
ypred = pre_judge_transform(ypred, typeof(model), target_scitype(model))
push!(Zfold, ypred)
end
Zfold = hcat(Zfold...)
push!(Zval, Zfold)
push!(yval, ytest)
end
Zval = MLJBase.table(vcat(Zval...))
yval = vcat(yval...)
Zval, yval, folds_evaluations
end
#######################################
################# Prefit ##############
#######################################
function prefit(m::Stack{modelnames}, verbosity::Int, X, y) where modelnames
check_stack_measures(m, verbosity, m.measures, y)
tt_pairs = train_test_pairs(m.resampling, 1:nrows(y), X, y)
Xs = source(X)
ys = source(y)
Zval, yval, folds_evaluations = oos_set(m, Xs, ys, tt_pairs)
metamach = machine(:metalearner, Zval, yval, cache=m.cache)
# Each model is retrained on the original full training set
Zpred = []
for symbolic_model in modelnames
model = getproperty(m, symbolic_model)
mach = machine(symbolic_model, Xs, ys, cache=m.cache)
ypred = predict(mach, Xs)
ypred = pre_judge_transform(ypred, typeof(model), target_scitype(model))
push!(Zpred, ypred)
end
Zpred = MLJBase.table(hcat(Zpred...))
ŷ = predict(metamach, Zpred)
internal_report = internal_stack_report(m, verbosity, tt_pairs, folds_evaluations...)
# return learning network interface:
(;
predict = ŷ,
acceleration=m.acceleration,
internal_report..., # `internal_report` has form `(; report=(; cv_report=some_node))`
)
end
# # DOC STRINGS
const DOC_STACK =
"""
Stack(; metalearner=nothing, name1=model1, name2=model2, ..., keyword_options...)
Implements the two-layer generalized stack algorithm introduced by
[Wolpert (1992)](https://www.sciencedirect.com/science/article/abs/pii/S0893608005800231)
and generalized by [Van der Laan et al
(2007)](https://biostats.bepress.com/ucbbiostat/paper222/). Returns an
instance of type `ProbabilisticStack` or `DeterministicStack`,
depending on the prediction type of `metalearner`.
When training a machine bound to such an instance:
- The data is split into training/validation sets according to the
specified `resampling` strategy.
- Each base model `model1`, `model2`, ... is trained on each training
subset and outputs predictions on the corresponding validation
sets. The multi-fold predictions are spliced together into a
so-called out-of-sample prediction for each model.
- The adjudicating model, `metalearner`, is subsequently trained on
the out-of-sample predictions to learn the best combination of base
model predictions.
- Each base model is retrained on all supplied data, so that, in production, its
  predictions on new data can be passed on to the adjudicator for making the stack's
  predictions.
### Arguments
- `metalearner::Supervised`: The model that will optimize the desired
criterion based on its internals. For instance, a LinearRegression
model will optimize the squared error.
- `resampling`: The resampling strategy used
to prepare out-of-sample predictions of the base learners.
- `measures`: A measure or iterable over measures, to perform an internal
evaluation of the learners in the Stack while training. This is not for the
evaluation of the Stack itself.
- `cache`: Whether machines created in the learning network will cache data or not.
- `acceleration`: A supported `AbstractResource` to define the training parallelization
mode of the stack.
- `name1=model1, name2=model2, ...`: the `Supervised` model instances
to be used as base learners. The provided names become properties
of the instance created to allow hyper-parameter access
### Example
The following code defines a `DeterministicStack` instance for
learning a `Continuous` target, and demonstrates that:
- Base models can be `Probabilistic` models even if the stack
itself is `Deterministic` (`predict_mean` is applied in such cases).
- As an alternative to hyperparameter optimization, one can stack
multiple copies of given model, mutating the hyper-parameter used in
each copy.
```julia
using MLJ
DecisionTreeRegressor = @load DecisionTreeRegressor pkg=DecisionTree
EvoTreeRegressor = @load EvoTreeRegressor
XGBoostRegressor = @load XGBoostRegressor
KNNRegressor = @load KNNRegressor pkg=NearestNeighborModels
LinearRegressor = @load LinearRegressor pkg=MLJLinearModels
X, y = make_regression(500, 5)
stack = Stack(;metalearner=LinearRegressor(),
resampling=CV(),
measures=rmse,
constant=ConstantRegressor(),
tree_2=DecisionTreeRegressor(max_depth=2),
tree_3=DecisionTreeRegressor(max_depth=3),
evo=EvoTreeRegressor(),
knn=KNNRegressor(),
xgb=XGBoostRegressor())
mach = machine(stack, X, y)
evaluate!(mach; resampling=Holdout(), measure=rmse)
```
The internal evaluation report can be accessed like this
and provides a PerformanceEvaluation object for each model:
```julia
report(mach).cv_report
```
"""
@doc DOC_STACK Stack
@doc DOC_STACK ProbabilisticStack
@doc DOC_STACK DeterministicStack
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 9008 | # wrapper `TransformedTargetModel`
# "TT" is shorthand for "TransformedTargetModel"
# # TYPES
const TT_SUPPORTED_ATOMS = (
:Probabilistic,
:ProbabilisticUnsupervisedDetector,
:ProbabilisticSupervisedDetector,
:Deterministic,
:DeterministicUnsupervisedDetector,
:DeterministicSupervisedDetector,
:Interval,
)
# Each supported atomic type gets its own wrapper:
const TT_TYPE_GIVEN_ATOM =
Dict(atom =>
Symbol("TransformedTargetModel$atom") for atom in TT_SUPPORTED_ATOMS)
# ...which must have appropriate supertype:
const TT_SUPER_GIVEN_ATOM =
Dict(atom =>
Symbol("$(atom)NetworkComposite") for atom in TT_SUPPORTED_ATOMS)
# The type definitions:
for From in TT_SUPPORTED_ATOMS
New = TT_TYPE_GIVEN_ATOM[From]
To = TT_SUPER_GIVEN_ATOM[From]
ex = quote
mutable struct $New{M <: $From} <: $To
model::M
transformer # Unsupervised or callable
inverse # callable or `nothing`
cache
end
end
eval(ex)
end
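# For example, for `From = :Deterministic` the loop above evaluates (illustrative):
#
#     mutable struct TransformedTargetModelDeterministic{M<:Deterministic} <:
#             DeterministicNetworkComposite
#         model::M
#         transformer   # Unsupervised or callable
#         inverse       # callable or `nothing`
#         cache
#     end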
# dict whose keys and values are now types instead of symbols:
const tt_type_given_atom = Dict()
for atom in TT_SUPPORTED_ATOMS
atom_str = string(atom)
type = TT_TYPE_GIVEN_ATOM[atom]
@eval(tt_type_given_atom[$atom] = $type)
end
# not exported:
const TT_TYPES = values(tt_type_given_atom)
const TT_TYPE_EXS = values(TT_TYPE_GIVEN_ATOM)
const SomeTT = Union{TT_TYPES...}
const TTSupported = Union{keys(tt_type_given_atom)...}
# # CONSTRUCTOR
const ERR_MODEL_UNSPECIFIED = ArgumentError(
"Expecting atomic model as argument. None specified. "
)
const ERR_TRANSFORMER_UNSPECIFIED = ArgumentError(
"You must specify `transformer=...`. ."
)
const ERR_TOO_MANY_ARGUMENTS = ArgumentError(
"At most one non-keyword argument, a model, allowed. "
)
const PRETTY_TT_SUPPORT_OPTIONS =
join([string("`", opt, "`") for opt in TT_SUPPORTED_ATOMS],
", ",
", and ")
const err_tt_unsupported(model) = ArgumentError(
"Only these model supertypes support wrapping as in "*
"`TransformedTarget`: $PRETTY_TT_SUPPORT_OPTIONS.\n"*
"Model provided has type `$(typeof(model))`. "
)
const WARN_IDENTITY_INVERSE =
"Model being wrapped is not a deterministic predictor. "*
"Setting `inverse=identity` to suppress inverse transformations "*
"of predictions. "
const WARN_MISSING_INVERSE =
"Specified `transformer` is not a model instance or type "*
"and so is assumed callable (eg, is a function). "*
"I am setting `inverse=identity` as no `inverse` specified. This means "*
"predictions of the (semi)supervised model will be "*
"returned on a scale different from the training target. "
const WARN_TARGET_DEPRECATED =
"`TransformedTargetModel(target=...)` is deprecated in favor of "*
"`TransformedTargetModel(transformer=...)`. "
"""
TransformedTargetModel(model; transformer=nothing, inverse=nothing, cache=true)
Wrap the supervised or semi-supervised `model` in a transformation of
the target variable.
Here `transformer` is one of the following:
- The `Unsupervised` model that is to transform the training target.
By default (`inverse=nothing`) the parameters learned by this
transformer are also used to inverse-transform the predictions of
`model`, which means `transformer` must implement the `inverse_transform`
method. If this is not the case, specify `inverse=identity` to
suppress inversion.
- A callable object for transforming the target, such as `y ->
log.(y)`. In this case a callable `inverse`, such as `z -> exp.(z)`,
should be specified.
Specify `cache=false` to prioritize memory over speed, or to guarantee data
anonymity.
Specify `inverse=identity` if `model` is a probabilistic predictor, as
inverse-transforming sample spaces is not supported. Alternatively,
replace `model` with a deterministic model, such as `Pipeline(model,
y -> mode.(y))`.
### Examples
A model that normalizes the target before applying ridge regression,
with predictions returned on the original scale:
```julia
@load RidgeRegressor pkg=MLJLinearModels
model = RidgeRegressor()
tmodel = TransformedTargetModel(model, transformer=Standardizer())
```
A model that applies a static `log` transformation to the data, again
returning predictions to the original scale:
```julia
tmodel2 = TransformedTargetModel(model, transformer=y->log.(y), inverse=z->exp.(z))
```
"""
function TransformedTargetModel(
args...;
model=nothing,
transformer=nothing,
inverse=nothing,
cache=true,
)
length(args) < 2 || throw(ERR_TOO_MANY_ARGUMENTS)
if length(args) === 1
atom = first(args)
model === nothing ||
@warn "Using `model=$atom`. Ignoring specification `model=$model`. "
else
model === nothing && throw(ERR_MODEL_UNSPECIFIED)
atom = model
end
atom isa TTSupported || throw(err_tt_unsupported(atom))
transformer === nothing && throw(ERR_TRANSFORMER_UNSPECIFIED)
metamodel =
tt_type_given_atom[MMI.abstract_type(atom)](atom,
transformer,
inverse,
cache)
message = clean!(metamodel)
isempty(message) || @warn message
return metamodel
end
_is_model_type(m) = m isa Type && m <: Model
function clean!(model::SomeTT)
message = ""
if _is_model_type(model.transformer)
model.transformer = model.transformer()
end
if prediction_type(model.model) !== :deterministic &&
model.inverse != identity
model.inverse = identity
message *= WARN_IDENTITY_INVERSE
end
if !(model.transformer isa Model) &&
!_is_model_type(model.transformer) && model.inverse === nothing
model.inverse = identity
message *= WARN_MISSING_INVERSE
end
return message
end
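# Illustrative sketch (`atom` stands for some deterministic regressor instance): wrapping
# with a plain callable and no inverse, as in
#
#     TransformedTargetModel(atom, transformer=y -> log.(y))
#
# causes `clean!` to set `inverse = identity` and append `WARN_MISSING_INVERSE` to the
# returned message, so predictions are reported on the transformed (log) scale.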
# # PREFIT METHOD
function prefit(model::SomeTT, verbosity, X, y, other...)
transformer = model.transformer
inverse = model.inverse
atom = model.model
cache = model.cache
Xs = source(X)
ys = source(y)
others = source.(other)
if transformer isa Model
if transformer isa Static
unsupervised_mach = machine(:transformer, cache=cache)
else
unsupervised_mach = machine(:transformer, ys, cache=cache)
end
z = transform(unsupervised_mach, ys)
else
z = node(transformer, ys)
end
supervised_mach = machine(:model, Xs, z, cache=cache)
zhat = predict(supervised_mach, Xs)
yhat = if transformer isa Model && inverse != identity
inverse_transform(unsupervised_mach, zhat)
else
node(inverse, zhat)
end
# in case the atomic model implements `transform`:
W = transform(supervised_mach, Xs)
# learning network interface:
(predict=yhat, transform=W)
end
# # TRAINING LOSSES
const ERR_TT_MISSING_REPORT =
"Cannot find report for `TransformedTargetModel` atomic model, from which "*
"to extract training losses. "
function MMI.training_losses(composite::SomeTT, tt_report)
hasproperty(tt_report, :model) || throw(ERR_TT_MISSING_REPORT)
atomic_report = getproperty(tt_report, :model)
return training_losses(composite.model, atomic_report)
end
# # FEATURE IMPORTANCES
function MMI.feature_importances(composite::SomeTT, fitresult, report)
# locate the machine associated with the supervised component:
predict_node = fitresult.interface.predict
mach = only(MLJBase.machines_given_model(predict_node)[:model])
# To extract the feature_importances, we can't do `feature_importances(mach)` because
# `mach.model` is just a symbol; instead we do:
return feature_importances(composite.model, mach.fitresult, mach.report[:fit])
end
## MODEL TRAITS
MMI.package_name(::Type{<:SomeTT}) = "MLJBase"
MMI.package_license(::Type{<:SomeTT}) = "MIT"
MMI.package_uuid(::Type{<:SomeTT}) = "a7f614a8-145f-11e9-1d2a-a57a1082229d"
MMI.is_wrapper(::Type{<:SomeTT}) = true
MMI.package_url(::Type{<:SomeTT}) = "https://github.com/JuliaAI/MLJBase.jl"
MMI.load_path(::Type{<:SomeTT}) = "MLJBase.TransformedTargetModel"
MMI.constructor(::Type{<:SomeTT}) = TransformedTargetModel
for New in TT_TYPE_EXS
quote
MMI.iteration_parameter(::Type{<:$New{M}}) where M =
MLJBase.prepend(:model, iteration_parameter(M))
end |> eval
for trait in [
:input_scitype,
:output_scitype,
:target_scitype,
:fit_data_scitype,
:predict_scitype,
:transform_scitype,
:inverse_transform_scitype,
:is_pure_julia,
:supports_weights,
:supports_class_weights,
:supports_online,
:supports_training_losses,
:reports_feature_importances,
:is_supervised,
:prediction_type
]
quote
MMI.$trait(::Type{<:$New{M}}) where M = MMI.$trait(M)
end |> eval
end
end
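# The loops above generate delegating trait definitions such as (illustrative):
#
#     MMI.target_scitype(::Type{<:TransformedTargetModelDeterministic{M}}) where M =
#         MMI.target_scitype(M)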
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 13680 | # SPLITTING DATA SETS
# Helper function for partitioning in the non-stratified case
function _partition(rows, fractions, ::Nothing)
# container for the row selections (head:tail)
n_splits = length(fractions) + 1
heads = zeros(Int, n_splits)
tails = zeros(Int, n_splits)
n_rows = length(rows)
head = 1
for (i, p) in enumerate(fractions)
n = round(Int, p * n_rows)
iszero(n) && (@warn "A split has only one element."; n = 1)
# update tail
tail = head + n - 1
# store
heads[i] = head
tails[i] = tail
# update head
head = tail + 1
end
if head > n_rows
@warn "Last vector in the split has only one element."
head = n_rows
end
heads[end] = head
tails[end] = n_rows
return tuple((rows[h:t] for (h, t) in zip(heads, tails))...)
end
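# For example, `_partition(collect(1:10), [0.3, 0.4], nothing)` computes heads/tails
# (1, 3), (4, 7), (8, 10) and so returns `([1, 2, 3], [4, 5, 6, 7], [8, 9, 10])`.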
_make_numerical(v::AbstractVector) =
throw(ArgumentError("`stratify` must have `Count`, `Continuous` "*
"or `Finite` element scitpye. Consider "*
"`coerce(stratify, Finite)`. "))
_make_numerical(v::AbstractVector{<:Union{Missing,Real}}) = v
_make_numerical(v::AbstractVector{<:Union{Missing,CategoricalValue}}) =
int.(v)
# Helper function for partitioning in the stratified case
function _partition(rows, fractions, raw_stratify::AbstractVector)
stratify = _make_numerical(raw_stratify)
length(stratify) == length(rows) ||
throw(ArgumentError("The stratification vector must "*
"have as many entries as " *
"the rows to partition."))
uv = unique(stratify)
# construct table (n_classes * idx_of_that_class)
# NOTE use of '===' is important to handle missing.
idxs = [[i for i in rows if stratify[rows[i]] === v] for v in uv]
# number of occurences of each class and proportions
nidxs = length.(idxs)
props = length.(idxs) ./ length(rows)
n_splits = length(fractions) + 1
n_rows = length(rows)
ns_props = round.(Int, n_rows * fractions * props')
ns_props = vcat(ns_props, nidxs' .- sum(ns_props, dims=1))
    # warn if any class ends up with fewer than two representatives in some split
if !all(e -> e > 1, ns_props)
@warn "Some splits have a single or no representative of some class."
end
# container for the rows
split_rows = []
heads = ones(Int, length(uv))
for r in 1:size(ns_props, 1)
tails = heads .+ ns_props[r, :] .- 1
# take chunks of the indices corresponding to the current fraction
indices = vcat((idxs[i][heads[i]:tails[i]] for i in eachindex(uv))...)
# rearrange by order of appearance
indices = sort(indices)
push!(split_rows, rows[indices])
heads .= tails .+ 1
end
if !all(sl -> sl > 1, length.(split_rows))
@warn "Some splits have a single or no representative of some class."
end
return tuple(split_rows...)
end
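# For example, `_partition(collect(1:10), [0.5], [1, 1, 1, 1, 1, 1, 2, 2, 2, 2])` returns
# `([1, 2, 3, 7, 8], [4, 5, 6, 9, 10])`: each half preserves the 60/40 class balance of
# the stratification vector.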
const ERR_PARTITION_UNSUPPORTED = ArgumentError(
"Function `partition` only supports "*
"AbstractVector, AbstractMatrix or containers implementing the "*
"Tables interface.")
const ERR_PARTITION_DIMENSION_MISMATCH = DimensionMismatch(
"Expected a tuple of objects with a common length. ")
__nrows(X) = Tables.istable(X) ? nrows(X) : throw(ERR_PARTITION_UNSUPPORTED)
__nrows(X::Union{AbstractMatrix,AbstractVector}) = nrows(X)
"""
partition(X, fractions...;
shuffle=nothing,
rng=Random.GLOBAL_RNG,
stratify=nothing,
multi=false)
Splits the vector, matrix or table `X` into a tuple of objects of the
same type, whose vertical concatenation is `X`. The number of rows in
each component of the return value is determined by the
corresponding `fractions` of `length(nrows(X))`, where valid fractions
are floats between 0 and 1 whose sum is less than one. The last
fraction is not provided, as it is inferred from the preceding ones.
For synchronized partitioning of multiple objects, use the
`multi=true` option.
```julia-repl
julia> partition(1:1000, 0.8)
([1,...,800], [801,...,1000])
julia> partition(1:1000, 0.2, 0.7)
([1,...,200], [201,...,900], [901,...,1000])
julia> partition(reshape(1:10, 5, 2), 0.2, 0.4)
([1 6], [2 7; 3 8], [4 9; 5 10])
julia> X, y = make_blobs() # a table and vector
julia> Xtrain, Xtest = partition(X, 0.8, stratify=y)
```
Here's an example of synchronized partitioning of multiple objects:
```julia-repl
julia> (Xtrain, Xtest), (ytrain, ytest) = partition((X, y), 0.8, rng=123, multi=true)
```
## Keywords
* `shuffle=nothing`: if set to `true`, shuffles the rows before taking
fractions.
* `rng=Random.GLOBAL_RNG`: specifies the random number generator to be
  used; can be an integer seed. If specified, and `shuffle ===
  nothing`, then `shuffle` is interpreted as `true`.
* `stratify=nothing`: if a vector is specified, the partition will
match the stratification of the given vector. In that case,
`shuffle` cannot be `false`.
* `multi=false`: if `true` then `X` is expected to be a `tuple` of
objects sharing a common length, which are each partitioned
separately using the same specified `fractions` *and* the same row
shuffling. Returns a tuple of partitions (a tuple of tuples).
"""
function partition(X, fractions::Real...;
shuffle::Union{Nothing,Bool}=nothing,
rng=Random.GLOBAL_RNG,
stratify::Union{Nothing,AbstractVector}=nothing,
multi=false)
# check the fractions
if !all(e -> 0 < e < 1, fractions) || sum(fractions) >= 1
throw(DomainError(fractions,
"Fractions must be in (0, 1) with sum < 1."))
end
    # determine `n_rows`:
if X isa Tuple && multi
isempty(X) && return tuple(fill((), length(fractions) + 1)...)
x = first(X)
n_rows = __nrows(x)
all(X[2:end]) do x
nrows(x) === n_rows
end || throw(ERR_PARTITION_DIMENSION_MISMATCH)
else
n_rows = __nrows(X)
end
# check the rng & adjust shuffling
if rng isa Integer
rng = MersenneTwister(rng)
end
if rng != Random.GLOBAL_RNG && shuffle === nothing
shuffle = true
end
rows = collect(1:n_rows)
shuffle !== nothing && shuffle && shuffle!(rng, rows)
# determine the partition of `rows`:
row_partition = _partition(rows, collect(fractions), stratify)
return _partition(X, row_partition)
end
function _partition(X, row_partition)
# _X = Tables.istable(X) ? X : collect(X)
return tuple((selectrows(X, p) for p in row_partition)...)
end
_partition(X::Tuple, row_partition) =
map(x->_partition(x, row_partition), X)
# # UNPACK
"""
unpack(table, f1, f2, ... fk;
wrap_singles=false,
shuffle=false,
rng::Union{AbstractRNG,Int,Nothing}=nothing,
coerce_options...)
Horizontally split any Tables.jl compatible `table` into smaller
tables or vectors by making column selections determined by the
predicates `f1`, `f2`, ..., `fk`. Selection from the column names is
without replacement. A *predicate* is any object `f` such that
`f(name)` is `true` or `false` for each column `name::Symbol` of
`table`.
Returns a tuple of tables/vectors with length one greater than the
number of supplied predicates, with the last component including all
previously unselected columns.
```julia-repl
julia> table = DataFrame(x=[1,2], y=['a', 'b'], z=[10.0, 20.0], w=["A", "B"])
2×4 DataFrame
Row │ x y z w
│ Int64 Char Float64 String
─────┼──────────────────────────────
1 │ 1 a 10.0 A
2 │ 2 b 20.0 B
julia> Z, XY, W = unpack(table, ==(:z), !=(:w));
julia> Z
2-element Vector{Float64}:
10.0
20.0
julia> XY
2×2 DataFrame
Row │ x y
│ Int64 Char
─────┼─────────────
1 │ 1 a
2 │ 2 b
julia> W # the column(s) left over
2-element Vector{String}:
"A"
"B"
```
Whenever a returned table contains a single column, it is converted to
a vector unless `wrap_singles=true`.
If `coerce_options` are specified then `table` is first replaced
with `coerce(table, coerce_options)`. See
[`ScientificTypes.coerce`](@ref) for details.
If `shuffle=true` then the rows of `table` are first shuffled, using
the global RNG, unless `rng` is specified; if `rng` is an integer, it
specifies the seed of an automatically generated Mersenne twister. If
`rng` is specified then `shuffle=true` is implicit.
"""
function unpack(X, predicates...;
wrap_singles=false,
shuffle=nothing,
rng=nothing, pairs...)
# add a final predicate to unpack all remaining columns into to
# the last return value:
predicates = (predicates..., _ -> true)
shuffle, rng = shuffle_and_rng(shuffle, rng)
shuffle && (X = selectrows(X, Random.shuffle(rng, 1:nrows(X))))
if isempty(pairs)
Xfixed = X
else
Xfixed = coerce(X, pairs...)
end
unpacked = Any[]
names_left = schema(Xfixed).names |> collect
for c in predicates
names = filter(c, names_left)
filter!(!in(names), names_left)
length(names) == 1 && !wrap_singles && (names = names[1])
push!(unpacked, selectcols(Xfixed, names))
end
return Tuple(unpacked)
end
## RESTRICTING TO A FOLD
struct FoldRestrictor{i,N}
f::NTuple{N,Vector{Int}}
end
(r::FoldRestrictor{i})(X) where i = selectrows(X, (r.f)[i])
"""
restrict(X, folds, i)
The restriction of `X`, a vector, matrix or table, to the `i`th fold
of `folds`, where `folds` is a tuple of vectors of row indices.
The method is curried, so that `restrict(folds, i)` is the operator
on data defined by `restrict(folds, i)(X) = restrict(X, folds, i)`.
### Example
```julia
folds = ([1, 2], [3, 4, 5], [6,])
restrict([:x1, :x2, :x3, :x4, :x5, :x6], folds, 2) # [:x3, :x4, :x5]
```
See also [`corestrict`](@ref)
"""
restrict(f::NTuple{N}, i) where N = FoldRestrictor{i,N}(f)
restrict(X, f, i) = restrict(f, i)(X)
## RESTRICTING TO A FOLD COMPLEMENT
"""
complement(folds, i)
The complement of the `i`th fold of `folds` in the concatenation of
all elements of `folds`. Here `folds` is a vector or tuple of integer
vectors, typically representing row indices or a vector, matrix or
table.
```julia
complement(([1,2], [3,], [4, 5]), 2) # [1 ,2, 4, 5]
```
"""
complement(f, i) = reduce(vcat, collect(f)[Not(i)])
struct FoldComplementRestrictor{i,N}
f::NTuple{N,Vector{Int}}
end
(r::FoldComplementRestrictor{i})(X) where i =
selectrows(X, complement(r.f, i))
"""
corestrict(X, folds, i)
The restriction of `X`, a vector, matrix or table, to the *complement*
of the `i`th fold of `folds`, where `folds` is a tuple of vectors of
row indices.
The method is curried, so that `corestrict(folds, i)` is the operator
on data defined by `corestrict(folds, i)(X) = corestrict(X, folds, i)`.
### Example
```julia
folds = ([1, 2], [3, 4, 5], [6,])
corestrict([:x1, :x2, :x3, :x4, :x5, :x6], folds, 2) # [:x1, :x2, :x6]
```
"""
corestrict(f::NTuple{N}, i) where N = FoldComplementRestrictor{i,N}(f)
corestrict(X, f, i) = corestrict(f, i)(X)
## to be replaced (not used anywhere):
## ACCESSORS FOR JULIA NDSPARSE ARRAYS (N=2)
# nrows(::Val{:sparse}, X) = maximum([r[1] for r in keys(X)])
# function select(::Val{:sparse}, X, r::Integer, c::Symbol)
# try
# X[r,c][1]
# catch exception
# exception isa KeyError || throw(exception)
# missing
# end
# end
# select(::Val{:sparse}, X, r::AbstractVector{<:Integer}, c::Symbol) = [select(X, s, c) for s in r]
# select(::Val{:sparse}, X, ::Colon, c::Symbol) = [select(X, s, c) for s in 1:nrows(X)]
# selectrows(::Val{:sparse}, X, r::Integer) = X[r:r,:]
# selectrows(::Val{:sparse}, X, r) = X[r,:]
# selectcols(::Val{:sparse}, X, c::Symbol) = select(X, :, c)
# selectcols(::Val{:sparse}, X, c::AbstractVector{Symbol}) = X[:,sort(c)]
# selectcols(::Val{:sparse}, X, ::Colon) = X
# select(::Val{:sparse}, X, r::Integer, c::AbstractVector{Symbol}) = X[r,sort(c)]
# select(::Val{:sparse}, X, r::Integer, ::Colon) = X[r,:]
# select(::Val{:sparse}, X, r, c) = X[r,sort(c)]
## TRANSFORMING BETWEEN CATEGORICAL ELEMENTS AND RAW VALUES
MLJModelInterface.transform(
e::Union{CategoricalArray,CategoricalValue,CategoricalPool},
arg) = CategoricalDistributions.transform(e, arg)
## SKIPPING MISSING AND NAN: skipinvalid
_isnan(x) = false
_isnan(x::Number) = isnan(x)
skipnan(x) = Iterators.filter(!_isnan, x)
isinvalid(x) = ismissing(x) || _isnan(x)
"""
skipinvalid(itr)
Return an iterator over the elements in `itr` skipping `missing` and
`NaN` values. Behaviour is similar to [`skipmissing`](@ref).
"""
skipinvalid(v) = v |> skipmissing |> skipnan
"""
skipinvalid(A, B)
For vectors `A` and `B` of the same length, return a tuple of vectors
`(A[mask], B[mask])` where `mask[i]` is `true` if and only if `A[i]`
and `B[i]` are both valid (non-`missing` and non-`NaN`). Can also be
called on other iterators of matching length, such as arrays, but
always returns vectors. Does not remove `Missing` from the element
types if present in the original iterators.
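For example, positions where either input is `missing` or `NaN` are dropped from both
(element types may still allow `missing`):
```julia
A, B = skipinvalid([1, 2, missing, 5, NaN], [missing, 8, 9, 10, 11])
A == [2.0, 5.0] && B == [8, 10]  # true
```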
"""
function skipinvalid(yhat, y)
mask = .!(isinvalid.(yhat) .| isinvalid.(y))
return yhat[mask], y[mask]
end
# TODO: refactor balanced accuracy to get rid of these:
function _skipinvalid(yhat, y, w::Arr)
mask = .!(isinvalid.(yhat) .| isinvalid.(y))
return yhat[mask], y[mask], w[mask]
end
function _skipinvalid(yhat, y, w::Union{Nothing,AbstractDict})
mask = .!(isinvalid.(yhat) .| isinvalid.(y))
return yhat[mask], y[mask], w
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 8147 | # see also the macro versions in datasets.jl
# -------------------------------------------------------
# To add a new dataset assuming it has a header and is, at path
# `data/newdataset.csv`
#
# 1. start by loading it with CSV:
#
# fpath = joinpath("datadir", "newdataset.csv")
# data = CSV.read(fpath, copycols=true,
# categorical=true)
#
# 2. load it with DelimitedFiles and Tables
#
# data_raw, data_header = readdlm(fpath, ',', header=true)
# data_table = Tables.table(data_raw; header=Symbol.(vec(data_header)))
#
# 3. retrieve the conversions:
#
# for (n, st) in zip(names(data), scitype_union.(eachcol(data)))
# println(":$n=>$st,")
# end
#
# 4. copy and paste the result in a coerce
#
# data_table = coerce(data_table, ...)
#
# -------------------------------------------------------
const DATA_DIR = joinpath(MODULE_DIR, "..", "data")
const COERCE_BOSTON = (:Chas => Count,)
const COERCE_REDUCED_AMES = (
:target => Continuous,
:OverallQual => OrderedFactor,
:GrLivArea => Continuous,
:Neighborhood => Multiclass,
:x1stFlrSF => Continuous,
:TotalBsmtSF => Continuous,
:BsmtFinSF1 => Continuous,
:LotArea => Continuous,
:GarageCars => Count,
:MSSubClass => Multiclass,
:GarageArea => Continuous,
:YearRemodAdd => Count,
:YearBuilt => Count)
const COERCE_AMES = (
:Id => Count,
:MSSubClass => Multiclass,
:MSZoning => Multiclass,
:LotFrontage => Continuous,
:LotArea => Continuous,
:Street => Multiclass,
:LotShape => Multiclass,
:LandContour => Multiclass,
:LotConfig => Multiclass,
:LandSlope => OrderedFactor,
:Neighborhood => Multiclass,
:Condition1 => Multiclass,
:Condition2 => Multiclass,
:BldgType => Multiclass,
:HouseStyle => Multiclass,
:OverallQual => OrderedFactor,
:OverallCond => OrderedFactor,
:YearBuilt => Count,
:YearRemodAdd => Count,
:RoofStyle => Multiclass,
:RoofMatl => Multiclass,
:Exterior1st => Multiclass,
:Exterior2nd => Multiclass,
:MasVnrType => Multiclass,
:MasVnrArea => Continuous,
:ExterQual => OrderedFactor,
:ExterCond => OrderedFactor,
:Foundation => Multiclass,
:BsmtQual => OrderedFactor,
:BsmtCond => OrderedFactor,
:BsmtExposure => OrderedFactor,
:BsmtFinType1 => Multiclass,
:BsmtFinSF1 => Continuous,
:BsmtFinType2 => Multiclass,
:BsmtFinSF2 => Continuous,
:BsmtUnfSF => Continuous,
:TotalBsmtSF => Continuous,
:Heating => Multiclass,
:HeatingQC => OrderedFactor,
:CentralAir => Multiclass,
:Electrical => Multiclass,
:x1stFlrSF => Continuous,
:x2ndFlrSF => Continuous,
:LowQualFinSF => Continuous,
:GrLivArea => Continuous,
:BsmtFullBath => Count,
:BsmtHalfBath => Count,
:FullBath => Count,
:HalfBath => Count,
:BedroomAbvGr => Count,
:KitchenAbvGr => Count,
:KitchenQual => OrderedFactor,
:TotRmsAbvGrd => Count,
:Functional => OrderedFactor,
:Fireplaces => Count,
:FireplaceQu => OrderedFactor,
:GarageType => Multiclass,
:GarageYrBlt => Count,
:GarageFinish => Multiclass,
:GarageCars => Count,
:GarageArea => Continuous,
:GarageQual => OrderedFactor,
:GarageCond => OrderedFactor,
:PavedDrive => Multiclass,
:WoodDeckSF => Continuous,
:OpenPorchSF => Continuous,
:EnclosedPorch => Continuous,
:x3SsnPorch => Continuous,
:ScreenPorch => Continuous,
:PoolArea => Continuous,
:MiscVal => Continuous,
:MoSold => Multiclass,
:YrSold => Count,
:SaleType => Multiclass,
:SaleCondition => Multiclass,
:target => Continuous)
const COERCE_IRIS = (
:sepal_length => Continuous,
:sepal_width => Continuous,
:petal_length => Continuous,
:petal_width => Continuous,
:target => Multiclass)
const COERCE_CRABS = (
:sp => Multiclass,
:sex => Multiclass,
:index => Count,
:FL => Continuous,
:RW => Continuous,
:CL => Continuous,
:CW => Continuous,
:BD => Continuous)
const COERCE_SMARKET = (
:Year=>Continuous,
:Lag1=>Continuous,
:Lag2=>Continuous,
:Lag3=>Continuous,
:Lag4=>Continuous,
:Lag5=>Continuous,
:Volume=>Continuous,
:Today=>Continuous,
:Direction=>Multiclass{2})
const COERCE_SUNSPOTS = (
(:sunspot_number=>Continuous),)
"""
    load_dataset(fname, coercions)
Load one of the standard datasets (Boston, Ames, etc.), assuming the file is a
comma-separated file with a header.
"""
function load_dataset(fname::String, coercions::Tuple)
fpath = joinpath(DATA_DIR, fname)
data_raw, data_header = readdlm(fpath, ',', header=true)
data_table = MLJBase.table(data_raw; names=Symbol.(vec(data_header)))
return coerce(data_table, coercions...; tight=true)
end
load_boston() = load_dataset("Boston.csv", COERCE_BOSTON)
load_reduced_ames() = load_dataset("reduced_ames.csv", COERCE_REDUCED_AMES)
function load_ames()
data = load_dataset("ames.csv", COERCE_AMES)
levels!(data.LandSlope, ["Gtl", "Mod", "Sev"])
levels!(data.ExterQual, ["Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.ExterCond, ["Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.HeatingQC, ["Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.KitchenQual, ["Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.BsmtQual, ["_NA", "Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.BsmtCond, ["_NA", "Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.BsmtExposure, ["_NA", "No", "Mn", "Av", "Gd"])
levels!(data.FireplaceQu, ["None", "Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.GarageQual, ["_NA", "Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.GarageCond, ["_NA", "Po", "Fa", "TA", "Gd", "Ex"])
levels!(data.Functional, ["Typ", "Min1", "Min2", "Mod", "Maj1", "Maj2",
"Sev", "Sal"])
return data
end
load_iris() = load_dataset("iris.csv", COERCE_IRIS)
load_crabs() = load_dataset("crabs.csv", COERCE_CRABS)
function load_smarket()
data1 = load_dataset("smarket.csv", COERCE_SMARKET)
return merge(data1, (Year=Dates.Date.(data1.Year),))
end
"""Load a well-known sunspot time series (table with one column).
<https://www.sws.bom.gov.au/Educational/2/3/6>
"""
load_sunspots() = load_dataset("sunspots.csv", COERCE_SUNSPOTS)
"""Load a well-known public regression dataset with `Continuous` features."""
macro load_boston()
quote
y, X = unpack(load_boston(), ==(:MedV), x->x != :Chas)
(X, y)
end
end
"""Load a reduced version of the well-known Ames Housing task"""
macro load_reduced_ames()
quote
y, X = unpack(load_reduced_ames(), ==(:target), x-> true)
(X, y)
end
end
"""Load the full version of the well-known Ames Housing task."""
macro load_ames()
quote
y, X = unpack(load_ames(), ==(:target), x->x != :Id)
(X, y)
end
end
"""Load a well-known public classification task with nominal features."""
macro load_iris()
quote
y, X = unpack(load_iris(), ==(:target), x-> true)
(X, y)
end
end
"""Load a well-known sunspot time series (single table with one column)."""
macro load_sunspots()
quote
load_sunspots()
end
end
"""Load a well-known crab classification dataset with nominal features."""
macro load_crabs()
quote
y, X = unpack(load_crabs(), ==(:sp), x-> !(x in [:sex, :index]))
(X, y)
end
end
"""
Load S&P Stock Market dataset, as used in
[An Introduction to Statistical Learning with applications in
R](https://rdrr.io/cran/ISLR/man/Smarket.html),
by Witten et al (2013), Springer-Verlag, New York."""
macro load_smarket()
quote
y, X = unpack(load_smarket(), ==(:Direction), x-> true)
(X, y)
end
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 12348 | const EXTRA_KW_MAKE = """
* `eltype=Float64`: machine type of points (any subtype of
`AbstractFloat`).
* `rng=Random.GLOBAL_RNG`: any `AbstractRNG` object, or integer to seed a
`MersenneTwister` (for reproducibility).
* `as_table=true`: whether to return the points as a table (true)
or a matrix (false). """
const EXTRA_CLASSIFICATION =
"If `false` the target `y` has integer element type. "
"""
finalize_Xy(X, y, shuffle, as_table, eltype, rng; clf)
Internal function to finalize the `make_*` functions.
"""
function finalize_Xy(X, y, shuffle, as_table, eltype, rng; clf::Bool=true)
# Shuffle the rows if required
if shuffle
X, y = shuffle_rows(X, y; rng=rng)
end
if eltype != Float64
X = convert.(eltype, X)
end
# return as matrix if as_table=false
as_table || return X, y
clf && return MLJBase.table(X), categorical(y)
if length(size(y)) > 1
names = ((x) -> Symbol(string("target", x))).(collect(1:size(y, 2)))
return MLJBase.table(X), MLJBase.table(y; names)
else
clf && return MLJBase.table(X), categorical(y)
return MLJBase.table(X), y
end
end
### CLASSIFICATION TOY DATASETS
"""
runif_ab(rng, n, p, a, b)
Internal function to generate `n` points in `[a, b]ᵖ` uniformly at random.
"""
runif_ab(rng, n, p, a, b) = (b - a) .* rand(rng, n, p) .+ a
"""
X, y = make_blobs(n=100, p=2; kwargs...)
Generate Gaussian blobs for clustering and classification
problems.
### Return value
By default, a table `X` with `p` columns (features) and `n` rows
(observations), together with a corresponding vector of `n`
`Multiclass` target observations `y`, indicating blob membership.
### Keyword arguments
* `shuffle=true`: whether to shuffle the resulting points,
* `centers=3`: either a number of centers or a `c x p` matrix with `c`
pre-determined centers,
* `cluster_std=1.0`: the standard deviation(s) of each blob,
* `center_box=(-10. => 10.)`: the limits of the `p`-dimensional cube
within which the cluster centers are drawn if they are not provided,
$(EXTRA_KW_MAKE*EXTRA_CLASSIFICATION)
### Example
```julia
X, y = make_blobs(100, 3; centers=2, cluster_std=[1.0, 3.0])
```
"""
function make_blobs(n::Integer=100,
p::Integer=2;
shuffle::Bool=true,
centers::Union{<:Integer,Matrix{<:Real}}=3,
cluster_std::Union{<:Real,Vector{<:Real}}=1.0,
center_box::Pair{<:Real,<:Real}=(-10.0 => 10.0),
as_table::Bool=true,
eltype::Type{<:AbstractFloat}=Float64,
rng=Random.GLOBAL_RNG)
# check arguments make sense
if n < 1 || p < 1
throw(ArgumentError("Expected `n` and `p` to be at least 1."))
end
if center_box.first >= center_box.second
throw(ArgumentError(
"Domain for the centers improperly defined expected a pair " *
"`a => b` with `a < b`."))
end
rng = init_rng(rng)
if centers isa Matrix
if size(centers, 2) != p
throw(ArgumentError(
"The centers provided have dimension ($(size(centers, 2))) " *
"that doesn't match the one specified ($(p))."))
end
n_centers = size(centers, 1)
else
# in case the centers aren't provided, draw them from the box
n_centers = centers
centers = runif_ab(rng, n_centers, p, center_box...)
end
if cluster_std isa Vector
if length(cluster_std) != n_centers
throw(ArgumentError(
"$(length(cluster_std)) standard deviations given but there " *
"are $(n_centers) centers."))
end
if any(cluster_std .<= 0)
throw(ArgumentError(
"Cluster(s) standard deviation(s) must be positive."))
end
else
# In case only one std is given, repeat it for each center
cluster_std = fill(cluster_std, n_centers)
end
# split points equally among centers
ni, r = divrem(n, n_centers)
ns = fill(ni, n_centers)
ns[end] += r
# vector of memberships
y = vcat((fill(i, ni) for (i, ni) in enumerate(ns))...)
# Pre-generate random points then modify for each center
X = randn(rng, n, p)
    nss = [0, cumsum(ns)...]
    # ranges of rows for each center
    rows = [nss[i]+1:nss[i+1] for i in 1:n_centers]
@inbounds for c in 1:n_centers
Xc = view(X, rows[c], :)
# adjust standard deviation
Xc .*= cluster_std[c]
# adjust center
Xc .+= centers[c, :]'
end
return finalize_Xy(X, y, shuffle, as_table, eltype, rng)
end
"""
X, y = make_circles(n=100; kwargs...)
Generate `n` labeled points close to two concentric circles for
classification and clustering models.
### Return value
By default, a table `X` with `2` columns and `n` rows (observations),
together with a corresponding vector of `n` `Multiclass` target
observations `y`. The target is either `0` or `1`, corresponding to
membership to the smaller or larger circle, respectively.
### Keyword arguments
* `shuffle=true`: whether to shuffle the resulting points,
* `noise=0`: standard deviation of the Gaussian noise added to the data,
* `factor=0.8`: ratio of the smaller radius over the larger one,
$(EXTRA_KW_MAKE*EXTRA_CLASSIFICATION)
### Example
```julia
X, y = make_circles(100; noise=0.5, factor=0.3)
```
"""
function make_circles(n::Integer=100;
shuffle::Bool=true,
noise::Real=0.,
factor::Real=0.8,
as_table::Bool=true,
eltype::Type{<:AbstractFloat}=Float64,
rng=Random.GLOBAL_RNG)
# check arguments make sense
if n < 1
throw(ArgumentError("Expected `n` to be at least 1."))
end
if noise < 0
throw(ArgumentError("Noise argument cannot be negative."))
end
if !(0 < factor < 1)
throw(ArgumentError(
"Factor argument must be strictly between 0 and 1."))
end
rng = init_rng(rng)
# Generate points on a 2D circle
θs = runif_ab(rng, n, 1, 0, 2pi)
n0 = div(n, 2)
X = hcat(cos.(θs), sin.(θs))
X[1:n0, :] .*= factor
y = ones(Int, n)
y[1:n0] .= 0
if !iszero(noise)
X .+= noise .* randn(rng, n, 2)
end
return finalize_Xy(X, y, shuffle, as_table, eltype, rng)
end
"""
    make_moons(n::Int=150; kwargs...)
Generates labeled two-dimensional points lying close to two
interleaved semi-circles, for use with classification and clustering
models.
### Return value
By default, a table `X` with `2` columns and `n` rows (observations),
together with a corresponding vector of `n` `Multiclass` target
observations `y`. The target is either `0` or `1`, corresponding to
membership to the left or right semi-circle.
### Keyword arguments
* `shuffle=true`: whether to shuffle the resulting points,
* `noise=0.1`: standard deviation of the Gaussian noise added to the data,
* `xshift=1.0`: horizontal translation of the second center with respect to
the first one.
* `yshift=0.3`: vertical translation of the second center with respect
to the first one. $(EXTRA_KW_MAKE*EXTRA_CLASSIFICATION)
### Example
```julia
X, y = make_moons(100; noise=0.5)
```
"""
function make_moons(n::Int=150;
shuffle::Bool=true,
noise::Real=0.1,
xshift::Real=1.0,
yshift::Real=0.3,
as_table::Bool=true,
eltype::Type{<:AbstractFloat}=Float64,
rng=Random.GLOBAL_RNG)
# check arguments make sense
if n < 1
throw(ArgumentError("Expected `n` to be at least 1."))
end
if noise < 0
throw(ArgumentError("Noise argument cannot be negative."))
end
rng = init_rng(rng)
n1 = div(n, 2)
n2 = n - n1
θs = runif_ab(rng, n, 1, 0, pi)
θs[n2+1:end] .*= -1
X = hcat(cos.(θs), sin.(θs))
X[n2+1:end, 1] .+= xshift
X[n2+1:end, 2] .+= yshift
y = ones(Int, n)
y[1:n1] .= 0
if !iszero(noise)
X .+= noise .* randn(rng, n, 2)
end
return finalize_Xy(X, y, shuffle, as_table, eltype, rng)
end
### REGRESSION TOY DATASETS
"""
augment_X(X, fit_intercept)
Given a matrix `X`, append a column of ones if `fit_intercept` is true.
See [`make_regression`](@ref).
"""
function augment_X(X::Matrix{<:Real}, fit_intercept::Bool)
fit_intercept || return X
return hcat(X, ones(eltype(X), size(X, 1)))
end
"""
sparsify!(rng, θ, s)
Make portion `s` of vector `θ` exactly 0.
"""
sparsify!(rng, θ, s) = (θ .*= (rand(rng, length(θ)) .< s))
"""
outlify!(rng, y, s)
Add outliers to portion `s` of vector `y`.
"""
outlify!(rng, y, s) =
(n = length(y); y .+= 20 * randn(rng, n) .* (rand(rng, n) .< s))
const SIGMOID_64 = log(Float64(1)/eps(Float64) - Float64(1))
const SIGMOID_32 = log(Float32(1)/eps(Float32) - Float32(1))
"""
sigmoid(x)
Return the sigmoid computed in a numerically stable way:
``σ(x) = 1/(1+\\exp(-x))``
"""
function sigmoid(x::Float64)
x > SIGMOID_64 && return one(x)
x < -SIGMOID_64 && return zero(x)
return one(x) / (one(x) + exp(-x))
end
function sigmoid(x::Float32)
x > SIGMOID_32 && return one(x)
x < -SIGMOID_32 && return zero(x)
return one(x) / (one(x) + exp(-x))
end
sigmoid(x) = sigmoid(float(x))
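# For example, `sigmoid(0.0) == 0.5`, while inputs beyond the clipping thresholds
# saturate exactly: `sigmoid(1000.0) == 1.0` and `sigmoid(-1000.0) == 0.0`.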
"""
make_regression(n, p; kwargs...)
Generate Gaussian input features and a linear response with Gaussian
noise, for use with regression models.
### Return value
By default, a tuple `(X, y)` where table `X` has `p` columns and `n` rows (observations),
together with a corresponding vector of `n` `Continuous` target
observations `y`.
### Keywords
* `intercept=true`: Whether to generate data from a model with
intercept.
* `n_targets=1`: Number of columns in the target.
* `sparse=0`: Proportion of the generating weight vector that is sparse.
* `noise=0.1`: Standard deviation of the Gaussian noise added to the
response (target).
* `outliers=0`: Proportion of the response vector to make as outliers by
adding a random quantity with high variance. (Only applied if
`binary` is `false`.)
* `as_table=true`: Whether `X` (and `y`, if `n_targets > 1`) should be a table or a matrix.
* `eltype=Float64`: Element type for `X` and `y`. Must subtype `AbstractFloat`.
* `binary=false`: Whether the target should be binarized (via a sigmoid).
$EXTRA_KW_MAKE
### Example
```julia
X, y = make_regression(100, 5; noise=0.5, sparse=0.2, outliers=0.1)
```
"""
function make_regression(n::Int=100,
p::Int=2;
n_targets::Int=1,
intercept::Bool=true,
sparse::Real=0,
noise::Real=0.1,
outliers::Real=0,
binary::Bool=false,
as_table::Bool=true,
eltype::Type{<:AbstractFloat}=Float64,
rng=Random.GLOBAL_RNG)
# check arguments make sense
if n < 1 || p < 1
throw(ArgumentError("Expected `n` and `p` to be at least 1."))
end
if n_targets < 1
throw(ArgumentError("Expected `n_targets` to be at least 1."))
end
if !(0 <= sparse < 1)
throw(ArgumentError("Sparsity argument must be in [0, 1)."))
end
if noise < 0
throw(ArgumentError("Noise argument cannot be negative."))
end
if !(0 <= outliers <= 1)
throw(ArgumentError("Outliers argument must be in [0, 1]."))
end
rng = init_rng(rng)
X = augment_X(randn(rng, n, p), intercept)
y_shape = n_targets > 1 ? (n, n_targets) : n
theta_shape = n_targets > 1 ? (p + Int(intercept), n_targets) : (p + Int(intercept))
θ = randn(rng, theta_shape)
sparse > 0 && sparsify!(rng, θ, sparse)
y = X * θ
if !iszero(noise)
y .+= noise .* randn(rng, y_shape)
end
if binary
y = rand(rng, y_shape) .< sigmoid.(y)
else
if !iszero(outliers)
outlify!(rng, y, outliers)
end
end
return finalize_Xy(X[:,1:end-Int(intercept)], y, false,
as_table, eltype, rng; clf=binary)
end
## SCALE TRANSFORMATIONS
# Scale = SCALE()
# Object for dispatching on scales and functions when generating
# parameter ranges. We require different behaviour for scales and
# functions:
# transform(Scale, scale(:log10), 100) = 2
# inverse_transform(Scale, scale(:log10), 2) = 100
# but
# transform(Scale, scale(log10), 100) = 100 # identity
# inverse_transform(Scale, scale(log10), 100) = 2
struct SCALE end
Scale = SCALE()
scale(s::Symbol) = Val(s)
scale(f::Function) = f
transform(::SCALE, ::Val{:linear}, x) = x
inverse_transform(::SCALE, ::Val{:linear}, x) = x
transform(::SCALE, ::Val{:log}, x) = log(x)
inverse_transform(::SCALE, ::Val{:log}, x) = exp(x)
transform(::SCALE, ::Val{:logminus}, x) = log(-x)
inverse_transform(::SCALE, ::Val{:logminus}, x) = -exp(x)
transform(::SCALE, ::Val{:log10minus}, x) = log10(-x)
inverse_transform(::SCALE, ::Val{:log10minus}, x) = -10^x
transform(::SCALE, ::Val{:log10}, x) = log10(x)
inverse_transform(::SCALE, ::Val{:log10}, x) = 10^x
transform(::SCALE, ::Val{:log2}, x) = log2(x)
inverse_transform(::SCALE, ::Val{:log2}, x) = 2^x
transform(::SCALE, f::Function, x) = x # not a typo!
inverse_transform(::SCALE, f::Function, x) = f(x) # not a typo!
## SCALE INSPECTION (FOR EG PLOTTING)
"""
scale(r::ParamRange)
Return the scale associated with a `ParamRange` object `r`. The possible
return values are: `:none` (for a `NominalRange`), `:linear`, `:log`, `:logminus`,
`:log10`, `:log10minus`, `:log2`, or `:custom` (if `r.scale` is a callable object).
"""
scale(r::NominalRange) = :none
scale(r::NumericRange) = :custom
scale(r::NumericRange{T,B,Symbol}) where {B<:Boundedness,T} = r.scale
## ITERATOR METHOD (FOR GENERATING A 1D GRID)
"""
iterator([rng, ], r::NominalRange, [,n])
iterator([rng, ], r::NumericRange, n)
Return an iterator (currently a vector) for a `ParamRange` object `r`.
In the first case iteration is over all `values` stored in the range
(or just the first `n`, if `n` is specified). In the second case, the
iteration is over approximately `n` ordered values, generated as
follows:
1. First, exactly `n` values are generated between `U` and `L`, with a
spacing determined by `r.scale` (uniform if `scale=:linear`) where `U`
and `L` are given by the following table:
| `r.lower` | `r.upper` | `L` | `U` |
|-------------|------------|---------------------|---------------------|
| finite | finite | `r.lower` | `r.upper` |
| `-Inf` | finite | `r.upper - 2r.unit` | `r.upper` |
| finite | `Inf` | `r.lower` | `r.lower + 2r.unit` |
| `-Inf` | `Inf` | `r.origin - r.unit` | `r.origin + r.unit` |
2. If a callable `f` is provided as `scale`, then a uniform spacing
is always applied in (1) but `f` is broadcast over the results. (Unlike
ordinary scales, this alters the effective range of values generated,
instead of just altering the spacing.)
3. If `r` is a discrete numeric range (`r isa NumericRange{<:Integer}`)
then the values are additionally rounded, with any duplicate values
removed. Otherwise all the values are used (and there are exactly `n`
of them).
4. Finally, if a random number generator `rng` is specified, then the values are
returned in random order (sampling without replacement), and otherwise
they are returned in numeric order, or in the order provided to the
range constructor, in the case of a `NominalRange`.
"""
iterator(rng::AbstractRNG, r::ParamRange, args...) =
Random.shuffle(rng, iterator(r, args...))
iterator(r::NominalRange, ::Nothing) = iterator(r)
iterator(r::NominalRange, n::Integer) =
collect(r.values[1:min(n, length(r.values))])
iterator(r::NominalRange) = collect(r.values)
# numeric range, top level dispatch
function iterator(r::NumericRange{T,<:Bounded},
n::Int) where {T<:Real}
L = r.lower
U = r.upper
return iterator(T, L, U, r.scale, n)
end
function iterator(r::NumericRange{T,<:LeftUnbounded},
n::Int) where {T<:Real}
L = r.upper - 2r.unit
U = r.upper
return iterator(T, L, U, r.scale, n)
end
function iterator(r::NumericRange{T,<:RightUnbounded},
n::Int) where {T<:Real}
L = r.lower
U = r.lower + 2r.unit
return iterator(T, L, U, r.scale, n)
end
function iterator(r::NumericRange{T,<:DoublyUnbounded},
n::Int) where {T<:Real}
L = r.origin - r.unit
U = r.origin + r.unit
return iterator(T, L, U, r.scale, n)
end
# middle level
iterator(::Type{<:Real}, L, U, s, n) =
iterator(L, U, s, n)
function iterator(I::Type{<:Integer}, L, U, s, n)
raw = iterator(L, U, s, n)
rounded = map(x -> round(I, x), raw)
return unique(rounded)
end
# ground level
# if scale `s` is a callable (the fallback):
function iterator(L, U, s, n)
return s.(range(L, stop=U, length=n))
end
# if scale is a symbol:
function iterator(L, U, s::Symbol, n)
transformed = range(transform(Scale, scale(s), L),
stop=transform(Scale, scale(s), U),
length=n)
inverse_transformed = map(transformed) do value
inverse_transform(Scale, scale(s), value)
end
return inverse_transformed
end
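# Illustrative example (a sketch, assuming the `range` constructor defined elsewhere
# in this package):
#
#   r = range(Float64, :lambda, lower=0.01, upper=100, scale=:log10)
#   iterator(r, 5)      # ≈ [0.01, 0.1, 1.0, 10.0, 100.0] (uniform spacing in log10-space)
#
#   r = range(Int, :k, lower=1, upper=16, scale=:log2)
#   iterator(r, 5)      # [1, 2, 4, 8, 16] (rounded, duplicates removed)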
## FITTING DISTRIBUTIONS TO A RANGE
### Helper
function _truncated(d::Dist.Distribution, r::NumericRange)
if minimum(d) >= r.lower && maximum(d) <= r.upper
return d
else
return Dist.truncated(d, r.lower, r.upper)
end
end
### Fallback and docstring
"""
Distributions.fit(D, r::MLJBase.NumericRange)
Fit and return a distribution `d` of type `D` to the one-dimensional
range `r`.
Only types `D` in the table below are supported.
The distribution `d` is constructed in two stages. First, a
distribution `d0`, characterized by the conditions in the second column
of the table, is fit to `r`. Then `d0` is truncated between `r.lower`
and `r.upper` to obtain `d`.
Distribution type `D` | Characterization of `d0`
:----------------------|:-------------------------
`Arcsine`, `Uniform`, `Biweight`, `Cosine`, `Epanechnikov`, `SymTriangularDist`, `Triweight` | `minimum(d) = r.lower`, `maximum(d) = r.upper`
`Normal`, `Gamma`, `InverseGaussian`, `Logistic`, `LogNormal` | `mean(d) = r.origin`, `std(d) = r.unit`
`Cauchy`, `Gumbel`, `Laplace`, (`Normal`) | `Dist.location(d) = r.origin`, `Dist.scale(d) = r.unit`
`Poisson` | `Dist.mean(d) = r.unit`
Here `Dist = Distributions`.
"""
Dist.fit(::Type{D}, r::NumericRange) where D<:Distributions.Distribution =
throw(ArgumentError("Fitting distributions of type `$D` to "*
"`NumericRange` objects is unsupported. "*
"Try passing an explicit instance, or a supported type. "))
### Continuous support
##### bounded
for D in [:Arcsine, :Uniform]
@eval Dist.fit(::Type{<:Dist.$D}, r::NumericRange) =
Dist.$D(r.lower, r.upper)
end
for D in [:Biweight, :Cosine, :Epanechnikov, :SymTriangularDist, :Triweight]
@eval Dist.fit(::Type{<:Dist.$D}, r::NumericRange) =
Dist.$D(r.origin, r.unit)
end
##### doubly-unbounded
# corresponding to values of `Dist.location` and `Dist.scale`:
for D in [:Cauchy, :Gumbel, :Normal, :Laplace]
@eval Dist.fit(::Type{<:Dist.$D}, r::NumericRange) =
_truncated(Dist.$D(r.origin, r.unit), r)
end
# Logistic:
function Dist.fit(::Type{<:Dist.Logistic}, r::NumericRange)
μ = r.origin
θ = sqrt(3)*r.unit/pi
return _truncated(Dist.Logistic(μ, θ), r)
end
#### right-unbounded
# Gamma:
function Dist.fit(::Type{<:Dist.Gamma}, r::NumericRange)
α = (r.origin/r.unit)^2
θ = r.origin/α
_truncated(Dist.Gamma(α, θ), r)
end
# InverseGaussian:
function Dist.fit(::Type{<:Dist.InverseGaussian}, r::NumericRange)
mu = r.origin
lambda = mu^3/r.unit^2
return _truncated(Dist.InverseGaussian(mu, lambda), r)
end
# LogNormal:
function Dist.fit(::Type{<:Dist.LogNormal}, r::NumericRange)
sig2 = log((r.unit/r.origin)^2 + 1)
sig = sqrt(sig2)
mu = log(r.origin) - sig2/2
return _truncated(Dist.LogNormal(mu, sig), r)
end
### Discrete support
# Poisson:
function Dist.fit(::Type{<:Dist.Poisson}, r::NumericRange)
_truncated(Dist.Poisson(r.unit), r)
end
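# Illustrative sketch of the two-stage construction above:
#
#   r = range(Float64, :alpha, lower=0, upper=Inf, origin=2, unit=1)
#   Dist.fit(Dist.Gamma, r)   # Gamma(4, 0.5): mean ≈ r.origin = 2, std ≈ r.unit = 1
#
# (No truncation is applied in this case, as the Gamma support already lies in [0, Inf).)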
## SAMPLER (FOR RANDOM SAMPLING A 1D RANGE)
### Numeric case
struct NumericSampler{T,D<:Distributions.Sampleable,S}
distribution::D
scale::S
NumericSampler(::Type{T}, d::D, s::S) where {T,D,S} = new{T,D,S}(d,s)
end
function Base.show(stream::IO,
s::NumericSampler{T,D}) where {T,D}
repr = "NumericSampler{$T,$D}}"
s.scale isa Symbol || (repr = "transformed "*repr)
print(stream, repr)
return nothing
end
# constructor for distribution *instances*:
"""
sampler(r::NominalRange, probs::AbstractVector{<:Real})
sampler(r::NominalRange)
sampler(r::NumericRange{T}, d)
Construct an object `s` which can be used to generate random samples
from a `ParamRange` object `r` (a one-dimensional range) using one of
the following calls:
```julia
rand(s) # for one sample
rand(s, n) # for n samples
rand(rng, s [, n]) # to specify an RNG
```
The argument `probs` can be any probability vector with the same
length as `r.values`. The second `sampler` method above calls the
first with a uniform `probs` vector.
The argument `d` can be either an arbitrary instance of
`UnivariateDistribution` from the Distributions.jl package, or one of
the Distributions.jl *types* for which `fit(d, ::NumericRange)` is
defined. These include: `Arcsine`, `Uniform`, `Biweight`, `Cosine`,
`Epanechnikov`, `SymTriangularDist`, `Triweight`, `Normal`, `Gamma`,
`InverseGaussian`, `Logistic`, `LogNormal`, `Cauchy`, `Gumbel`,
`Laplace`, and `Poisson`; but see the doc-string for
[`Distributions.fit`](@ref) for an up-to-date list.
If `d` is an *instance*, then sampling is from a truncated form of the
supplied distribution `d`, the truncation bounds being `r.lower` and
`r.upper` (the attributes `r.origin` and `r.unit` attributes are
ignored). For discrete numeric ranges (`T <: Integer`) the samples are
rounded.
If `d` is a *type* then a suitably truncated distribution is
automatically generated using `Distributions.fit(d, r)`.
*Important.* Values are generated with no regard to `r.scale`, except
in the special case `r.scale` is a callable object `f`. In that case,
`f` is applied to all values generated by `rand` as described above
(prior to rounding, in the case of discrete numeric ranges).
### Examples
```julia-repl
julia> r = range(Char, :letter, values=collect("abc"))
julia> s = sampler(r, [0.1, 0.2, 0.7])
julia> samples = rand(s, 1000);
julia> StatsBase.countmap(samples)
Dict{Char,Int64} with 3 entries:
'a' => 107
'b' => 205
'c' => 688
julia> r = range(Int, :k, lower=2, upper=6) # numeric but discrete
julia> s = sampler(r, Normal)
julia> samples = rand(s, 1000);
julia> UnicodePlots.histogram(samples)
┌ ┐
[2.0, 2.5) ┤▇▇▇▇▇▇▇▇▇▇▇▇▇▇ 119
[2.5, 3.0) ┤ 0
[3.0, 3.5) ┤▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇ 296
[3.5, 4.0) ┤ 0
[4.0, 4.5) ┤▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇ 275
[4.5, 5.0) ┤ 0
[5.0, 5.5) ┤▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇ 221
[5.5, 6.0) ┤ 0
[6.0, 6.5) ┤▇▇▇▇▇▇▇▇▇▇▇ 89
└ ┘
```
"""
Distributions.sampler(r::NumericRange{T},
d::Distributions.UnivariateDistribution) where T =
NumericSampler(T, _truncated(d, r), r.scale)
# constructor for distribution *types*:
Distributions.sampler(r::NumericRange,
D::Type{<:Dist.UnivariateDistribution}) =
sampler(r, Dist.fit(D, r))
# rand fallbacks (non-integer ranges):
Base.rand(s::NumericSampler, dims::Integer...) =
s.scale.(rand(s.distribution, dims...))
Base.rand(rng::AbstractRNG, s::NumericSampler, dims::Integer...) =
s.scale.(rand(rng, s.distribution, dims...))
Base.rand(s::NumericSampler{<:Any,<:Dist.Sampleable,Symbol},
dims::Integer...) = rand(s.distribution, dims...)
Base.rand(rng::AbstractRNG,
s::NumericSampler{<:Any,<:Dist.Sampleable,Symbol},
dims::Integer...) =
rand(rng, s.distribution, dims...)
# rand for integer ranges:
Base.rand(s::NumericSampler{I}, dims::Integer...) where I<:Integer =
map(x -> round(I, s.scale(x)), rand(s.distribution, dims...))
Base.rand(rng::AbstractRNG,
s::NumericSampler{I},
dims::Integer...) where I<:Integer =
map(x -> round(I, s.scale(x)), rand(rng, s.distribution, dims...))
Base.rand(s::NumericSampler{I,<:Dist.Sampleable,Symbol},
dims::Integer...) where I<:Integer =
map(x -> round(I, x), rand(s.distribution, dims...))
Base.rand(rng::AbstractRNG,
s::NumericSampler{I,<:Dist.Sampleable,Symbol},
dims::Integer...) where I<:Integer =
map(x -> round(I, x), rand(rng, s.distribution, dims...))
## Nominal case:
struct NominalSampler{T,N,D<:Distributions.Sampleable} <: MLJType
distribution::D
values::NTuple{N,T}
NominalSampler(::Type{T}, d::D, values::NTuple{N,T}) where {T,N,D} =
new{T,N,D}(d, values)
end
function Base.show(stream::IO,
s::NominalSampler{T,N,D}) where {T,N,D}
samples = round3.(s.values)
seqstr = sequence_string(s.values)
repr = "NominalSampler($seqstr)"
print(stream, repr)
return nothing
end
# constructor for probability vectors:
function Distributions.sampler(r::NominalRange{T},
probs::AbstractVector{<:Real}) where T
length(probs) == length(r.values) ||
error("Length of probability vector must match number "*
"of range values. ")
return NominalSampler(T, Distributions.Categorical(probs), r.values)
end
# constructor for uniform sampling:
function Distributions.sampler(r::NominalRange{T,N}) where {T, N}
return sampler(r, fill(1/N, N))
end
Base.rand(s::NominalSampler, dims::I...) where I<:Integer =
broadcast(idx -> s.values[idx], rand(s.distribution, dims...))
Base.rand(rng::AbstractRNG,
s::NominalSampler,
dims::I...) where I<:Integer =
broadcast(idx -> s.values[idx], rand(rng, s.distribution, dims...))
## SCALE METHOD FOR SAMPLERS
# these mimick the definitions for 1D ranges above:
scale(::Any) = :none
scale(::NumericSampler) = :custom
scale(s::NumericSampler{<:Any,<:Distributions.Sampleable,Symbol}) = s.scale
## PARAMETER RANGES
abstract type ParamRange{T} end
Base.isempty(::ParamRange) = false
abstract type Boundedness end
abstract type Bounded <: Boundedness end
abstract type Unbounded <: Boundedness end
abstract type LeftUnbounded <: Unbounded end
abstract type RightUnbounded <: Unbounded end
abstract type DoublyUnbounded <: Unbounded end
struct NumericRange{T,B<:Boundedness,D} <: ParamRange{T}
field::Union{Symbol,Expr}
lower::Union{T,Float64} # Float64 to allow for -Inf
upper::Union{T,Float64} # Float64 to allow for Inf
origin::Float64
unit::Float64
scale::D
end
struct NominalRange{T,N} <: ParamRange{T}
field::Union{Symbol,Expr}
values::NTuple{N,T}
end
# return a suitable string representation of the value of a given field of `r` (such
# as `:lower`), applying a transformation in the
# case that `r.scale` is not a symbol, and returning "?" if applying the transformation
# throws an exception:
function _repr(r::NumericRange{T}, field) where T
value = getproperty(r, field)
if !(value isa Integer)
value = round(value, sigdigits=4)
end
r.scale isa Symbol && return repr(value)
return try
scaled = (r.scale)(value)
if T <: Integer
round(T, scaled)
else
round(scaled, sigdigits=4)
end
catch
"?"
end
end
function Base.show(stream::IO,
# ::MIME"text/plain",
r::NumericRange{T}) where T
fstr = string(r.field)
prefix = ""
suffix = ""
if r.scale isa Symbol
if r.scale !== :linear
suffix = "; on $(r.scale) scale"
end
else
prefix = "after scaling: "
end
repr = "NumericRange($(_repr(r, :lower)) ≤ $fstr ≤ $(_repr(r, :upper)); "*
prefix*"origin=$(_repr(r, :origin)), unit=$(_repr(r, :unit))$suffix)"
print(stream, repr)
return nothing
end
function Base.show(stream::IO,
# ::MIME"text/plain",
r::NominalRange{T}) where T
fstr = string(r.field)
seqstr = sequence_string(collect(r.values))
repr = "NominalRange($fstr = $seqstr)"
print(stream, repr)
return nothing
end
WARN_INFERRING_TYPE =
"Inferring the hyper-parameter type from the given "*
"model instance as the corresponding field is typed `Any`."
ERROR_AMBIGUOUS_UNION = ArgumentError(
"The inferred hyper-parameter type is ambiguous, because "*
"the union type contains multiple real subtypes. You can "*
"specify the correct type as first argument of `range`, as"*
" in the example, `range(Int, :dummy, lower=1, upper=10)`.")
"""
r = range(model, :hyper; values=nothing)
Define a one-dimensional `NominalRange` object for a field `hyper` of
`model`. Note that `r` is not directly iterable but `iterator(r)` is.
A nested hyperparameter is specified using dot notation. For example,
`:(atom.max_depth)` specifies the `max_depth` hyperparameter of
the submodel `model.atom`.
r = range(model, :hyper; upper=nothing, lower=nothing,
scale=nothing, values=nothing)
Assuming `values` is not specified, define a one-dimensional
`NumericRange` object for a `Real` field `hyper` of `model`. Note
that `r` is not directly iterable, but `iterator(r, n)` is an iterator
of length `n`. To generate random elements from `r`, instead apply
`rand` methods to `sampler(r)`. The supported scales are `:linear`,
`:log`, `:logminus`, `:log10`, `:log10minus`, `:log2`, or a callable
object.
By default, the behaviour of the constructed object depends on the
type of the value of the hyperparameter `:hyper` at `model` *at the
time of construction.* To override this behaviour (for instance if
`model` is not available) specify a type in place of `model` so the
behaviour is determined by the value of the specified type.
A nested hyperparameter is specified using dot notation (see above).
If `scale` is unspecified, it is set to `:linear`, `:log`,
`:log10minus`, or `:linear`, according to whether the interval
`(lower, upper)` is bounded, right-unbounded, left-unbounded, or
doubly unbounded, respectively. Note `upper=Inf` and `lower=-Inf` are
allowed.
If `values` is specified, the other keyword arguments are ignored and
a `NominalRange` object is returned (see above).
See also: [`iterator`](@ref), [`sampler`](@ref)
"""
function Base.range(model::Union{Model, Type}, field::Union{Symbol,Expr};
values=nothing, lower=nothing, upper=nothing,
origin=nothing, unit=nothing, scale::D=nothing) where D
all(==(nothing), [values, lower, upper, origin, unit]) &&
throw(ArgumentError("You must specify at least one of these: "*
"values=..., lower=..., upper=..., origin=..., "*
"unit=..."))
if model isa Model
T = recursive_getpropertytype(model, field)
if T === Any
@warn WARN_INFERRING_TYPE
T = typeof(recursive_getproperty(model, field))
end
else
T = model
end
possible_types = filter(t -> t <: Real, Base.uniontypes(T))
n_possible_types = length(possible_types)
if n_possible_types > 0 && values === nothing
n_possible_types > 1 && throw(ERROR_AMBIGUOUS_UNION)
return numeric_range(first(possible_types),
D,
field,
lower,
upper,
origin,
unit,
scale)
else
return nominal_range(T, field, values)
end
end
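# Illustrative usage (a sketch; the field names below are hypothetical):
#
#   r1 = range(Float64, :lambda, lower=1e-3, upper=1e3, scale=:log10)  # bounded NumericRange
#   r2 = range(Int, :K, lower=2, upper=Inf, origin=10, unit=5)         # right-unbounded
#   r3 = range(Char, :letter, values=collect("abc"))                   # NominalRange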
function numeric_range(T, D, field, lower, upper, origin, unit, scale)
lower === Inf &&
throw(ArgumentError("`lower` must be finite or `-Inf`."))
upper === -Inf &&
throw(ArgumentError("`upper` must be finite or `Inf`."))
lower === nothing && (lower = -Inf)
upper === nothing && (upper = Inf)
lower < upper ||
throw(ArgumentError("`lower` must be strictly less than `upper`."))
is_unbounded = (lower === -Inf || upper === Inf)
if origin === nothing
is_unbounded &&
throw(DomainError("For an unbounded range you must specify " *
"`origin=...` to define a centre.\nTo make " *
"the range bounded, specify finite " *
"`upper=...` and `lower=...`."))
origin = (upper + lower)/2
end
if unit === nothing
is_unbounded &&
throw(DomainError("For an unbounded range you must specify " *
"`unit=...` to define a unit of scale.\nTo " *
"make the range bounded, specify finite " *
"`upper=...` and `lower=...`."))
unit = (upper - lower)/2
end
unit > 0 || throw(DomainError("`unit` must be positive."))
origin < upper && origin > lower ||
throw(DomainError("`origin` must lie strictly between `lower` and " *
"`upper`."))
if lower === -Inf
if upper === Inf
B = DoublyUnbounded
scale === nothing && (scale = :linear)
else
B = LeftUnbounded
scale === nothing && (scale = :log10minus)
end
else
if upper === Inf
B = RightUnbounded
scale === nothing && (scale = :log10)
else
B = Bounded
scale === nothing && (scale = :linear)
end
end
lower isa Union{T, Float64} || (lower = convert(T, lower) )
upper isa Union{T, Float64} || (upper = convert(T, upper) )
scale isa Symbol && (D = Symbol)
return NumericRange{T,B,D}(field, lower, upper, origin, unit, scale)
end
nominal_range(T, field, values) = throw(ArgumentError(
"`$values` must be an instance of type `AbstractVector{<:$T}`."
    * (T <: Model ? "\n Perhaps you forgot to instantiate the model "
    * "as `$(T)()`" : "") ))
nominal_range(T, field, ::Nothing) = throw(ArgumentError(
"The inferred hyper-parameter type is $T, which is nominal. "*
"If this is true, you must specify values=... "*
"If this is false, specify the correct type as "*
"first argument of `range`, as in "*
"the example, "*
"`range(Int, :dummy, lower=1, upper=10)`. " ))
function nominal_range(::Type{T}, field, values::AbstractVector{<:T}) where T
return NominalRange{T,length(values)}(field, Tuple(values))
end
# specific def for T <: AbstractFloat (allows conversion between AbstractFloat and Signed types)
function nominal_range(::Type{T}, field,
values::AbstractVector{<:Union{AbstractFloat,Signed}}) where T<: AbstractFloat
return NominalRange{T,length(values)}(field, Tuple(values))
end
# specific def for T <: Signed (allows conversion between Signed types)
function nominal_range(::Type{T}, field,
values::AbstractVector{<:Signed}) where T<: Signed
return NominalRange{T,length(values)}(field, Tuple(values))
end
# `vtrait` (internal method, not to be re-exported)
MMI.vtrait(::FI, X, s) = ScientificTypes.vtrait(X)
# ------------------------------------------------------------------------
# `categorical`
MMI.categorical(::FI, a...; kw...) = categorical(a...; kw...)
# ------------------------------------------------------------------------
# `matrix`
MMI.matrix(::FI, ::Val{:table}, X; kw...) = Tables.matrix(X; kw...)
# ------------------------------------------------------------------------
# int
MMI.int(::FI, x; args...) = CategoricalDistributions.int(x; args...)
# ------------------------------------------------------------------------
# classes
MMI.classes(::FI, x) = CategoricalDistributions.classes(x)
# ------------------------------------------------------------------------
# `scitype`
function MMI.scitype(::FI, ::Union{Val{:other}, Val{:table}}, X)
return ScientificTypes.scitype(X)
end
# ------------------------------------------------------------------------
# `schema`
function MMI.schema(::FI, ::Union{Val{:other}, Val{:table}}, X)
return ScientificTypes.schema(X)
end
# ------------------------------------------------------------------------
# decoder
MMI.decoder(::FI, x) = CategoricalDistributions.decoder(x)
# ------------------------------------------------------------------------
# `table`
function MMI.table(::FI, cols::NamedTuple; prototype=NamedTuple())
Tables.istable(prototype) || error("`prototype` is not a table. ")
if !Tables.istable(cols)
tuple_of_vectors = tuple((collect(v) for v in values(cols))...)
names = keys(cols)
cols = NamedTuple{names}(tuple_of_vectors)
end
return Tables.materializer(prototype)(cols)
end
function MMI.table(::FI, A::AbstractMatrix; names=nothing, prototype=nothing)
if names === nothing
_names = [Symbol(:x, j) for j in 1:size(A, 2)]
else
_names = collect(names)
end
matrix_table = Tables.table(A, header=_names)
prototype === nothing && return matrix_table
return Tables.materializer(prototype)(matrix_table)
end
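# Illustrative sketch of the matrix method above (in user code this is reached via
# `MLJBase.table`):
#
#   A = [1.0 2.0; 3.0 4.0]
#   MLJBase.table(A)                   # a Tables.jl table with columns :x1, :x2
#   MLJBase.table(A, names=[:a, :b])   # the same data, with columns :a, :b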
# ------------------------------------------------------------------------
# `nrows`, `selectrows`, `selectcols`
function MMI.nrows(::FI, ::Val{:table}, X)
if Tables.rowaccess(X)
rows = Tables.rows(X)
return _nrows_rat(Base.IteratorSize(typeof(rows)), rows)
else
cols = Tables.columns(X)
return _nrows_cat(cols)
end
end
# number of rows for columnaccessed table
function _nrows_cat(cols)
names = Tables.columnnames(cols)
!isempty(names) || return 0
return length(Tables.getcolumn(cols, names[1]))
end
# number of rows for rowaccessed table
_nrows_rat(::Base.HasShape, rows) = size(rows, 1)
_nrows_rat(::Base.HasLength, rows) = length(rows)
_nrows_rat(iter_size, rows) = length(collect(rows))
MMI.selectrows(::FI, ::Val{:table}, X, ::Colon) = X
MMI.selectcols(::FI, ::Val{:table}, X, ::Colon) = X
function MMI.selectrows(::FI, ::Val{:table}, X, r)
r = r isa Integer ? (r:r) : r
# next uncommented line is a hack; see
# https://github.com/JuliaAI/MLJBase.jl/issues/151
isdataframe(X) && return X[r, :]
cols = Tables.columntable(X)
new_cols = NamedTuple{keys(cols)}(tuple((c[r] for c in values(cols))...))
return Tables.materializer(X)(new_cols)
end
function MMI.selectcols(::FI, ::Val{:table}, X, c::Union{Symbol, Integer})
cols = Tables.columns(X)
return Tables.getcolumn(cols, c)
end
function MMI.selectcols(::FI, ::Val{:table}, X, c::Union{Colon, AbstractArray})
if isdataframe(X)
return X[!, c]
else
cols = Tables.columntable(X) # named tuple of vectors
newcols = project(cols, c)
return Tables.materializer(X)(newcols)
end
end
# -------------------------------
# utils for `select`*
# project named tuple onto a tuple with only specified `labels` or indices:
function project(t::NamedTuple, labels::AbstractArray{Symbol})
return NamedTuple{tuple(labels...)}(t)
end
project(t::NamedTuple, label::Colon) = t
project(t::NamedTuple, label::Symbol) = project(t, [label,])
project(t::NamedTuple, i::Integer) = project(t, [i,])
function project(t::NamedTuple, indices::AbstractArray{<:Integer})
return NamedTuple{tuple(keys(t)[indices]...)}(tuple([t[i] for i in indices]...))
end
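# For example (illustrative):
#
#   project((a=1, b=2, c=3), [:a, :c])  # (a = 1, c = 3)
#   project((a=1, b=2, c=3), 2)         # (b = 2,)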
# utils for selectrows
typename(X) = split(string(supertype(typeof(X))), '.')[end]
isdataframe(X) = typename(X) == "AbstractDataFrame"
# ----------------------------------------------------------------
# univariate finite
MMI.UnivariateFinite(::FI, b...; kwargs...) =
CategoricalDistributions.UnivariateFinite(b...; kwargs...)
# Add fallbacks for predict_* which require mean, mode, median.
const BadMeanTypes = Union{AbstractArray{<:Finite},Table(Finite)}
const BadMedianTypes = Union{AbstractArray{<:Finite},Table(Finite)}
err_wrong_target_scitype(actual_scitype) = ArgumentError(
    "Attempting to compute the mean or median of predictions made "*
    "by a model with `$actual_scitype` targets. ")
# mode:
predict_mode(m, fitresult, Xnew) =
mode.(predict(m, fitresult, Xnew))
# mean:
predict_mean(m, fitresult, Xnew) =
predict_mean(m, fitresult, Xnew, target_scitype(m))
predict_mean(m, fitresult, Xnew, ::Any) =
mean.(predict(m, fitresult, Xnew))
predict_mean(m, fitresult, Xnew, ::Type{<:BadMeanTypes}) =
throw(err_wrong_target_scitype(Finite))
# median:
predict_median(m, fitresult, Xnew) =
predict_median(m, fitresult, Xnew, target_scitype(m))
predict_median(m, fitresult, Xnew, ::Any) =
median.(predict(m, fitresult, Xnew))
predict_median(m, fitresult, Xnew, ::Type{<:BadMedianTypes}) =
throw(err_wrong_target_scitype(Finite))
# not in MLJModelInterface as methodswith requires InteractiveUtils
MLJModelInterface.implemented_methods(::FI, M::Type{<:MLJType}) =
getfield.(methodswith(M), :name) |> unique
# The following serialization fallbacks should live in
# MLJModelInterface when version 2.0 is released. At that time the
# hack block could also be removed.
#####################
# hack block begins #
#####################
const ERR_SERIALIZATION_FAILURE = ErrorException(
"Serialization failure. You are using a model that implements an outdated "*
"version of the serialization API. If you are using "*
"a model from XGBoost.jl, try using MLJXGBoostInterface 2.0 or "*
"or higher. "
)
const ERR_DESERIALIZATION_FAILURE = ErrorException(
"Deserialization failure. Your model must be deserialized using "*
"using MLJBase < 0.20 and MLJSerialization < 2.0. If this is an "*
"XGBoost.jl model, be sure to use MLJXGBoostInterface < 2.0. "
)
MLJModelInterface.save(filename, model, fitresult; kwargs...) =
throw(ERR_SERIALIZATION_FAILURE)
MLJModelInterface.restore(filename, model, serializable_fitresult) =
throw(ERR_DESERIALIZATION_FAILURE)
###################
# hack block ends #
###################
MLJModelInterface.save(model, fitresult; kwargs...) = fitresult
MLJModelInterface.restore(model, serializable_fitresult) =
serializable_fitresult
# to suppress inclusion of abstract types in the model registry.
for T in (:Supervised, :Unsupervised,
:Interval, :Static, :Deterministic, :Probabilistic)
ex = quote
MLJModelInterface.is_wrapper(::Type{$T}) = true
end
eval(ex)
end
mutable struct DRegressor <: Deterministic end
MLJBase.target_scitype(::Type{<:DRegressor}) =
AbstractVector{<:Union{Missing,Continuous}}
mutable struct D2Regressor <: Deterministic end
MLJBase.target_scitype(::Type{<:D2Regressor}) =
AbstractVector{<:Union{Missing,Continuous}}
mutable struct DClassifier <: Deterministic end
MLJBase.target_scitype(::Type{<:DClassifier}) =
AbstractVector{<:Union{Missing,Finite}}
mutable struct DClassifierWeird <: Deterministic end
MLJBase.target_scitype(::Type{<:DClassifierWeird}) =
AbstractVector{<:Textual}
mutable struct PClassifier <: Probabilistic end
MLJBase.target_scitype(::Type{<:PClassifier}) =
AbstractVector{<:Union{Missing,Finite}}
mutable struct PRegressor <: Probabilistic end
MLJBase.target_scitype(::Type{<:PRegressor}) =
AbstractVector{<:Union{Missing,Continuous}}
mutable struct PCountRegressor <: Probabilistic end
MLJBase.target_scitype(::Type{<:PCountRegressor}) =
AbstractVector{<:Union{Missing,Count}}
@testset "default_measure" begin
@test MLJBase.default_measure(DRegressor()) == l2
@test MLJBase.default_measure(D2Regressor()) == l2
@test MLJBase.default_measure(DClassifier()) == misclassification_rate
@test MLJBase.default_measure(PClassifier()) == log_loss
@test MLJBase.default_measure(PRegressor()) == log_loss
@test MLJBase.default_measure(PCountRegressor()) == log_loss
@test isnothing(MLJBase.default_measure(DClassifierWeird()))
@test isnothing(MLJBase.default_measure("junk"))
end
true
module TestMachines
using MLJBase
using Test
using Statistics
using ..Models
using StableRNGs
using Serialization
using ..TestUtilities
using StatisticalMeasures
const MLJModelInterface = MLJBase.MLJModelInterface
const MMI = MLJModelInterface
verbosity = 0
N=50
X = (a=rand(N), b=rand(N), c=rand(N));
y = 2*X.a - X.c + 0.05*rand(N);
train, test = partition(eachindex(y), 0.7);
tree = DecisionTreeRegressor(max_depth=5)
knn = KNNRegressor()
pca = PCA()
@testset "_contains_unknown" begin
@test MLJBase._contains_unknown(Unknown)
@test MLJBase._contains_unknown(Tuple{Unknown})
@test MLJBase._contains_unknown(Tuple{Unknown, Int})
@test MLJBase._contains_unknown(Union{Tuple{Unknown}, Tuple{Int,Char}})
@test MLJBase._contains_unknown(Union{Tuple{Int}, Tuple{Int,Unknown}})
@test !MLJBase._contains_unknown(Int)
@test !MLJBase._contains_unknown(Tuple{Int})
@test !MLJBase._contains_unknown(Tuple{Char, Int})
@test !MLJBase._contains_unknown(Union{Tuple{Int}, Tuple{Int,Char}})
@test !MLJBase._contains_unknown(Union{Tuple{Int}, Tuple{Int,Char}})
end
struct StaticYoghurt <: Static end
MLJBase.transform(::StaticYoghurt, _, X) = X
@testset "machine constructor for Static models" begin
end
@testset "machine training and inpection" begin
t = machine(tree, X, y)
@test_throws MLJBase.NotTrainedError(t, :fitted_params) fitted_params(t)
@test_throws MLJBase.NotTrainedError(t, :report) report(t)
@test_throws MLJBase.NotTrainedError(t, :training_losses) training_losses(t)
@test_throws MLJBase.NotTrainedError(t, :feature_importances) feature_importances(t)
@test_logs (:info, r"Training") fit!(t)
@test_logs (:info, r"Training") fit!(t, rows=train)
@test_logs (:info, r"Not retraining") fit!(t, rows=train)
@test_logs (:info, r"Training") fit!(t)
t.model.max_depth = 1
@test_logs (:info, r"Updating") fit!(t)
# The following tests only pass when machine `t` has been fitted
@test fitted_params(t) == MMI.fitted_params(t.model, t.fitresult)
@test isnothing(report(t))
@test training_losses(t) === nothing
@test feature_importances(t) === nothing
predict(t, selectrows(X,test));
@test rms(predict(t, selectrows(X, test)), y[test]) < std(y)
# cache type parameter
mach = machine(ConstantRegressor(), X, y, cache=false)
@test !MLJBase.caches_data(mach)
mach = machine(ConstantRegressor(), X, y)
@test MLJBase.caches_data(mach)
@test_logs (:info, r"Training") fit!(mach)
yhat = predict_mean(mach, X);
n = nrows(X)
@test rms(yhat, y) ≈ std(y)*sqrt(1 - 1/n)
# test an unsupervised univariate case:
mach = machine(UnivariateStandardizer(), float.(1:5))
@test_logs (:info, r"Training") fit!(mach)
@test isempty(params(mach))
# test a frozen Machine
stand = machine(Standardizer(), source((x1=rand(10),)))
freeze!(stand)
@test_logs (:warn, r"not trained as it is frozen\.$") fit!(stand)
end
@testset "machine instantiation warnings" begin
@test_throws DimensionMismatch machine(tree, X, y[1:end-1])
# supervised model with bad target:
@test_logs((:warn,
MLJBase.alert_generic_scitype_mismatch(
Tuple{scitype(X), AbstractVector{Multiclass{N}}},
MLJBase.fit_data_scitype(tree),
typeof(tree)
)
),
machine(tree, X, categorical(1:N)))
# ordinary transformer:
@test_logs((:warn,
MLJBase.alert_generic_scitype_mismatch(
Tuple{scitype(42),},
MLJBase.fit_data_scitype(pca),
typeof(pca)
)
),
machine(pca, 42))
y2 = coerce(1:N, OrderedFactor);
# bad weight vector:
@test_logs((:warn,
MLJBase.alert_generic_scitype_mismatch(
Tuple{scitype(X), scitype(y2), scitype(42)},
MLJBase.fit_data_scitype(ConstantClassifier()),
ConstantClassifier
)
),
machine(ConstantClassifier(), X, y2, 42))
end
struct FooBar <: Model end
MLJBase.fit_data_scitype(::Type{<:FooBar}) =
Union{Tuple{AbstractVector{Count}},
Tuple{AbstractVector{Count},AbstractVector{Continuous}}}
struct FooBarUnknown <: Model end
@testset "machine scitype_check_level" begin
X = [1, 2, 3, 4]
y = rand(4)
# with no Unknown scitypes
model = FooBar()
for scitype_check_level in [1, 2]
@test_logs machine(model, X, y; scitype_check_level)
@test_logs machine(model, X; scitype_check_level)
@test_logs((:warn,
MLJBase.alert_generic_scitype_mismatch(Tuple{scitype(y)},
fit_data_scitype(model),
FooBar)),
machine(model, y; scitype_check_level))
end
scitype_check_level = 3
@test_logs machine(model, X, y; scitype_check_level)
@test_logs machine(model, X; scitype_check_level)
@test_throws(ArgumentError(
MLJBase.alert_generic_scitype_mismatch(Tuple{scitype(y)},
fit_data_scitype(model),
FooBar)),
machine(model, y; scitype_check_level))
@test default_scitype_check_level() == 1
default_scitype_check_level(3)
@test default_scitype_check_level() == 3
@test_logs machine(model, X, y)
@test_logs machine(model, X)
@test_throws(ArgumentError(
MLJBase.alert_generic_scitype_mismatch(Tuple{scitype(y)},
fit_data_scitype(model),
FooBar)),
machine(model, y))
default_scitype_check_level(1)
# with Unknown scitypes
model = FooBarUnknown()
scitype_check_level = 1
@test_logs machine(model, X, y; scitype_check_level)
@test_logs machine(model, X; scitype_check_level)
warning = MLJBase.WARN_UNKNOWN_SCITYPE
for scitype_check_level in [2, 3]
@test_logs (:warn, warning) machine(model, X, y; scitype_check_level)
@test_logs (:warn, warning) machine(model, X; scitype_check_level)
end
scitype_check_level = 4
@test_throws ArgumentError(warning) machine(model, X, y; scitype_check_level)
@test_throws ArgumentError(warning) machine(model, X; scitype_check_level)
end
@testset "copy(::Machine) and replace(::Machine, ...)" begin
mach = machine(tree, X, y)
clone = copy(mach)
@test all(fieldnames(typeof(mach))) do field
field === :fit_okay && return true
a = !isdefined(mach, field)
        b = !isdefined(clone, field)
a ⊻ b && return false # xor
a && b && return true
getproperty(mach, field) === getproperty(clone, field)
end
@test typeof(mach) == typeof(clone)
fit!(mach, verbosity=0)
clone = copy(mach)
@test all(fieldnames(typeof(mach))) do field
field === :fit_okay && return true
a = !isdefined(mach, field)
        b = !isdefined(clone, field)
a ⊻ b && return false
a && b && return true
getproperty(mach, field) === getproperty(clone, field)
end
mach = machine(tree, X, y)
s1 = source(42)
s2 = source(21)
mach_clone = replace(
mach,
:args=>(s1, s2),
:report=>57,
)
@test mach_clone.args == (s1, s2)
@test mach_clone.report == 57
@test !isdefined(mach_clone, :fitresult)
@test mach_clone.model == tree
@test mach_clone.state == mach.state
mach_clone = replace(mach, :model=>knn)
@test mach_clone.model === knn
end
@testset "weights" begin
yraw = ["Perry", "Antonia", "Perry", "Skater"]
X = (x=rand(4),)
y = categorical(yraw)
w = [2, 3, 2, 5]
# without weights:
mach = machine(ConstantClassifier(), X, y)
fit!(mach, verbosity=0)
d1 = predict(mach, X)[1]
d2 = MLJBase.UnivariateFinite([y[1], y[2], y[4]], [0.5, 0.25, 0.25])
@test all([pdf(d1, c) ≈ pdf(d2, c) for c in MLJBase.classes(d1)])
# with weights:
mach = machine(ConstantClassifier(), X, y, w)
fit!(mach, verbosity=0)
d1 = predict(mach, X)[1]
d2 = MLJBase.UnivariateFinite([y[1], y[2], y[4]], [1/3, 1/4, 5/12])
@test all([pdf(d1, c) ≈ pdf(d2, c) for c in MLJBase.classes(d1)])
end
mutable struct Scale <: MLJBase.Static
scaling::Float64
end
function MLJBase.transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X * s.scaling
MLJBase.table(s.scaling * MLJBase.matrix(X), prototype=X)
end
function MLJBase.inverse_transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X / s.scaling
MLJBase.table(MLJBase.matrix(X) / s.scaling, prototype=X)
end
@testset "static transformer machines" begin
s = Scale(2)
X = ones(2, 3)
mach = @test_logs machine(Scale(2))
transform(mach, X) # triggers training of `mach`, ie is mutating
@test report(mach) in [nothing, NamedTuple()]
@test isnothing(fitted_params(mach))
@test_throws(
MLJBase.ERR_STATIC_ARGUMENTS,
machine(Scale(2), X),
)
@test_throws(
MLJBase.ERR_STATIC_ARGUMENTS,
machine(Scale(2), source(X)),
)
@test_logs (:info, r"Not retraining") fit!(mach) # no-op
state = mach.state
R = transform(mach, X)
IR = inverse_transform(mach, R)
@test IR ≈ X
# changing rows does not alter state (and "training" is skipped):
@test_logs (:info, r"Not retraining") fit!(mach, rows=1:3)
@test mach.state == state
# changing hyper-parameters *does* change state (and "training" is
# not skipped):
mach.model.scaling = 3.0
@test_logs (:info, r"Updating") fit!(mach, rows=1:3)
@test mach.state != state
@test_throws ArgumentError transform(mach, rows=1:2)
end
mutable struct Box
matrix::Matrix{Int}
end
## DUMMY UNSUPERVISED MODEL
mutable struct Fozy <: Unsupervised end
MLJBase.fit(model::Fozy, verbosity, X) = minimum(X.matrix), nothing, nothing
MLJBase.transform(model::Fozy, fitresult, newbox) =
fill(fitresult, nrows(newbox.matrix))
MLJBase.MLJModelInterface.reformat(model::Fozy, user_data) =
(Box(MLJBase.matrix(user_data)),)
MLJBase.selectrows(model::Fozy, I, X...) = (Box(X[1].matrix[I,:]),)
## BABY SUPERVISED MODEL WITH AN UPDATE METHOD AND FEATURE IMPORTANCE
mutable struct SomeModel <: Deterministic
n::Int
end
function MLJModelInterface.fit(model::SomeModel,
verbosity,
A,
y)
n = model.n
cache = (A \ y)' # all coefficients
n_features = length(cache)
# kill all but first n coefficients:
fitresult = vcat(cache[1:n], fill(0.0, n_features - n))
report = (n_features=n_features, )
return fitresult, cache, report
end
function MLJModelInterface.update(model::SomeModel,
verbosity,
old_fitresult,
old_cache,
A, # ignored in this case
y) # ignored in this case
n = model.n
cache = old_cache # coefficients already obtained in `fit`
n_features = length(cache)
# kill all but first n coefficients:
fitresult = vcat(cache[1:n], fill(0.0, n_features - n))
report = (n_features=n_features, )
return fitresult, cache, report
end
function MLJModelInterface.predict(::SomeModel, fitresult, Xnew)
Anew = MLJBase.matrix(Xnew)
return Anew*fitresult
end
const dummy_importances = [:x1 => 1.0, ]
MLJModelInterface.reports_feature_importances(::Type{<:SomeModel}) = true
MLJModelInterface.feature_importances(::SomeModel, fitresult, report) = dummy_importances
MLJModelInterface.supports_training_losses(::Type{<:SomeModel}) = true
MLJModelInterface.training_losses(::SomeModel, report) = 1:report.n_features
MLJModelInterface.reformat(model::SomeModel, X, y) = (MLJBase.matrix(X), y)
MLJModelInterface.selectrows(model::SomeModel, I, A, y) =
(view(A, I, :), view(y, I))
@testset "feature_importances" begin
mach = machine(SomeModel(1), make_regression(10)...)
fit!(mach, verbosity=0)
@test feature_importances(mach) == dummy_importances
mach = machine(KNNClassifier(), make_blobs(10)...)
fit!(mach, verbosity=0)
@test isnothing(feature_importances(mach))
end
@testset "overloading reformat(::Model, ...), selectrows(::Model, ...)" begin
# dummy unsupervised model:
model = Fozy()
args = ((x1=[10, 30, 50], x2 = [20, 40, 60]),)
data = MLJBase.MLJModelInterface.reformat(model, args...)
@test data[1] isa Box && data[1].matrix == [10 20; 30 40; 50 60]
@test selectrows(model, 2:3, data...)[1].matrix == [30 40; 50 60]
@test fit(model, 1, data...)[1] == 10
mach = machine(model, args...)
@test_logs (:info, r"Training") fit!(mach, rows=2:3);
@test transform(mach, (x1 = 1:4, x2 = 1:4)) == [30, 30, 30, 30]
# supervised model with an update method:
rng = StableRNGs.StableRNG(123)
A = rand(rng, 8, 3)
y = A*[1, 2, 3]
X = MLJBase.table(A)
model = SomeModel(1)
mach = machine(model, X, y)
@test_mach_sequence fit!(mach, rows=1:4) [(:train, mach),]
Xnew = selectrows(X, 1:4)
@test predict(mach, Xnew) ≈ A[1:4,1]
# mutate the model to trigger `update` call:
model.n=3
@test_mach_sequence fit!(mach, rows=1:4) [(:update, mach), ]
@test predict(mach, Xnew) ≈ y[1:4]
# change the rows to be sampled:
@test_mach_sequence fit!(mach) [(:train, mach),]
@test predict(mach, Xnew) ≈ y[1:4]
end
@testset "fit! for models with reformat front-end" begin
X = (x1=ones(5), x2=2*ones(5))
y = categorical(collect("abaaa"))
clf = ConstantClassifier()
clf = ConstantClassifier(testing=true)
mach = machine(clf, X, y, cache=true)
# first call to fit reformats data and resamples data:
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(mach, rows=1:3, verbosity=0))
@test mach.data == (MLJBase.matrix(X), y)
@test mach.resampled_data[1] == mach.data[1][1:3,:]
@test mach.resampled_data[2] == y[1:3]
yhat = @test_logs (:info, r"reformatting X") predict_mode(mach, X)
@test yhat == fill('a', 5)
yhat = @test_logs (:info, "resampling X") predict_mode(mach, rows=1:2)
@test yhat == fill('a', 2)
# calling fit! with new `rows` triggers resampling but no
# reformatting:
@test_logs((:info, "resampling X, y"),
fit!(mach, rows=1:2, verbosity=0))
end
@testset "Test serializable method of Supervised Machine" begin
X, y = make_regression(100, 1)
filename = "decisiontree.jls"
# test error for untrained machines:
mach = machine(DecisionTreeRegressor(), X, y)
@test_throws(
MLJBase.ERR_SERIALIZING_UNTRAINED,
serializable(mach),
)
fit!(mach, verbosity=0)
# Check serializable function
smach = MLJBase.serializable(mach)
@test smach === MLJBase.serializable(smach) # check no-op if repeated
@test smach.report == mach.report
@test smach.fitresult == mach.fitresult
@test_throws(ArgumentError, predict(smach))
@test_logs (:warn, MLJBase.WARN_SERIALIZABLE_MACH) predict(smach, X)
TestUtilities.generic_tests(mach, smach)
# Check restore! function
Serialization.serialize(filename, smach)
smach = Serialization.deserialize(filename)
MLJBase.restore!(smach)
@test smach.state == 1
@test MLJBase.predict(smach, X) == MLJBase.predict(mach, X)
@test fitted_params(smach) isa NamedTuple
@test report(smach) == report(mach)
# repeated `restore!` makes no difference:
MLJBase.restore!(smach)
@test MLJBase.predict(smach, X) == MLJBase.predict(mach, X)
rm(filename)
# End to end save and reload
MLJBase.save(filename, mach)
smach = machine(filename)
@test smach.state == 1
@test predict(smach, X) == predict(mach, X)
rm(filename)
end
@testset "Test serializable method of Unsupervised Machine" begin
X, _ = make_regression(100, 1)
filename = "standardizer.jls"
mach = machine(Standardizer(), X)
fit!(mach, verbosity=0)
MLJBase.save(filename, mach)
smach = machine(filename)
@test transform(mach, X) == transform(smach, X)
@test_throws(ArgumentError, transform(smach))
# warning on non-restored machine
smach = deserialize(filename)
@test_logs (:warn, MLJBase.WARN_SERIALIZABLE_MACH) transform(smach, X)
rm(filename)
end
# define a model with non-persistent fitresult:
thing = []
struct EphemeralTransformer <: Unsupervised end
function MLJModelInterface.fit(::EphemeralTransformer, verbosity, X)
view = pointer(thing)
fitresult = (thing, view)
return fitresult, nothing, NamedTuple()
end
function MLJModelInterface.transform(::EphemeralTransformer, fitresult, X)
thing, view = fitresult
return view == pointer(thing) ? X : throw(ErrorException("dead fitresult"))
end
function MLJModelInterface.save(::EphemeralTransformer, fitresult)
thing, _ = fitresult
return thing
end
function MLJModelInterface.restore(::EphemeralTransformer, serialized_fitresult)
view = pointer(thing)
return (thing, view)
end
# commented out code just tests the transformer above has desired properties for testing:
# # test model transforms:
# model = EphemeralTransformer()
# mach = machine(model, 42) |> fit!
# @test MLJBase.transform(mach, 27) == 27
# # direct serialization fails:
# io = IOBuffer()
# serialize(io, mach)
# seekstart(io)
# mach2 = deserialize(io)
# @test_throws ErrorException("dead fitresult") transform(mach2, 42)
@testset "serialization for model with non-persistent fitresult" begin
X = (; x=randn(5))
mach = machine(EphemeralTransformer(), X)
fit!(mach, verbosity=0)
v = MLJBase.transform(mach, X).x
io = IOBuffer()
serialize(io, serializable(mach))
seekstart(io)
mach2 = restore!(deserialize(io))
@test MLJBase.transform(mach2, X).x == v
# using `save`/`machine`:
MLJBase.save(io, mach)
seekstart(io)
mach2 = machine(io)
@test MLJBase.transform(mach2, X).x == v
end
@testset "serialization for model with non-persistent fitresult in pipeline" begin
# https://github.com/JuliaAI/MLJBase.jl/issues/927
X = (; x=randn(5))
pipe = Standardizer |> EphemeralTransformer
X = (; x=randn(5))
mach = machine(pipe, X)
fit!(mach, verbosity=0)
v = MLJBase.transform(mach, X).x
io = IOBuffer()
serialize(io, serializable(mach))
seekstart(io)
mach2 = restore!(deserialize(io))
@test MLJBase.transform(mach2, X).x == v
# using `save`/`machine`:
MLJBase.save(io, mach)
seekstart(io)
mach2 = machine(io)
@test MLJBase.transform(mach2, X).x == v
end
struct ReportingDynamic <: Unsupervised end
MLJBase.fit(::ReportingDynamic, _, X) = nothing, 16, NamedTuple()
MLJBase.transform(::ReportingDynamic,_, X) = (X, (news=42,))
MLJBase.reporting_operations(::Type{<:ReportingDynamic}) = (:transform, )
@testset "corner case for operation applied to a reporting machinw" begin
model = ReportingDynamic()
mach = fit!(machine(model, [1,2,3]), verbosity=0)
@test transform(mach, rows=:) == [1, 2, 3]
@test transform(mach, rows=1:2) == [1, 2]
end
@testset "machines with symbolic model placeholders" begin
struct CherryComposite
rgs
end
composite = CherryComposite(SomeModel(1))
X = MLJBase.table(
[1.0 3.0
2.0 4.0]
)
y = [1.0, 2.0]
mach = machine(:rgs, X, y)
@test isnothing(last_model(mach))
fit!(mach; composite, verbosity=0)
@test last_model(mach) == composite.rgs
@test fitted_params(mach).fitresult ≈ [1.0, 0.0]
@test report(mach) == (; n_features = 2)
@test training_losses(mach) == 1:2
@test feature_importances(mach) == dummy_importances
Xnew = MLJBase.table(rand(2, 2))
@test size(predict(mach, Xnew)) == (2,)
# if composite.rgs is changed, but keeps same type, get an update:
composite = CherryComposite(SomeModel(2))
@test_logs (:info, r"Updating") fit!(mach; composite)
# if composite.rgs is changed, but type changes, retrain from scratch:
composite = CherryComposite(StaticYoghurt())
@test_logs (:info, r"Training") fit!(mach; composite)
# no training arguments:
composite = CherryComposite(StaticYoghurt())
mach = machine(:rgs)
@test isnothing(last_model(mach))
@test_throws MLJBase.err_no_real_model(mach) transform(mach, X)
fit!(mach; composite, verbosity=0)
@test transform(mach, X) == X
end
end # module
true
struct Potato <: Model end
@testset "message_expecting_model" begin
m1 = MLJBase.message_expecting_model(123)
@test !isnothing(match(r"(Expected)", m1))
@test isnothing(match(r"(type)", m1))
@test isnothing(match(r"(mispelled)", m1))
m2 = MLJBase.message_expecting_model(Potato)
@test !isnothing(match(r"(Expected)", m2))
@test !isnothing(match(r"(type)", m2))
@test isnothing(match(r"(mispelled)", m2))
m3 = MLJBase.message_expecting_model(123, spelling=true)
@test !isnothing(match(r"(Expected)", m3))
@test isnothing(match(r"(type)", m3))
@test !isnothing(match(r"(misspelled)", m3))
m4 = MLJBase.message_expecting_model(Potato, spelling=true)
@test !isnothing(match(r"(Expected)", m4))
@test !isnothing(match(r"(type)", m4))
@test isnothing(match(r"(misspelled)", m4))
end
@testset "check_ismodel" begin
@test isnothing(MLJBase.check_ismodel(Potato()))
@test_throws(
MLJBase.err_expecting_model(123),
MLJBase.check_ismodel(123),
)
@test_throws(
MLJBase.err_expecting_model(123, spelling=true),
MLJBase.check_ismodel(123, spelling=true),
)
@test_throws(
MLJBase.err_expecting_model(Potato),
MLJBase.check_ismodel(Potato),
)
end
true
module TestOperations
using Test
using MLJBase
using Serialization
using ..Models
@testset "Operations on non-composite models" begin
# Unsupervised model
X = rand(4)
m = fit!(machine(UnivariateStandardizer(), X), verbosity=0)
@test_throws ArgumentError inverse_transform(m)
@test inverse_transform(m, transform(m)) ≈ X
@test inverse_transform(m, transform(m, X)) ≈ X
X = source(rand(4))
m = fit!(machine(UnivariateStandardizer(), X), verbosity=0)
@test_throws ArgumentError inverse_transform(m)
@test inverse_transform(m, transform(m)) ≈ X() # test with node args
@test inverse_transform(m, transform(m, X))() ≈ X()
# Supervised model
X = MLJBase.table(rand(4, 4))
y = rand(4)
m2 = fit!(machine(DeterministicConstantRegressor(), X, y; cache=false), verbosity=0)
@test predict(m2) == fill(mean(y), length(y))
# Check that an error is thrown when applying an operation to a serialized machine
# with no args
filename = "constant_regressor_machine"
filename2 = "univariate_standadizer_machine"
smach = serializable(m)
smach2 = serializable(m2)
Serialization.serialize(filename, smach)
Serialization.serialize(filename2, smach2)
smach = Serialization.deserialize(filename)
smach2 = Serialization.deserialize(filename2)
MLJBase.restore!(smach)
MLJBase.restore!(smach2)
@test_throws ArgumentError predict(smach)
@test_throws ArgumentError predict(smach2)
rm(filename)
rm(filename2)
# Static model
y1, y2 = rand(4), rand(4)
m = fit!(machine(Averager(mix = 0.5)), verbosity=0)
m2 = fit!(machine(Averager(mix = 0.5); cache=false), verbosity=0) # non-cached version
@test_throws ArgumentError transform(m)
@test_throws ArgumentError transform(m2)
@test_throws ArgumentError inverse_transform(m)
average = 0.5 .* y1 .+ 0.5 .* y2
@test transform(m, y1, y2) == average #(1 - 0.5) .* y1 .+ 0.5 .* y2
@test transform(m, source(y1), source(y2))() == average
# Check that error is thrown when at least one of the inputs to `transform` is wrong.
# These tests are specific to the `Averager` static transformer
@test_throws ArgumentError transform(m, y1, Tuple(y2))
@test_throws ArgumentError transform(m, Tuple(y1), Tuple(y2))
end
@testset "operations on NetworkComposite models" begin
X = MLJBase.table(rand(4, 4))
y = rand(4)
m = fit!(machine(SimpleProbabilisticNetworkCompositeModel(), X, y), verbosity=0)
predictions = first(MLJBase.output_and_report(m.fitresult, :predict, X))
@test predict(m, X) == predictions
@test predict_mode(m, X) == mode.(predictions)
@test_throws ErrorException transform(m, X)
end
end
true
# using Revise
using Test
using MLJBase
struct Opaque
a::Int
end
struct Transparent
A::Int
B::Opaque
end
MLJBase.istransparent(::Transparent) = true
struct Dummy <:MLJType
t::Transparent
o::Opaque
n::Integer
end
@testset "params method" begin
t= Transparent(6, Opaque(5))
m = Dummy(t, Opaque(7), 42)
@test MLJBase.params(m) == (t = (A = 6,
B = Opaque(5)),
o = Opaque(7),
n = 42)
end
true
using MLJBase
if !MLJBase.TESTING
error(
"To test MLJBase, the environment variable "*
"`TEST_MLJBASE` must be set to `\"true\"`\n"*
"You can do this in the REPL with `ENV[\"TEST_MLJBASE\"]=\"true\"`"
)
end
using Distributed
# Thanks to https://stackoverflow.com/a/70895939/5056635 for the exeflags tip.
addprocs(; exeflags="--project=$(Base.active_project())")
@info "nprocs() = $(nprocs())"
import .Threads
@info "nthreads() = $(Threads.nthreads())"
@everywhere begin
using MLJModelInterface
using MLJBase
using Test
using CategoricalArrays
using Logging
using ComputationalResources
using StableRNGs
using StatisticalMeasures
end
import TypedTables
using Tables
function include_everywhere(filepath)
include(filepath) # Load on Node 1 first, triggering any precompile
if nprocs() > 1
fullpath = joinpath(@__DIR__, filepath)
@sync for p in workers()
@async remotecall_wait(include, p, fullpath)
end
end
end
include("test_utilities.jl")
# load Models module containing model implementations for testing:
print("Loading some models for testing...")
include_everywhere("_models/models.jl")
print("\r \r")
# enable conditional testing of modules by providing test_args
# e.g. `Pkg.test("MLJBase", test_args=["misc"])`
RUN_ALL_TESTS = isempty(ARGS)
macro conditional_testset(name, expr)
name = string(name)
esc(quote
if RUN_ALL_TESTS || $name in ARGS
@testset $name $expr
end
end)
end
# To avoid printing `@conditional_testset (macro with 1 method)`
# when loading this file via `include("test/preliminaries.jl")`.
nothing
using Distributed
import ComputationalResources: CPU1, CPUProcesses, CPUThreads
using .TestUtilities
using ProgressMeter
import Tables
@everywhere import StatisticalMeasures.StatisticalMeasuresBase as API
using StatisticalMeasures
import LearnAPI
@everywhere begin
using .Models
using StableRNGs
rng = StableRNG(1513515)
const verb = 0
end
using Test
using MLJBase
import Distributions
import StatsBase
@static if VERSION >= v"1.3.0-DEV.573"
using .Threads
end
struct DummyInterval <: Interval end
dummy_interval=DummyInterval()
struct GoofyTransformer <: Unsupervised end
dummy_measure_det(yhat, y) = 42
API.@trait(
typeof(dummy_measure_det),
observation_scitype = MLJBase.Textual,
kind_of_proxy = LearnAPI.LiteralTarget(),
)
dummy_measure_interval(yhat, y) = 42
API.@trait(
typeof(dummy_measure_interval),
observation_scitype = MLJBase.Textual,
kind_of_proxy = LearnAPI.ConfidenceInterval(),
)
@testset "_actual_operations" begin
clf = ConstantClassifier()
rgs = ConstantRegressor()
clf_det = DeterministicConstantClassifier()
rgs_det = DeterministicConstantRegressor()
measures = [LogLoss(), Accuracy(), BrierScore()] # mixed prob/determ
measures_det = [Accuracy(), FScore()]
operations = [predict, predict_mode, predict]
# single measure gets replicated to match length of `measures`:
@test MLJBase._actual_operations(predict_mean,
[Accuracy(), FScore()],
clf,
1) ==
[predict_mean, predict_mean]
# handling of a measure with `nothing` `kind_of_proxy` (eg,
# custom measure):
my_mae(yhat, y) = abs.(yhat - y)
@test(
MLJBase._actual_operations(nothing, [my_mae, LPLoss()], rgs_det , 1) ==
[predict, predict])
@test MLJBase._actual_operations(predict, [LogLoss(),], clf, 1) ==
[predict,]
@test MLJBase._actual_operations(operations, measures, clf, 1) == operations
@test_throws MLJBase.ERR_OPERATION_MEASURE_MISMATCH _ =
MLJBase._actual_operations([predict, predict_mode], measures, clf, 1)
@test_throws MLJBase.ERR_OPERATION_MEASURE_MISMATCH junk =
MLJBase._actual_operations([predict,], measures, clf, 1)
@test_throws MLJBase.ERR_INVALID_OPERATION _ =
MLJBase._actual_operations(transform, [LogLoss(),], clf, 1)
@test MLJBase._actual_operations(nothing, measures, clf, 1) == operations
@test(
@test_logs MLJBase._actual_operations(nothing, [Accuracy(),], clf, 1) ==
[predict_mode])
@test MLJBase._actual_operations(nothing, [l2,], rgs, 1) ==
[predict_mean, ]
@test_throws(
MLJBase.err_incompatible_prediction_types(clf_det, LogLoss()),
MLJBase._actual_operations(nothing, [LogLoss(),], clf_det, 1),
)
@test MLJBase._actual_operations(nothing, measures_det, clf_det, 1) ==
[predict, predict]
# measure/model differ in prediction type:
@test_throws(
MLJBase.err_ambiguous_operation(clf, dummy_measure_det),
MLJBase._actual_operations(nothing, [dummy_measure_det, ], clf, 1),
)
# measure has :interval prediction type but model does not (2 cases):
@test_throws(
MLJBase.err_ambiguous_operation(clf, dummy_measure_interval),
MLJBase._actual_operations(
nothing,
[dummy_measure_interval, ],
clf,
1,
),
)
@test_throws(
MLJBase.err_ambiguous_operation(clf_det, dummy_measure_interval),
MLJBase._actual_operations(nothing,
[dummy_measure_interval, ], clf_det, 1))
# both measure and model have :interval prediction type:
@test MLJBase._actual_operations(nothing,
[dummy_measure_interval, ],
dummy_interval, 1) == [predict, ]
# model has :interval prediction type but measure does not:
@test_throws(
MLJBase.err_ambiguous_operation(dummy_interval, LogLoss()),
MLJBase._actual_operations(nothing,
[LogLoss(), ], dummy_interval, 1))
# model does not have a valid `prediction_type`:
@test_throws(
MLJBase.ERR_UNSUPPORTED_PREDICTION_TYPE,
MLJBase._actual_operations(nothing, [LogLoss(),], GoofyTransformer(), 0),
)
end
@everywhere begin
nfolds = 6
nmeasures = 2
func(mach, k) = (
(sleep(MLJBase.PROG_METER_DT*rand(rng)); fill(1:k, nmeasures)),
:fitted_params,
:report,
)
end
@testset_accelerated "dispatch of resources and progress meter" accel begin
@info "Checking progress bars:"
X = (x = [1, ],)
y = [2.0, ]
mach = machine(ConstantRegressor(), X, y)
if accel isa CPUThreads
result = MLJBase._evaluate!(
func,
mach,
CPUThreads(Threads.nthreads()),
nfolds,
1
)
else
result = MLJBase._evaluate!(func, mach, accel, nfolds, 1)
end
measurements = vcat(result[1]...)
@test measurements ==
[1:1, 1:1, 1:2, 1:2, 1:3, 1:3, 1:4, 1:4, 1:5, 1:5, 1:6, 1:6]
@test collect(result[2]) == fill(:fitted_params, nfolds)
end
@test CV(nfolds=6) == CV(nfolds=6)
@test CV(nfolds=5) != CV(nfolds=6)
@test MLJBase.train_test_pairs(CV(), 1:10) !=
MLJBase.train_test_pairs(CV(shuffle=true), 1:10)
@test MLJBase.train_test_pairs(Holdout(), 1:10) !=
MLJBase.train_test_pairs(Holdout(shuffle=true), 1:10)
@testset "train test pairs" begin
cv = CV(nfolds=5)
pairs = MLJBase.train_test_pairs(cv, 1:24)
@test pairs == [
(6:24, 1:5),
([1:5..., 11:24...], 6:10),
([1:10..., 16:24...], 11:15),
([1:15..., 21:24...], 16:20),
(1:20, 21:24)
]
# Not enough data for the number of folds.
@test_throws ArgumentError MLJBase.train_test_pairs(cv, 1:4)
end
@testset "checking measure/model compatibility" begin
model = ConstantRegressor()
y = rand(rng,4)
# model prediction type is Probabilistic but measure is Deterministic:
@test_throws(
MLJBase.ERR_MEASURES_PROBABILISTIC(rms, MLJBase.LOG_SUGGESTION2),
MLJBase._check_measure(rms, predict, model, y),
)
@test MLJBase._check_measure(rms, predict_mean, model, y)
@test MLJBase._check_measure(rms, predict_median, model, y)
# `y` has `Finite` elscitype but measure `rms` is for `Continuous`:
y=categorical(collect("abc"))
@test_throws(
MLJBase.ERR_MEASURES_OBSERVATION_SCITYPE(
rms,
Union{Missing,Infinite},
Multiclass{3},
),
MLJBase._check_measure(rms, predict_median, model, y),
)
model = ConstantClassifier()
# model prediction type is Probabilistic but measure is Deterministic:
@test_throws(
MLJBase.ERR_MEASURES_PROBABILISTIC(mcr, MLJBase.LOG_SUGGESTION1),
MLJBase._check_measure(mcr, predict, model, y),
)
@test MLJBase._check_measure(mcr, predict_mode, model, y)
# `Deterministic` model but `Probabilistic` measure:
model = DeterministicConstantClassifier()
@test_throws(
MLJBase.ERR_MEASURES_DETERMINISTIC(cross_entropy),
MLJBase._check_measure(cross_entropy, predict, model, y),
)
# measure with wrong target_scitype:
@test_throws(
MLJBase.ERR_MEASURES_DETERMINISTIC(brier_score),
MLJBase._check_measures(
[brier_score, rms],
[predict_mode, predict_mean],
model, y,
),
)
model = ConstantClassifier()
@test MLJBase._check_measures([brier_score, cross_entropy, accuracy],
[predict, predict, predict_mode],
model, coerce(y, Multiclass))
end
@testset "check weights" begin
@test_throws(MLJBase.ERR_WEIGHTS_LENGTH,
MLJBase._check_weights([0.5, 0.5], 3))
@test MLJBase._check_weights([0.5, 0.5], 2)
end
@testset "check class weights" begin
w = Dict('a'=> 0.2, 'b'=>0.8)
@test_throws(MLJBase.ERR_WEIGHTS_DICT,
MLJBase._check_class_weights([0.1, 0.4], ['a', 'b']))
@test_throws(MLJBase.ERR_WEIGHTS_CLASSES,
MLJBase._check_class_weights(w, ['a', 'c']))
@test MLJBase._check_class_weights(w, ['b', 'a'])
end
@everywhere begin
user_rms(yhat, y) = mean((yhat - y).^2) |> sqrt
# deliberately omitting `consumes_multiple_observations` trait:
API.@trait typeof(user_rms) kind_of_proxy=LearnAPI.LiteralTarget()
end
@testset_accelerated "folds specified" accel begin
x1 = ones(10)
x2 = ones(10)
X = (x1=x1, x2=x2)
y = [1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]
resampling = [(3:10, 1:2),
([1, 2, 5, 6, 7, 8, 9, 10], 3:4),
([1, 2, 3, 4, 7, 8, 9, 10], 5:6),
([1, 2, 3, 4, 5, 6, 9, 10], 7:8),
(1:8, 9:10)]
for cache in [true, false]
model = DeterministicConstantRegressor()
mach = machine(model, X, y, cache=cache)
# check detection of incompatible measure (cross_entropy):
@test_throws(
MLJBase.err_incompatible_prediction_types(model, cross_entropy),
evaluate!(
mach,
resampling=resampling,
measure=[cross_entropy, rmslp1],
verbosity=verb,
acceleration=accel,
),
)
result = evaluate!(mach, resampling=resampling, verbosity=verb,
measure=[user_rms, mae, rmslp1], acceleration=accel)
v = [1/2, 3/4, 1/2, 3/4, 1/2]
@test result.per_fold[1] ≈ v
@test result.per_fold[2] ≈ v
@test result.per_fold[3][1] ≈ abs(log(2) - log(2.5))
@test result.per_observation[1] ≈ map(result.per_fold[1]) do μ
fill(μ, 2)
end
@test result.per_observation[2][1] ≈ [1/2, 1/2]
@test result.per_observation[2][2] ≈ [3/4, 3/4]
@test result.measurement[1] ≈ mean(v)
@test result.measurement[2] ≈ mean(v)
# fitted_params and report per fold:
@test map(fp->fp.fitresult, result.fitted_params_per_fold) ≈
[1.5, 1.25, 1.5, 1.25, 1.5]
@test all(isnothing, result.report_per_fold)
end
end
@testset "folds specified - per_observation=false" begin
accel = CPU1()
cache = true
x1 = ones(10)
x2 = ones(10)
X = (x1=x1, x2=x2)
y = [1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]
resampling = [(3:10, 1:2),
([1, 2, 5, 6, 7, 8, 9, 10], 3:4),
([1, 2, 3, 4, 7, 8, 9, 10], 5:6),
([1, 2, 3, 4, 5, 6, 9, 10], 7:8),
(1:8, 9:10)]
model = DeterministicConstantRegressor()
mach = machine(model, X, y, cache=cache)
result = evaluate!(mach, resampling=resampling, verbosity=verb,
measure=[user_rms, mae, rmslp1], acceleration=accel,
per_observation=false)
v = [1/2, 3/4, 1/2, 3/4, 1/2]
@test result.per_fold[1] ≈ v
@test result.per_fold[2] ≈ v
@test result.per_fold[3][1] ≈ abs(log(2) - log(2.5))
@test result.per_observation isa Vector{Missing}
@test result.measurement[1] ≈ mean(v)
@test result.measurement[2] ≈ mean(v)
# fitted_params and report per fold:
@test map(fp->fp.fitresult, result.fitted_params_per_fold) ≈
[1.5, 1.25, 1.5, 1.25, 1.5]
@test all(isnothing, result.report_per_fold)
end
@testset "repeated resampling" begin
x1 = ones(20)
x2 = ones(20)
X = (x1=x1, x2=x2)
y = rand(rng, 20)
holdout = Holdout(fraction_train=0.75, rng=rng)
model = Models.DeterministicConstantRegressor()
for cache in [true, false]
mach = machine(model, X, y, cache=cache)
result = evaluate!(mach, resampling=holdout, verbosity=verb,
measure=[rms, rmslp1], repeats=6)
per_fold = result.per_fold[1]
@test unique(per_fold) |> length == 6
@test abs(mean(per_fold) - std(y)) < 0.06 # very rough check
cv = CV(nfolds=3, rng=rng)
result = evaluate!(mach, resampling=cv, verbosity=verb,
measure=[rms, rmslp1], repeats=6)
per_fold = result.per_fold[1]
@test unique(per_fold) |> length == 18
@test abs(mean(per_fold) - std(y)) < 0.06 # very rough check
end
end
@testset "insample" begin
rows = rand(Int, 100)
@test MLJBase.train_test_pairs(InSample(), rows) == [(rows, rows),]
X, y = make_regression(20)
model = Models.DeterministicConstantRegressor()
# all rows:
e = evaluate(model, X, y, resampling=InSample(), measure=rms)
@test e.measurement[1] ≈ std(y, corrected=false)
# subsample of rows:
e = evaluate(model, X, y, resampling=InSample(), measure=rms, rows=1:7)
@test e.measurement[1] ≈ std(y[1:7], corrected=false)
end
@testset_accelerated "holdout" accel begin
x1 = ones(4)
x2 = ones(4)
X = (x1=x1, x2=x2)
y = [1.0, 1.0, 2.0, 2.0]
@test MLJBase.show_as_constructed(Holdout)
holdout = Holdout(fraction_train=0.75)
model = Models.DeterministicConstantRegressor()
for cache in [true, false]
mach = machine(model, X, y, cache=cache)
# to see if a default measure is found:
evaluate!(mach, resampling=holdout, verbosity=verb,
acceleration=accel)
result = evaluate!(mach, resampling=holdout, verbosity=verb,
measure=[rms, rmslp1], acceleration=accel)
@test result.measurement[1] ≈ 2/3
# test direct evaluation of a model + data:
result = evaluate(model, X, y, verbosity=0,
resampling=holdout, measure=rms, cache=cache)
@test result.measurement[1] ≈ 2/3
end
X = (x=rand(rng,100),)
y = rand(rng,100)
for cache in [true, false]
mach = machine(model, X, y, cache=cache)
evaluate!(mach, verbosity=verb,
resampling=Holdout(shuffle=true, rng=rng), acceleration=accel)
e1 = evaluate!(mach, verbosity=verb,
resampling=Holdout(shuffle=true),
acceleration=accel).measurement[1]
@test e1 != evaluate!(mach, verbosity=verb,
resampling=Holdout(),
acceleration=accel).measurement[1]
end
end
@testset_accelerated "Exception handling (see issue 235)" accel begin
X, y = make_moons(50)
model = ConstantClassifier()
bad_loss(yhat, y) = throw(Exception())
@test_throws Exception evaluate(model, X, y, measure=bad_loss, verbosity=0)
end
@testset_accelerated "cv" accel begin
x1 = ones(10)
x2 = ones(10)
X = (x1=x1, x2=x2)
y = [1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]
@test MLJBase.show_as_constructed(CV)
cv=CV(nfolds=5)
for cache in [true, false]
model = Models.DeterministicConstantRegressor()
mach = machine(model, X, y, cache=cache)
result = evaluate!(mach, resampling=cv, measure=[rms, rsq, rmslp1],
acceleration=accel, verbosity=verb)
@test result.per_fold[1] ≈ [1/2, 3/4, 1/2, 3/4, 1/2]
shuffled = evaluate!(mach, resampling=CV(shuffle=true), verbosity=verb,
acceleration=accel) # using rms default
@test shuffled.measurement[1] != result.measurement[1]
end
end
@testset "TimeSeriesCV" begin
tscv = TimeSeriesCV(; nfolds=3)
pairs = MLJBase.train_test_pairs(tscv, 1:10)
@test pairs == [
(1:4, [5, 6]),
(1:6, [7, 8]),
(1:8, [9, 10])
]
pairs = MLJBase.train_test_pairs(tscv, 1:2:15)
@test pairs == [
([1, 3], [5, 7])
([1, 3, 5, 7], [9, 11])
([1, 3, 5, 7, 9, 11], [13, 15])
]
@test_logs(
(:warn, "TimeSeriesCV is being applied to `rows` not in sequence. "),
MLJBase.train_test_pairs(tscv, reverse(1:10))
)
# Not enough data for the number of folds.
@test_throws ArgumentError MLJBase.train_test_pairs(TimeSeriesCV(10), 1:8)
end
@testset "stratified_cv" begin
# check in explicit example:
y = categorical(
['b', 'c', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'a', 'a', 'a']
)
scv = StratifiedCV(nfolds=3)
rows = 1:12
pairs = MLJBase.train_test_pairs(scv, rows, y)
expected_pairs = [
([2, 4, 9, 11, 3, 5, 7, 12], [1, 6, 8, 10]),
([1, 6, 8, 10, 3, 5, 7, 12], [2, 4, 9, 11]),
([1, 6, 8, 10, 2, 4, 9, 11], [3, 5, 7, 12])
]
# Explanation of expected_pairs: The row indices are processed one at
# a time. The test fold that a row index is placed in is determined
# by this lookup:
#
# b b b b c c c c a a a a
# 1 2 3 1 2 3 1 2 3 1 2 3
#
# For example, the first row such that y[row] == 'c' is placed in the
# second fold, and the second row such that y[row] == 'c' is placed in
# the third fold.
@test pairs == expected_pairs
# test invariance to label renaming:
z = replace(y, 'a' => 'b', 'b' => 'c', 'c' => 'a')
pairs = MLJBase.train_test_pairs(scv, rows, z)
@test pairs == expected_pairs
# test the case where rows is a shuffled subset of y:
y = categorical(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'])
rows = 8:-1:2
pairs = MLJBase.train_test_pairs(scv, rows, y)
@test pairs == [
([5, 4, 6, 2], [8, 7, 3]),
([8, 7, 3, 6, 2], [5, 4]),
([8, 7, 3, 5, 4], [6, 2])
]
# test shuffle:
scv_random = StratifiedCV(nfolds=3, shuffle=true, rng=1)
pairs_random = MLJBase.train_test_pairs(scv_random, rows, y)
@test pairs != pairs_random
# check class distribution is preserved in a larger randomized example:
N = 30
y = shuffle(vcat(fill('a', N), fill('b', 2N),
fill('c', 3N), fill('d', 4N))) |> categorical;
d = Distributions.fit(MLJBase.UnivariateFinite, y)
pairs = MLJBase.train_test_pairs(scv, 1:10N, nothing, y)
folds = vcat(first.(pairs), last.(pairs))
@test all([Distributions.fit(MLJBase.UnivariateFinite, y[fold]) ≈
d for fold in folds])
end
@testset_accelerated "weights in evaluation" accel begin
# cv:
x1 = ones(4)
x2 = ones(4)
X = (x1=x1, x2=x2)
y = [1.0, 2.0, 3.0, 1.0]
w = 1:4
cv=CV(nfolds=2)
model = Models.DeterministicConstantRegressor()
mach = machine(model, X, y)
e = evaluate!(mach, resampling=cv, measure=l1,
weights=w, verbosity=verb, acceleration=accel).measurement[1]
efold1 = mean([1*1, 1*0])
efold2 = mean([3*3/2, 4*1/2])
@test e ≈ mean([efold1, efold2])
# if I don't specify weights in `evaluate!`, then uniform should
# be used:
e = evaluate!(mach, resampling=cv, measure=l1,
verbosity=verb, acceleration=accel).measurement[1]
efold1 = mean([1*1, 1*0])
efold2 = mean([1*3/2, 1*1/2])
@test e ≈ mean([efold1, efold2])
end
@testset_accelerated "class weights in evaluation" accel begin
X, y = make_blobs(rng=rng)
cv=CV(nfolds = 2)
fold1, fold2 = partition(eachindex(y), 0.5)
m = MulticlassFScore()
class_w = Dict(1=>1, 2=>2, 3=>3)
model = Models.DeterministicConstantClassifier()
mach = machine(model, X, y)
# fscore by hand:
fit!(mach, rows=fold1, verbosity=0)
score1 = m(predict(mach, rows=fold2), y[fold2], class_w)
fit!(mach, rows=fold2, verbosity=0)
score2 = m(predict(mach, rows=fold1), y[fold1], class_w)
score_by_hand = mean([score1, score2])
# fscore by evaluate!:
score = evaluate!(
mach,
resampling=cv,
measure=m,
class_weights=class_w,
verbosity=verb,
acceleration=accel,
).measurement[1]
@test score ≈ score_by_hand
# if class weights in `evaluate!` aren't specified:
plain_score = evaluate!(
mach,
resampling=cv,
measure=m,
verbosity=verb,
acceleration=accel,
).measurement[1]
@test !(score ≈ plain_score)
end
@testset_accelerated "resampler as machine" accel begin
N = 50
X = (x1=rand(rng, N), x2=rand(rng, N), x3=rand(rng, N))
y = X.x1 -2X.x2 + 0.05*rand(rng, N)
ridge_model = FooBarRegressor(lambda=20.0)
holdout = Holdout(fraction_train=0.75)
resampler = Resampler(resampling=holdout, model=ridge_model, measure=mae,
acceleration=accel)
@test constructor(resampler) == Resampler
@test package_name(resampler) == "MLJBase"
@test load_path(resampler) == "MLJBase.Resampler"
resampling_machine = machine(resampler, X, y)
@test_logs((:info, r"^Training"), fit!(resampling_machine))
e1=evaluate(resampling_machine).measurement[1]
mach = machine(ridge_model, X, y)
@test e1 ≈ evaluate!(mach, resampling=holdout,
measure=mae, verbosity=verb,
acceleration=CPU1()).measurement[1]
ridge_model.lambda=1.0
fit!(resampling_machine, verbosity=verb)
e2=evaluate(resampling_machine).measurement[1]
@test e1 != e2
resampler.weights = rand(rng,N)
fit!(resampling_machine, verbosity=verb)
e3=evaluate(resampling_machine).measurement[1]
@test e3 != e2
@test MLJBase.package_name(Resampler) == "MLJBase"
@test MLJBase.is_wrapper(Resampler)
# when only `model` changes, the folds shouldn't change, even in
# shuffled case:
cv = CV(rng=StableRNGs.StableRNG(123))
resampler=Resampler(model=ridge_model,
resampling=cv,
repeats=3,
measure=mae, acceleration=accel)
mach = machine(resampler, X, y)
fit!(mach, verbosity=verb)
ev1 = evaluate(mach)
rows1 = ev1.train_test_rows
resampler.model.lambda *= 0.5
fit!(mach, verbosity=verb)
ev2 = evaluate(mach)
@test rows1 == ev2.train_test_rows
resampler.model.lambda *= 2
fit!(mach, verbosity=verb)
@test ev1.measurement[1] ≈ evaluate(mach).measurement[1]
# but if `resampling` or `repeats`, then new
# folds should be generated:
resampler.resampling = CV(rng=cv.rng, nfolds=2)
fit!(mach, verbosity=verb)
rows2 = evaluate(mach).train_test_rows
@test length(rows2) == 2 * resampler.repeats
@test rows2 != rows1
resampler.repeats += 1
fit!(mach, verbosity=verb)
rows3 = evaluate(mach).train_test_rows
@test length(rows3) == 2 * resampler.repeats
end
struct DummyResamplingStrategy <: MLJBase.ResamplingStrategy end
function MLJBase.train_test_pairs(resampling::DummyResamplingStrategy,
rows, X, y)
train = filter(rows) do j
y[j] == y[1]
end
test = setdiff(rows, train)
return [(train, test),]
end
@testset_accelerated "custom strategy depending on X, y" accel begin
X = (x = rand(rng,8), )
y = categorical(string.([:x, :y, :x, :x, :y, :x, :x, :y]))
@test MLJBase.train_test_pairs(DummyResamplingStrategy(), 2:6, X, y) ==
[([3, 4, 6], [2, 5]),]
e = evaluate(ConstantClassifier(), X, y,
measure=misclassification_rate,
resampling=DummyResamplingStrategy(),
operation=predict_mode,
acceleration=accel,
verbosity=verb)
@test e.measurement[1] ≈ 1.0
end
@testset_accelerated "sample weights in training and evaluation" accel begin
yraw = ["Perry", "Antonia", "Perry", "Antonia", "Skater"]
X = (x=rand(rng,5),)
y = categorical(yraw)
w = [1, 10, 1, 10, 5]
for cache in [true, false]
# without weights:
mach = machine(ConstantClassifier(), X, y, cache=cache)
e = evaluate!(mach, resampling=Holdout(fraction_train=0.6),
operation=predict_mode, measure=misclassification_rate,
acceleration=accel, verbosity=verb)
@test e.measurement[1] ≈ 1.0
# with weights in training and evaluation:
mach = machine(ConstantClassifier(), X, y, w, cache=cache)
e = evaluate!(mach, resampling=Holdout(fraction_train=0.6),
operation=predict_mode, measure=misclassification_rate,
acceleration=accel, verbosity=verb, weights=w)
@test e.measurement[1] ≈ mean([10*0, 5*1])
# with different weights in training and evaluation:
e = evaluate!(mach, resampling=Holdout(fraction_train=0.6),
operation=predict_mode, measure=misclassification_rate,
weights = fill(1, 5), acceleration=accel, verbosity=verb)
@test e.measurement[1] ≈ 1/2
@test_throws(DimensionMismatch,
evaluate!(mach, resampling=Holdout(fraction_train=0.6),
operation=predict_mode,
measure=misclassification_rate,
weights = fill(1, 100), acceleration=accel,
verbosity=verb))
end
# resampling on a subset of all rows:
model = KNNClassifier()
N = 200
X = (x = rand(rng,3N), );
y = categorical(rand(rng,"abcd", 3N));
w = rand(rng,3N);
class_w = Dict(zip(levels(y), rand(length(levels(y)))));
rows = StatsBase.sample(1:3N, 2N, replace=false);
Xsmall = selectrows(X, rows);
ysmall = selectrows(y, rows);
wsmall = selectrows(w, rows);
for cache in [true, false]
mach1 = machine(model, Xsmall, ysmall, wsmall, cache=cache)
e1 = evaluate!(mach1,
resampling=CV(),
measure=misclassification_rate,
weights=wsmall,
operation=predict_mode,
acceleration=accel,
verbosity=verb)
mach2 = machine(model, X, y, w, cache=cache)
e2 = evaluate!(mach2,
resampling=CV(),
measure=misclassification_rate,
weights=w,
operation=predict_mode,
rows=rows,
acceleration=accel,
verbosity=verb)
@test e1.per_fold ≈ e2.per_fold
end
for cache in [true, false]
# resampler as machine with evaluation weights not specified:
resampler = Resampler(model=model, resampling=CV();
measure=misclassification_rate,
operation=predict_mode,
cache=cache)
resampling_machine = machine(resampler, X, y, w, cache=false)
fit!(resampling_machine, verbosity=verb)
e1 = evaluate(resampling_machine).measurement[1]
mach = machine(model, X, y, w, cache=cache)
e2 = evaluate!(mach, resampling=CV();
measure=misclassification_rate,
operation=predict_mode,
acceleration=accel, verbosity=verb).measurement[1]
@test e1 ≈ e2
# resampler as machine with evaluation weights specified:
weval = rand(rng,3N);
resampler = Resampler(model=model, resampling=CV();
measure=misclassification_rate,
operation=predict_mode,
weights=weval, acceleration=accel,
cache=cache)
resampling_machine = machine(resampler, X, y, w, cache=false)
fit!(resampling_machine, verbosity=verb)
e1 = evaluate(resampling_machine).measurement[1]
mach = machine(model, X, y, w, cache=cache)
e2 = evaluate!(mach, resampling=CV();
measure=misclassification_rate,
operation=predict_mode,
weights=weval,
acceleration=accel,
verbosity=verb).measurement[1]
@test e1 ≈ e2
end
x = [1,2,3,4,5,6,7]
X, y = Tables.table([x x x x x x]), coerce([1,2,1,3,1,2,2], Multiclass)
model = Models.DeterministicConstantClassifier()
class_w = Dict(zip(levels(y), rand(length(levels(y)))))
for cache in [true, false]
#resampler as a machine with class weights specified
cweval = Dict(zip(levels(y), rand(length(levels(y)))));
resampler = Resampler(model=model, resampling=CV(nfolds=2);
measure=MulticlassFScore(return_type=Vector),
class_weights=cweval, acceleration=accel)
resampling_machine = machine(resampler, X, y, cache=false)
fit!(resampling_machine, verbosity=verb)
e1 = evaluate(resampling_machine).measurement[1]
mach = machine(model, X, y, cache=cache)
e2 = evaluate!(mach, resampling=CV(nfolds=2);
measure=MulticlassFScore(return_type=Vector),
class_weights=cweval,
acceleration=accel,
verbosity=verb).measurement[1]
@test e1 ≈ e2
end
@testset "warnings about measures not supporting weights" begin
model = ConstantClassifier()
N = 100
X, y = make_moons(N)
class_weights = Dict(0=>0.4, 1=>0.6)
@test_logs((:warn, r"Sample weights"),
evaluate(model, X, y,
resampling=Holdout(fraction_train=0.5),
measure=auc, weights=ones(N)))
@test_logs((:warn, r"Class weights"),
evaluate(model, X, y,
resampling=Holdout(fraction_train=0.5),
measure=auc, class_weights=class_weights))
end
end
@testset_accelerated "automatic operations - integration" accel begin
clf = ConstantClassifier()
X, y = make_moons(100)
e1 = evaluate(clf, X, y, resampling=CV(),
measures=[LogLoss(), Accuracy()], verbosity=1)
e2 = evaluate(clf, X, y, resampling=CV(),
operation=[predict, predict_mode],
measures=[LogLoss(), Accuracy()], verbosity=1)
@test e1.measurement ≈ e2.measurement
evaluate(clf, X, y, resampling=CV(),
operation=predict,
measures=[LogLoss(), BrierScore()], verbosity=0)
end
@testset "reported fields in documentation" begin
# Using `evaluate` to obtain a `PerformanceEvaluation` object.
clf = ConstantClassifier()
X, y = make_moons(100)
y = coerce(y, OrderedFactor)
evaluations = evaluate(clf, X, y, resampling=CV())
T = typeof(evaluations)
@test T <: PerformanceEvaluation
show_text = sprint(show, MIME"text/plain"(), evaluations)
cols = ["measure", "operation", "measurement", "1.96*SE", "per_fold"]
@test all(contains.(show_text, cols))
print(show_text)
docstring_text = string(@doc(PerformanceEvaluation))
for fieldname in fieldnames(PerformanceEvaluation)
@test contains(show_text, string(fieldname))
# string(text::Markdown.MD) converts `-` list items to `*`.
@test contains(docstring_text, " * `$fieldname`")
end
measures = [LogLoss(), Accuracy()]
evaluations = evaluate(clf, X, y; measures, resampling=Holdout())
show_text = sprint(show, MIME"text/plain"(), evaluations)
@test !contains(show_text, "std")
# issue #871: trying to calculate SE when inappropriate should not throw an error in
# display.
evaluations = evaluate(
clf,
X,
y,
operation=predict_mode,
measure=ConfusionMatrix(),
resampling=CV(),
);
printed_evaluations = sprint(show, "text/plain", evaluations)
@test contains(printed_evaluations, "N/A")
end
@testset_accelerated "issue with Resampler #954" acceleration begin
knn = KNNClassifier()
cnst = DeterministicConstantClassifier()
X, y = make_blobs(10)
resampler = MLJBase.Resampler(
;model=knn,
measure=accuracy,
operation=nothing,
acceleration,
)
mach = machine(resampler, X, y) |> fit!
resampler.model = cnst
fit!(mach)
end
@testset "compact evaluation objects" begin
model = ConstantClassifier()
X, y = make_blobs(10)
e = evaluate(model, X, y)
ec = evaluate(model, X, y, compact=true)
@test MLJBase.compactify(ec) == ec == MLJBase.compactify(e)
@test e isa PerformanceEvaluation
@test ec isa CompactPerformanceEvaluation
@test startswith(sprint(show, MIME("text/plain"), e), "PerformanceEvaluation")
@test startswith(sprint(show, MIME("text/plain"), ec), "CompactPerformanceEvaluation")
@test e.measurement[1] == ec.measurement[1]
# smoke tests:
mach = machine(model, X, y)
for e in [
evaluate!(mach, measures=[brier_loss, accuracy]),
evaluate!(mach, measures=[brier_loss, accuracy], compact=true),
evaluate!(mach, resampling=Holdout(), measures=[brier_loss, accuracy]),
evaluate!(mach, resampling=Holdout(), measures=[brier_loss, accuracy], compact=true),
]
@test contains(sprint(show, MIME("text/plain"), e), "predict")
@test contains(sprint(show, e), "PerformanceEvaluation(")
end
end
# # TRANSFORMER WITH PREDICT
struct PredictingTransformer <:Unsupervised end
MLJBase.fit(::PredictingTransformer, verbosity, X, y) = (mean(y), nothing, nothing)
MLJBase.fit(::PredictingTransformer, verbosity, X) = (nothing, nothing, nothing)
MLJBase.predict(::PredictingTransformer, fitresult, X) = fill(fitresult, nrows(X))
MLJBase.predict(::PredictingTransformer, ::Nothing, X) = nothing
MLJBase.prediction_type(::Type{<:PredictingTransformer}) = :deterministic
@testset "`Unsupervised` model with a predict" begin
X = rand(10)
y = fill(42.0, 10)
e = evaluate(PredictingTransformer(), X, y, resampling=Holdout(), measure=l2)
@test e.measurement[1] ≈ 0
@test_throws(
MLJBase.ERR_NEED_TARGET,
evaluate(PredictingTransformer(), X, measure=l2),
)
end
# # DUMMY LOGGER
struct DummyLogger end
MLJBase.save(logger::DummyLogger, mach::Machine) = mach.model
@testset "default logger" begin
@test isnothing(default_logger())
model = ConstantClassifier()
mach = machine(model, make_moons(10)...)
fit!(mach, verbosity=0)
@test_throws MLJBase.ERR_INVALID_DEFAULT_LOGGER MLJBase.save(mach)
default_logger(DummyLogger())
@test default_logger() == DummyLogger()
@test MLJBase.save(mach) == model
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 2103 | # To speed up the development workflow, use `TestEnv`.
# For example:
# ```
# $ julia --project
#
# julia> ENV["TEST_MLJBASE"] = "true"
#
# julia> using TestEnv; TestEnv.activate()
#
# julia> include("test/preliminaries.jl")
# [...]
#
# julia> include("test/resampling.jl")
# [...]
# ```
include("preliminaries.jl")
@conditional_testset "misc" begin
@test include("utilities.jl")
@test include("static.jl")
@test include("show.jl")
end
@conditional_testset "interface" begin
@test include("interface/interface.jl")
@test include("interface/data_utils.jl")
end
@conditional_testset "default_measures" begin
@test include("default_measures.jl")
end
@conditional_testset "resampling" begin
@test include("resampling.jl")
end
@conditional_testset "data" begin
@test include("data/data.jl")
@test include("data/datasets.jl")
@test include("data/datasets_synthetic.jl")
end
@conditional_testset "sources" begin
@test include("sources.jl")
end
@conditional_testset "models" begin
@test include("models.jl")
end
@conditional_testset "machines" begin
@test include("machines.jl")
end
@conditional_testset "composition_learning_networks" begin
@test include("composition/learning_networks/nodes.jl")
@test include("composition/learning_networks/inspection.jl")
@test include("composition/learning_networks/signatures.jl")
@test include("composition/learning_networks/replace.jl")
end
@conditional_testset "composition_models" begin
@test include("composition/models/network_composite.jl")
@test include("composition/models/inspection.jl")
@test include("composition/models/pipelines.jl")
@test include("composition/models/transformed_target_model.jl")
@test include("composition/models/stacking.jl")
@test include("composition/models/static_transformers.jl")
end
@conditional_testset "operations" begin
@test include("operations.jl")
end
@conditional_testset "hyperparam" begin
@test include("hyperparam/one_dimensional_ranges.jl")
@test include("hyperparam/one_dimensional_range_methods.jl")
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 430 | using .Models
@testset "display of models" begin
io = IOBuffer()
show(io, KNNRegressor())
@test String(take!(io)) == "KNNRegressor(K = 5, …)"
show(io, MIME("text/plain"), KNNRegressor())
@test String(take!(io)) ==
"KNNRegressor(\n K = 5, \n algorithm = :kdtree, \n "*
"metric = Distances.Euclidean(0.0), \n leafsize = 10, \n "*
"reorder = true, \n weights = :uniform)"
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 261 | module TestSources
using MLJBase
using Test
X = 7
Xs = source(X)
@test Xs() == X
@test Xs(8) == 8
@test elscitype(Xs) == Count
@test scitype(Xs) == MLJBase.CallableReturning{Count}
rebind!(Xs, nothing)
@test isempty(Xs)
@test Xs.scitype == Nothing
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 1414 | module TestStatic
using Test, MLJBase
using StableRNGs
rng = StableRNG(5312515)
## SIMPLE UNIVARIATE FUNCTION
mutable struct Scale <: MLJBase.Static
scaling::Float64
end
function MLJBase.transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X * s.scaling
MLJBase.table(s.scaling * MLJBase.matrix(X), prototype=X)
end
function MLJBase.inverse_transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X / s.scaling
MLJBase.table(MLJBase.matrix(X) / s.scaling, prototype=X)
end
s = Scale(2)
X = randn(rng, 2, 3)
Xt = MLJBase.table(X)
R = transform(s, nothing, X)
IR = inverse_transform(s, nothing, R)
@test IR ≈ X
R = transform(s, nothing, Xt)
IR = inverse_transform(s, nothing, R)
@test MLJBase.matrix(IR) ≈ X
## MULTIVARIATE FUNCTION
mutable struct PermuteArgs <: MLJBase.Static
permutation::NTuple{N,Int} where N
end
MLJBase.transform(p::PermuteArgs, _, args...) =
Tuple([args[i] for i in p.permutation])
MLJBase.inverse_transform(p::PermuteArgs, _, args...) =
Tuple([args[i] for i in sortperm(p.permutation |> collect)])
p = PermuteArgs((2, 3, 1))
@test transform(p, nothing, 10, 20, 30) == (20, 30, 10)
@test inverse_transform(p, nothing, 20, 30, 10) == (10, 20, 30)
# no-op
fitresult, _, _ = MLJBase.fit(p, 1, (1, 2, 3))
@test transform(p, fitresult, 10, 20, 30) == (20, 30, 10)
@test inverse_transform(p, fitresult, 20, 30, 10) == (10, 20, 30)
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 3609 | module TestUtilities
using Test
using MLJBase
export @testset_accelerated, include_everywhere, @test_mach_sequence,
@test_model_sequence
using ComputationalResources
using ComputationalResources: CPUProcesses
macro testset_accelerated(name::String, var, ex)
testset_accelerated(name, var, ex)
end
macro testset_accelerated(name::String, var, opts::Expr, ex)
testset_accelerated(name, var, ex; eval(opts)...)
end
function testset_accelerated(name::String, var, ex; exclude=[])
final_ex = quote
local $var = CPU1()
@testset $name $ex
end
resources = AbstractResource[CPUProcesses(), CPUThreads()]
for res in resources
if any(x->typeof(res)<:x, exclude)
push!(final_ex.args, quote
local $var = $res
@testset $(name*" ($(typeof(res).name))") begin
@test_broken false
end
end)
else
push!(final_ex.args, quote
local $var = $res
@testset $(name*" ($(typeof(res).name))") $ex
end)
end
end
# preserve outer location if possible
if ex isa Expr && ex.head === :block && !isempty(ex.args) && ex.args[1] isa LineNumberNode
final_ex = Expr(:block, ex.args[1], final_ex)
end
return esc(final_ex)
end
"""
sedate!(fit_ex)
The input is a fit expression, such as `fit!(mach, kws...)`. This function
throws an error if a `verbosity` level is already specified, and otherwise
appends `verbosity=-5000` to keep fitting silent.
"""
function sedate!(fit_ex)
kwarg_exs = filter(fit_ex.args) do arg
arg isa Expr && arg.head == :kw
end
keys = map(kwarg_exs) do arg
arg.args[1]
end
:verbosity in keys &&
error("You cannot specify `verbosity` in @test_mach_sequence "*
"or @test_model_sequence. ")
push!(fit_ex.args, Expr(:kw, :verbosity, -5000))
return fit_ex
end
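# For example, the (hypothetical) expression below gains a silencing keyword:
#
#     julia> sedate!(:(fit!(mach, rows=1:3)))
#     :(fit!(mach, rows = 1:3, verbosity = -5000))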
macro test_mach_sequence(fit_ex, sequence_exs...)
sedate!(fit_ex)
seq = gensym(:sequence)
esc(quote
MLJBase.flush!(MLJBase.MACHINE_CHANNEL)
$fit_ex
local $seq = MLJBase.flush!(MLJBase.MACHINE_CHANNEL)
# for s in $seq
# println(s)
# end
@test $seq in [$(sequence_exs...)]
end)
end
# function weakly_in(object:Tuple{Symbol,Model}, itr)
# for tup in itr
# tup[1] === object[1] && tup[2] == tup
macro test_model_sequence(fit_ex, sequence_exs...)
sedate!(fit_ex)
seq = gensym(:sequence)
esc(quote
MLJBase.flush!(MLJBase.MACHINE_CHANNEL)
$fit_ex
local $seq = map(MLJBase.flush!(MLJBase.MACHINE_CHANNEL)) do tup
(tup[1], tup[2].model)
end
# for s in $seq
# println(s)
# end
@test $seq in [$(sequence_exs...)]
end)
end
###############################################################################
#####  THE FOLLOWING ARE USED TO TEST SERIALIZATION CAPABILITIES  #####
###############################################################################
function test_args(mach)
# Check source nodes are empty if any
for arg in mach.args
if arg isa Source
@test arg == source()
end
end
end
test_data(mach) = all([:old_rows, :data, :resampled_data, :cache]) do field
getfield(mach, field) |> isnothing
end
function generic_tests(mach₁, mach₂)
test_args(mach₂)
test_data(mach₂)
@test mach₂.state == -1
for field in (:frozen, :model, :old_model, :old_upstream_state)
@test getfield(mach₁, field) == getfield(mach₂, field)
end
end
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 7271 | module Utilities
using Test
using MLJBase
using StableRNGs
import Random
using ComputationalResources
@test MLJBase.finaltypes(Union{Missing,Int}) == [Union{Missing,Int64}]
@test MLJBase.finaltypes(Float64) == [Float64]
abstract type Foo end
struct Bar <: Foo end
struct Baz <: Foo end
@test MLJBase.finaltypes(Foo) == [Bar, Baz]
@testset "flat_values" begin
t = (X = (x = 1, y = 2), Y = 3)
@test flat_values(t) == (1, 2, 3)
end
mutable struct M
a1
a2
end
mutable struct A1
a11
a12
end
mutable struct A2
a21
end
mutable struct A21
a211
a212
end
@testset "prepend" begin
MLJBase.prepend(:x, :(y.z.w)) == :(x.y.z.w)
MLJBase.prepend(:x, nothing) == nothing
end
@testset "recursive getproperty, setproperty!" begin
m = (a1 = (a11 = 10, a12 = 20), a2 = (a21 = (a211 = 30, a212 = 40),))
@test MLJBase.recursive_getproperty(m, :(a1.a12)) == 20
@test MLJBase.recursive_getproperty(m, :a1) == (a11 = 10, a12 = 20)
@test MLJBase.recursive_getproperty(m, :(a2.a21.a212)) == 40
m = M(A1(10, 20), A2(A21(30, 40)))
MLJBase.recursive_setproperty!(m, :(a2.a21.a212), 42)
@test MLJBase.recursive_getproperty(m, :(a1.a11)) == 10
@test MLJBase.recursive_getproperty(m, :(a1.a12)) == 20
@test MLJBase.recursive_getproperty(m, :(a2.a21.a211)) == 30
@test MLJBase.recursive_getproperty(m, :(a2.a21.a212)) == 42
@test MLJBase.recursive_getproperty(
MLJBase.recursive_getproperty(m, :(a2.a21)), :a212) == 42
end
@testset "shuffle rows" begin
rng = StableRNG(5996661)
# check dims
x = randn(rng, 5)
y = randn(rng, 5, 5)
z = randn(rng, 5, 5)
@test MLJBase.check_same_nrows(x, y) === nothing
@test MLJBase.check_same_nrows(z, x) === nothing
@test MLJBase.check_same_nrows(y, z) === nothing
@test_throws DimensionMismatch MLJBase.check_same_nrows(x, randn(rng, 4))
x = 1:5 |> collect
y = 1:5 |> collect
# In the following tests it is crucial to recreate a new `RNG` each time.
perm = Random.randperm(StableRNG(11900), 5)
@test MLJBase.shuffle_rows(x, y; rng=StableRNG(11900)) == (x[perm], y[perm])
y = randn(StableRNG(11900), 5, 5)
@test MLJBase.shuffle_rows(x, y; rng=StableRNG(11900)) == (x[perm], y[perm, :])
@test MLJBase.shuffle_rows(z, y; rng=StableRNG(11900)) == (z[perm,:], y[perm, :])
@test MLJBase.shuffle_rows(x, x; rng=StableRNG(11900)) == (x[perm], x[perm])
end
@testset "init_rng" begin
rng = 129
@test MLJBase.init_rng(rng) == Random.MersenneTwister(rng)
rng = StableRNG(129)
@test MLJBase.init_rng(rng) == rng
rng = -20
@test_throws ArgumentError MLJBase.init_rng(rng)
end
@testset "unwind" begin
iterators = ([1, 2], ["a","b"], ["x", "y", "z"])
@test unwind(iterators...) ==
[1 "a" "x";
2 "a" "x";
1 "b" "x";
2 "b" "x";
1 "a" "y";
2 "a" "y";
1 "b" "y";
2 "b" "y";
1 "a" "z";
2 "a" "z";
1 "b" "z";
2 "b" "z"]
end
@testset "comp resources" begin
@test default_resource() == CPU1()
default_resource(ComputationalResources.CPUProcesses())
@test default_resource() == ComputationalResources.CPUProcesses()
default_resource(CPU1())
end
@static if VERSION >= v"1.3.0-DEV.573"
@testset "Chunks" begin
nthreads = Threads.nthreads()
#test for cases with exactly same work as threads
@test length(MLJBase.chunks(1:nthreads, nthreads)) == nthreads
#test for cases with more work than threads
@test length(MLJBase.chunks(1:nthreads + rand(1:nthreads), nthreads)) == nthreads
#test for cases with less work than threads
@test length(MLJBase.chunks(1:nthreads-1, nthreads)) == nthreads - 1
end
end
@testset "available_name" begin
@test MLJBase.available_name(Utilities, :pizza) == :pizza
Utilities.eval(:(orange = 5))
@test MLJBase.available_name(Utilities, :orange) == :orange2
Utilities.eval(:(orange2 = 6))
@test MLJBase.available_name(Utilities, :orange) == :orange3
end
struct FooBar{A} end
@testset "generate_name!" begin
existing_names = Symbol[]
@test MLJBase.generate_name!(FooBar{Int}, existing_names) ==
:foo_bar
@test MLJBase.generate_name!(FooBar{Float64}, existing_names) ==
:foo_bar2
@test MLJBase.generate_name!(FooBar{Float64},
existing_names,
only=Number) == :f
@test MLJBase.generate_name!(FooBar{Float64},
existing_names,
only=Number) == :f2
@test MLJBase.generate_name!(FooBar{Float64},
existing_names,
only=Number,
substitute=:g) == :g
@test MLJBase.generate_name!(FooBar{Float64},
existing_names,
only=Number,
substitute=:g) == :g2
@test existing_names == [:foo_bar, :foo_bar2, :f, :f2, :g, :g2]
@test MLJBase.generate_name!(42, [], only=Array) == :f
@test MLJBase.generate_name!(42, [], only=Number, substitute=:g) == :int64
end
@testset "sequence_string" begin
@test MLJBase.sequence_string(1:10, 2) == "1, 2, ..."
@test MLJBase.sequence_string(1:10) == "1, 2, 3, ..."
@test MLJBase.sequence_string(1:3) == "1, 2, 3"
@test MLJBase.sequence_string(1:2) == "1, 2"
@test MLJBase.sequence_string([sin, cos, tan, asin]) ==
"sin, cos, tan, ..."
end
@testset "observation" begin
@test MLJBase.observation(AbstractVector{Count}) ==
Count
@test MLJBase.observation(AbstractVector{<:Count}) ==
Count
@test MLJBase.observation(AbstractVector{<:Union{Missing,Count}}) ==
Union{Missing,Count}
@test MLJBase.observation(AbstractMatrix{<:Count}) ==
AbstractVector{<:Count}
@test MLJBase.observation(AbstractMatrix{Union{Missing,Count}}) ==
AbstractVector{Union{Missing,Count}}
@test MLJBase.observation(AbstractMatrix{<:Union{Missing,Count}}) ==
AbstractVector{<:Union{Missing,Count}}
@test MLJBase.observation(Table(Count)) == AbstractVector{<:Count}
end
@testset "guess_observation_scitype" begin
@test MLJBase.guess_observation_scitype([missing, 1, 2, 3]) ==
Union{Missing, Count}
@test MLJBase.guess_observation_scitype(rand(3, 2)) ==
AbstractVector{Continuous}
@test MLJBase.guess_observation_scitype((x=rand(3), y=rand(Bool, 3))) ==
AbstractVector{Union{Continuous, Count}}
@test MLJBase.guess_observation_scitype((x=[missing, 1, 2], y=[1, 2, 3])) ==
Unknown
@test MLJBase.guess_observation_scitype(5) == Unknown
end
mutable struct DRegressor2 <: Deterministic end
MLJBase.target_scitype(::Type{<:DRegressor2}) =
AbstractVector{<:Continuous}
@test MLJBase.guess_model_target_observation_scitype(DRegressor2()) == Continuous
@testset "pretty" begin
X = (x=fill(1, 3), y=fill(2, 3))
io = IOBuffer()
pretty(X)
pretty(io, X)
str = take!(io) |> String
@test contains(str, "x")
@test contains(str, "y")
@test contains(str, "│")
end
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 6364 | ## THE CONSTANT REGRESSOR
export ConstantClassifier, ConstantRegressor,
DeterministicConstantClassifier,
ProbabilisticConstantClassifier,
DeterministicConstantRegressor
import Distributions
"""
ConstantRegressor(; distribution_type=Distributions.Normal)
A regressor that, for any new input pattern, predicts the univariate
probability distribution best fitting the training target data. Use
`predict_mean` to predict the mean value instead.
"""
struct ConstantRegressor{D} <: MMI.Probabilistic end
function ConstantRegressor(; distribution_type=Distributions.Normal)
model = ConstantRegressor{distribution_type}()
message = clean!(model)
isempty(message) || @warn message
return model
end
function MMI.clean!(model::ConstantRegressor{D}) where D
message = ""
D <: Distributions.Sampleable ||
error("$model.distribution_type is not a valid distribution_type.")
return message
end
MMI.reformat(::ConstantRegressor, X) = (MMI.matrix(X),)
MMI.reformat(::ConstantRegressor, X, y) = (MMI.matrix(X), y)
MMI.selectrows(::ConstantRegressor, I, A) = (view(A, I, :),)
MMI.selectrows(::ConstantRegressor, I, A, y) = (view(A, I, :), y[I])
function MMI.fit(::ConstantRegressor{D}, verbosity::Int, A, y) where D
fitresult = Distributions.fit(D, y)
cache = nothing
report = nothing
return fitresult, cache, report
end
MMI.fitted_params(::ConstantRegressor, fitresult) =
(target_distribution=fitresult,)
MMI.predict(::ConstantRegressor, fitresult, Xnew) =
fill(fitresult, nrows(Xnew))
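# Illustrative usage sketch (not part of the model definition; assumes MLJBase is
# loaded along with this model, and uses made-up data via `make_regression`):
#
#     X, y = make_regression(20)
#     mach = fit!(machine(ConstantRegressor(), X, y), verbosity=0)
#     predict(mach, X)       # 20 copies of the fitted Normal distribution
#     predict_mean(mach, X)  # 20 copies of mean(y)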
##
## THE CONSTANT DETERMINISTIC REGRESSOR (FOR TESTING)
##
struct DeterministicConstantRegressor <: MMI.Deterministic end
function MMI.fit(::DeterministicConstantRegressor, verbosity::Int, X, y)
fitresult = mean(y)
cache = nothing
report = nothing
return fitresult, cache, report
end
MMI.reformat(::DeterministicConstantRegressor, X) = (MMI.matrix(X),)
MMI.reformat(::DeterministicConstantRegressor, X, y) = (MMI.matrix(X), y)
MMI.selectrows(::DeterministicConstantRegressor, I, A) = (view(A, I, :),)
MMI.selectrows(::DeterministicConstantRegressor, I, A, y) =
(view(A, I, :), y[I])
MMI.predict(::DeterministicConstantRegressor, fitresult, Xnew) =
fill(fitresult, nrows(Xnew))
##
## THE CONSTANT CLASSIFIER
##
"""
ConstantClassifier()
A classifier that, for any new input pattern, `predict`s the
`UnivariateFinite` probability distribution `d` best fitting the
training target data. So, `pdf(d, level)` is the proportion of
training observations equal to `level`. Use `predict_mode` to
obtain the training target mode instead.
"""
mutable struct ConstantClassifier <: MMI.Probabilistic
testing::Bool
bogus::Int
end
ConstantClassifier(; testing=false, bogus=0) =
ConstantClassifier(testing, bogus)
function MMI.reformat(model::ConstantClassifier, X)
model.testing && @info "reformatting X"
return (MMI.matrix(X),)
end
function MMI.reformat(model::ConstantClassifier, X, y)
model.testing && @info "reformatting X, y"
return (MMI.matrix(X), y)
end
function MMI.reformat(model::ConstantClassifier, X, y, w)
model.testing && @info "reformatting X, y, w"
return (MMI.matrix(X), y, w)
end
function MMI.selectrows(model::ConstantClassifier, I, A)
model.testing && @info "resampling X"
return (view(A, I, :),)
end
function MMI.selectrows(model::ConstantClassifier, I, A, y)
model.testing && @info "resampling X, y"
return (view(A, I, :), y[I])
end
function MMI.selectrows(model::ConstantClassifier, I, A, y, ::Nothing)
model.testing && @info "resampling X, y, nothing"
return (view(A, I, :), y[I], nothing)
end
function MMI.selectrows(model::ConstantClassifier, I, A, y, w)
model.testing && @info "resampling X, y, w"
return (view(A, I, :), y[I], w[I])
end
# here `args` is `y` or `y, w`:
function MMI.fit(::ConstantClassifier, verbosity::Int, A, y, w=nothing)
fitresult = Distributions.fit(MLJBase.UnivariateFinite, y, w)
cache = nothing
report = nothing
return fitresult, cache, report
end
MMI.fitted_params(::ConstantClassifier, fitresult) =
(target_distribution=fitresult,)
MMI.predict(::ConstantClassifier, fitresult, Xnew) =
fill(fitresult, nrows(Xnew))
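# Illustrative usage sketch (assumes MLJBase, which re-exports `categorical` and
# `pdf`, is loaded along with this model; the data is made up). With training
# classes ['a', 'a', 'b'], every predicted distribution assigns 'a' probability 2/3:
#
#     y = categorical(['a', 'a', 'b'])
#     X = (x=rand(3),)
#     mach = fit!(machine(ConstantClassifier(), X, y), verbosity=0)
#     pdf.(predict(mach, X), 'a')  # ≈ [2/3, 2/3, 2/3]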
##
## DETERMINISTIC CONSTANT CLASSIFIER (FOR TESTING)
##
struct DeterministicConstantClassifier <: MMI.Deterministic end
function MMI.fit(::DeterministicConstantClassifier, verbosity::Int, X, y)
# dump missing target values and make into a regular array:
fitresult = mode(skipmissing(y) |> collect) # a CategoricalValue
cache = nothing
report = nothing
return fitresult, cache, report
end
MMI.reformat(::DeterministicConstantClassifier, X) = (MMI.matrix(X),)
MMI.reformat(::DeterministicConstantClassifier, X, y) = (MMI.matrix(X), y)
MMI.selectrows(::DeterministicConstantClassifier, I, A) = (view(A, I, :),)
MMI.selectrows(::DeterministicConstantClassifier, I, A, y) =
(view(A, I, :), y[I])
MMI.predict(::DeterministicConstantClassifier, fitresult, Xnew) =
fill(fitresult, nrows(Xnew))
#
# METADATA
#
metadata_pkg.((ConstantRegressor, ConstantClassifier,
DeterministicConstantRegressor, DeterministicConstantClassifier),
name="MLJModels",
uuid="d491faf4-2d78-11e9-2867-c94bc002c0b7",
url="https://github.com/JuliaAI/MLJModels.jl",
julia=true,
license="MIT",
is_wrapper=false)
metadata_model(ConstantRegressor,
input=MMI.Table,
target=AbstractVector{MMI.Continuous},
weights=false,
path="MLJModels.ConstantRegressor")
metadata_model(DeterministicConstantRegressor,
input=MMI.Table,
target=AbstractVector{MMI.Continuous},
weights=false,
path="MLJModels.DeterministicConstantRegressor")
metadata_model(ConstantClassifier,
input=MMI.Table,
target=AbstractVector{<:MMI.Finite},
weights=true,
path="MLJModels.ConstantClassifier")
metadata_model(DeterministicConstantClassifier,
input=MMI.Table,
target=AbstractVector{<:MMI.Finite},
weights=false,
path="MLJModels.DeterministicConstantClassifier")
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 8081 | export DecisionTreeClassifier, DecisionTreeRegressor
import MLJBase
import MLJBase: @mlj_model, metadata_pkg, metadata_model
import MLJBase.Tables
using ScientificTypes
using CategoricalArrays
import DecisionTree
## DESCRIPTIONS
const DTC_DESCR = "Decision Tree Classifier."
const DTR_DESCR = "Decision Tree Regressor."
## CLASSIFIER
struct TreePrinter{T}
tree::T
end
(c::TreePrinter)(depth) = DecisionTree.print_tree(c.tree, depth)
(c::TreePrinter)() = DecisionTree.print_tree(c.tree, 5)
Base.show(stream::IO, c::TreePrinter) =
print(stream, "TreePrinter object (call with display depth)")
"""
DecisionTreeClassifier(; kwargs...)
A variation on the CART decision tree classifier from [https://github.com/bensadeghi/DecisionTree.jl/blob/master/README.md](https://github.com/bensadeghi/DecisionTree.jl/blob/master/README.md).
Inputs are tables with ordinal columns. That is, the element scitype
of each column can be `Continuous`, `Count` or `OrderedFactor`.
Instead of predicting the mode class at each leaf, a UnivariateFinite
distribution is fit to the leaf training classes, with smoothing
controlled by an additional hyperparameter `pdf_smoothing`: if `n` is
the number of observed classes, then any class probability falling
below `pdf_smoothing/n` is raised to that threshold and the resulting
vector of probabilities is renormalized. Smoothing is only
applied to classes actually observed in training. Unseen classes
retain zero-probability predictions.
To visualize the fitted tree in the REPL, set `verbosity=2` when
fitting, or call `report(mach).print_tree(display_depth)` where `mach`
is the fitted machine, and `display_depth` the desired
depth. Interpreting the results will require knowledge of the
internal integer encodings of classes, which are given in
`fitted_params(mach)` (which also stores the raw learned tree object
from the DecisionTree.jl algorithm).
For post-fit pruning, set `post_prune=true` and set
`merge_purity_threshold` appropriately. Other hyperparameters as per
package documentation cited above.
"""
@mlj_model mutable struct DecisionTreeClassifier <: MLJBase.Probabilistic
pruning_purity::Float64 = 1.0::(_ ≤ 1)
max_depth::Int = (-)(1)::(_ ≥ -1)
min_samples_leaf::Int = 1::(_ ≥ 0)
min_samples_split::Int = 2::(_ ≥ 2)
min_purity_increase::Float64 = 0.0::(_ ≥ 0)
n_subfeatures::Int = 0::(_ ≥ 0)
display_depth::Int = 5::(_ ≥ 1)
post_prune::Bool = false
merge_purity_threshold::Float64 = 0.9::(0 ≤ _ ≤ 1)
pdf_smoothing::Float64 = 0.05::(0 ≤ _ ≤ 1)
end
#> A required `fit` method returns `fitresult, cache, report`. (Return
#> `cache=nothing` unless you are overloading `update`)
function MLJBase.fit(model::DecisionTreeClassifier, verbosity::Int, X, y)
Xmatrix = MLJBase.matrix(X)
yplain = MLJBase.int(y)
classes_seen = filter(in(unique(y)), MLJBase.classes(y[1]))
integers_seen = MLJBase.int(classes_seen) #unique(yplain)
tree = DecisionTree.build_tree(yplain, Xmatrix,
model.n_subfeatures,
model.max_depth,
model.min_samples_leaf,
model.min_samples_split,
model.min_purity_increase)
if model.post_prune
tree = DecisionTree.prune_tree(tree, model.merge_purity_threshold)
end
verbosity < 2 || DecisionTree.print_tree(tree, model.display_depth)
fitresult = (tree, classes_seen, integers_seen)
#> return package-specific statistics (eg, feature rankings,
#> internal estimates of generalization error) in `report`, which
#> should be a named tuple with the same type every call (can have
#> empty values):
cache = nothing
report = (
classes_seen=classes_seen,
print_tree=TreePrinter(tree),
features=Tables.columnnames(Tables.columns(X)) |> collect,
)
return fitresult, cache, report
end
function get_encoding(classes_seen)
a_cat_element = classes_seen[1]
return Dict(c => MLJBase.int(c) for c in MLJBase.classes(a_cat_element))
end
MLJBase.fitted_params(::DecisionTreeClassifier, fitresult) =
(tree_or_leaf=fitresult[1], encoding=get_encoding(fitresult[2]))
function smooth(prob_vector, smoothing)
threshold = smoothing/length(prob_vector)
smoothed_vector = map(prob_vector) do p
p < threshold ? threshold : p
end
smoothed_vector = smoothed_vector/sum(smoothed_vector)
return smoothed_vector
end
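# Worked example of the smoothing arithmetic described in the classifier docstring
# (illustrative values): with `pdf_smoothing = 0.05` and two observed classes the
# floor is 0.05/2 = 0.025, so
#
#     smooth([1.0, 0.0], 0.05)  # == [1.0, 0.025]/1.025 ≈ [0.9756, 0.0244]
#
# i.e. probabilities below the floor are raised to it and the vector is renormalized.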
function MLJBase.predict(model::DecisionTreeClassifier
, fitresult
, Xnew)
Xmatrix = MLJBase.matrix(Xnew)
tree, classes_seen, integers_seen = fitresult
y_probabilities =
DecisionTree.apply_tree_proba(tree, Xmatrix, integers_seen)
return [MLJBase.UnivariateFinite(classes_seen,
smooth(y_probabilities[i,:],
model.pdf_smoothing))
for i in 1:size(y_probabilities, 1)]
end
MLJBase.reports_feature_importances(::Type{<:DecisionTreeClassifier}) = true
function MMI.feature_importances(m::DecisionTreeClassifier, fitresult, report)
features = report.features
fi = DecisionTree.impurity_importance(first(fitresult), normalize=true)
fi_pairs = Pair.(features, fi)
# sort descending
sort!(fi_pairs, by= x->-x[2])
return fi_pairs
end
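# Illustrative usage sketch for the classifier (assumes MLJBase is loaded along with
# this model; the `make_blobs` data is made up for demonstration):
#
#     X, y = make_blobs(100)
#     mach = fit!(machine(DecisionTreeClassifier(max_depth=3), X, y), verbosity=0)
#     report(mach).print_tree(3)    # print the fitted tree to depth 3
#     fitted_params(mach).encoding  # class => internal integer code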
## REGRESSOR
"""
DecisionTreeRegressor(; kwargs...)
CART decision tree regressor from
[https://github.com/bensadeghi/DecisionTree.jl/blob/master/README.md](https://github.com/bensadeghi/DecisionTree.jl/blob/master/README.md). Predictions
are Deterministic.
Inputs are tables with ordinal columns. That is, the element scitype
of each column can be `Continuous`, `Count` or `OrderedFactor`.
For post-fit pruning, set `post_prune=true` and set
`pruning_purity_threshold` appropriately. Other hyperparameters as per
package documentation cited above.
"""
@mlj_model mutable struct DecisionTreeRegressor <: MLJBase.Deterministic
pruning_purity_threshold::Float64 = 0.0::(0 ≤ _ ≤ 1)
max_depth::Int = (-)(1)::(_ ≥ -1)
min_samples_leaf::Int = 5::(_ ≥ 0)
min_samples_split::Int = 2::(_ ≥ 2)
min_purity_increase::Float64 = 0.0::(_ ≥ 0)
n_subfeatures::Int = 0::(_ ≥ 0)
post_prune::Bool = false
end
function MLJBase.fit(model::DecisionTreeRegressor, verbosity::Int, X, y)
Xmatrix = MLJBase.matrix(X)
fitresult = DecisionTree.build_tree(float.(y), Xmatrix
, model.n_subfeatures
, model.max_depth
, model.min_samples_leaf
, model.min_samples_split
, model.min_purity_increase)
if model.post_prune
fitresult = DecisionTree.prune_tree(fitresult,
model.pruning_purity_threshold)
end
cache = nothing
report = nothing
return fitresult, cache, report
end
MLJBase.fitted_params(::DecisionTreeRegressor, fitresult) =
(tree_or_leaf = fitresult,)
function MLJBase.predict(model::DecisionTreeRegressor
, fitresult
, Xnew)
Xmatrix = MLJBase.matrix(Xnew)
return DecisionTree.apply_tree(fitresult,Xmatrix)
end
##
## METADATA
##
metadata_pkg.((DecisionTreeClassifier, DecisionTreeRegressor),
name="DecisionTree",
uuid="7806a523-6efd-50cb-b5f6-3fa6f1930dbb",
url="https://github.com/bensadeghi/DecisionTree.jl",
julia=true,
license="MIT",
is_wrapper=false)
metadata_model(DecisionTreeClassifier,
input=MLJBase.Table(Continuous, Count, OrderedFactor),
target=AbstractVector{<:MLJBase.Finite},
weights=false,)
metadata_model(DecisionTreeRegressor,
input=MLJBase.Table(Continuous, Count, OrderedFactor),
target=AbstractVector{MLJBase.Continuous},
weights=false,)
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 4263 | export RidgeRegressor, PCA
import MLJBase
import MLJBase: @mlj_model, metadata_model, metadata_pkg
# using Distances
using LinearAlgebra
using Tables
using ScientificTypes
import MultivariateStats
const MS = MultivariateStats
struct LinearFitresult{F} <: MLJBase.MLJType
coefficients::Vector{F}
intercept::F
end
const RIDGE_DESCR = "Ridge regressor with regularization parameter lambda. Learns a linear regression with a penalty on the l2 norm of the coefficients."
const PCA_DESCR = "Principal component analysis. Learns a linear transformation to project the data onto a lower-dimensional space while preserving most of the initial variance."
####
#### RIDGE
####
"""
RidgeRegressor(; lambda=1.0)
$RIDGE_DESCR
## Parameters
* `lambda=1.0`: non-negative parameter for the regularization strength.
"""
@mlj_model mutable struct RidgeRegressor <: MLJBase.Deterministic
lambda::Real = 1.0::(_ ≥ 0)
end
function MLJBase.fit(model::RidgeRegressor, verbosity::Int, X, y)
Xmatrix = MLJBase.matrix(X)
features = Tables.schema(X).names
θ = MS.ridge(Xmatrix, y, model.lambda)
coefs = θ[1:end-1]
intercept = θ[end]
fitresult = LinearFitresult(coefs, intercept)
report = nothing
cache = nothing
return fitresult, cache, report
end
MLJBase.fitted_params(::RidgeRegressor, fr) =
(coefficients=fr.coefficients, intercept=fr.intercept)
function MLJBase.predict(::RidgeRegressor, fr, Xnew)
Xmatrix = MLJBase.matrix(Xnew)
return Xmatrix * fr.coefficients .+ fr.intercept
end
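# Illustrative usage sketch (assumes MLJBase is loaded along with this model; the
# data is made up). The fit above delegates to `MultivariateStats.ridge`, i.e. linear
# least squares with an l2 penalty of strength `lambda` on the coefficients:
#
#     X = MLJBase.table(rand(50, 3))
#     y = rand(50)
#     mach = fit!(machine(RidgeRegressor(lambda=0.1), X, y), verbosity=0)
#     fitted_params(mach)  # (coefficients = ..., intercept = ...)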
####
#### PCA
####
const PCAFitResultType = MS.PCA
"""
PCA(; maxoutdim=nothing, method=:auto, pratio=0.99, mean=nothing)
$PCA_DESCR
## Parameters
* `maxoutdim=nothing`: maximum number of output dimensions, unconstrained if nothing.
* `method=:auto`: method to use to solve the problem, one of `:auto`, `:cov` or `:svd`
* `pratio=0.99`: ratio of variance preserved
* `mean=nothing`: if set to `nothing`, centering is computed and applied; if set to `0`, no centering is applied (data assumed pre-centered); if a vector is passed, that vector is used for centering.
"""
@mlj_model mutable struct PCA <: MLJBase.Unsupervised
maxoutdim::Union{Nothing,Int} = nothing::(_ === nothing || _ ≥ 1)
method::Symbol = :auto::(_ in (:auto, :cov, :svd))
pratio::Float64 = 0.99::(0.0 < _ ≤ 1.0)
mean::Union{Nothing, Real, Vector{Float64}} = nothing::(_ === nothing || (_ isa Real && iszero(_)) || true)
end
function MLJBase.fit(model::PCA, verbosity::Int, X)
Xarray = MLJBase.matrix(X)
mindim = minimum(size(Xarray))
maxoutdim = model.maxoutdim === nothing ? mindim : model.maxoutdim
# NOTE: copy/transpose
fitresult = MS.fit(MS.PCA, permutedims(Xarray);
method=model.method,
pratio=model.pratio,
maxoutdim=maxoutdim,
mean=model.mean)
cache = nothing
report = (indim=size(fitresult)[1],
outdim=size(fitresult)[2],
mean=MS.mean(fitresult),
principalvars=MS.principalvars(fitresult),
tprincipalvar=MS.tprincipalvar(fitresult),
tresidualvar=MS.tresidualvar(fitresult),
tvar=MS.var(fitresult))
return fitresult, cache, report
end
MLJBase.fitted_params(::PCA, fr) = (projection=fr,)
function MLJBase.transform(::PCA, fr::PCAFitResultType, X)
# X is n x d, need to transpose and copy twice...
Xarray = MLJBase.matrix(X)
Xnew = permutedims(MS.predict(fr, permutedims(Xarray)))
return MLJBase.table(Xnew, prototype=X)
end
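# Illustrative usage sketch (invented data; not part of the package). `PCA` is
# `Unsupervised`, so `fit` takes no target and new data is reduced with
# `transform`:
#
#     X = MLJBase.table(randn(100, 5))
#     model = PCA(maxoutdim=2)
#     fitresult, cache, report = MLJBase.fit(model, 0, X)
#     Xproj = MLJBase.transform(model, fitresult, X)  # table with at most 2 columns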
####
#### METADATA
####
metadata_pkg.((RidgeRegressor, PCA),
name="MultivariateStats",
uuid="6f286f6a-111f-5878-ab1e-185364afe411",
url="https://github.com/JuliaStats/MultivariateStats.jl",
license="MIT",
julia=true,
is_wrapper=false)
metadata_model(RidgeRegressor,
input=MLJBase.Table(MLJBase.Continuous),
target=AbstractVector{MLJBase.Continuous},
weights=false,)
metadata_model(PCA,
input=MLJBase.Table(MLJBase.Continuous),
target=MLJBase.Table(MLJBase.Continuous),
weights=false,)
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 5801 | export KNNRegressor, KNNClassifier
import MLJBase: @mlj_model, metadata_model, metadata_pkg
using Distances
import NearestNeighbors
const NN = NearestNeighbors
const KNNRegressorDescription =
"""
K-Nearest Neighbors regressor: predicts the response associated with a new point
by taking an average of the responses of the K nearest points.
"""
const KNNClassifierDescription =
"""
K-Nearest Neighbors classifier: predicts the class associated with a new point
by taking a vote over the classes of the K-nearest points.
"""
const KNNFields =
"""
## Keywords
* `K=5` : number of neighbors
* `algorithm=:kdtree` : one of `(:kdtree, :brutetree, :balltree)`
* `metric=Euclidean()` : a `Metric` object for the distance between points
* `leafsize=10` : at what number of points to stop splitting the tree
* `reorder=true` : if `true`, points that are close in distance are stored close in memory
* `weights=:uniform` : one of `(:uniform, :distance)`; if `:uniform`, all neighbors are
  considered equally important; if `:distance`, closer neighbors are proportionally
  more important.
See also the [package documentation](https://github.com/KristofferC/NearestNeighbors.jl).
"""
"""
KNNRegressor(;kwargs...)
$KNNRegressorDescription
$KNNFields
"""
@mlj_model mutable struct KNNRegressor <: MLJBase.Deterministic
K::Int = 5::(_ > 0)
algorithm::Symbol = :kdtree::(_ in (:kdtree, :brutetree, :balltree))
metric::Metric = Euclidean()
leafsize::Int = 10::(_ ≥ 0)
reorder::Bool = true
weights::Symbol = :uniform::(_ in (:uniform, :distance))
end
"""
KNNClassifier(;kwargs...)
$KNNClassifierDescription
$KNNFields
"""
@mlj_model mutable struct KNNClassifier <: MLJBase.Probabilistic
K::Int = 5::(_ > 0)
algorithm::Symbol = :kdtree::(_ in (:kdtree, :brutetree, :balltree))
metric::Metric = Euclidean()
leafsize::Int = 10::(_ ≥ 0)
reorder::Bool = true
weights::Symbol = :uniform::(_ in (:uniform, :distance))
end
const KNN = Union{KNNRegressor, KNNClassifier}
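# Data front-end: observations are presented to NearestNeighbors.jl as the
# *columns* of a matrix, so tables are converted once, up front, by `reformat`,
# and row-resampling then reduces to taking column views in `selectrows` below.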
MMI.reformat(::KNN, X) = (MMI.matrix(X, transpose=true),)
MMI.reformat(::KNN, X, y) = (MMI.matrix(X, transpose=true), y)
MMI.reformat(::KNN, X, y, w) =
error("$Weights must be abstract vectors with `AbstractFloat` "*
"or `Integer` eltype, or be `nothing`. ")
MMI.reformat(::KNN, X, y, w::Union{Nothing,AbstractVector{<:AbstractFloat}}) =
(MMI.matrix(X, transpose=true), y, w)
MMI.reformat(::KNN, X, y, w::AbstractVector{<:Integer}) =
(MMI.matrix(X, transpose=true), y, float.(w))
MMI.selectrows(::KNN, I, Xmatrix) =
(view(Xmatrix, :, I),)
MMI.selectrows(::KNN, I, Xmatrix, y) =
(view(Xmatrix, :, I), view(y, I))
MMI.selectrows(::KNN, I, Xmatrix, y, w) =
(view(Xmatrix, :, I), view(y, I), view(w, I))
MMI.selectrows(::KNN, I, Xmatrix, y, ::Nothing) =
(view(Xmatrix, :, I), view(y, I), nothing)
function MLJBase.fit(m::KNN, verbosity::Int, Xmatrix, y, w=nothing)
if m.algorithm == :kdtree
tree = NN.KDTree(Xmatrix; leafsize=m.leafsize, reorder=m.reorder)
elseif m.algorithm == :balltree
tree = NN.BallTree(Xmatrix; leafsize=m.leafsize, reorder=m.reorder)
elseif m.algorithm == :brutetree
tree = NN.BruteTree(Xmatrix; leafsize=m.leafsize, reorder=m.reorder)
end
report = NamedTuple{}()
return (tree, y, w), nothing, report
end
MLJBase.fitted_params(model::KNN, (tree, _)) = (tree=tree,)
function MLJBase.predict(m::KNNClassifier, (tree, y, w), Xmatrix)
# for each entry, get the K closest training point + their distance
idxs, dists = NN.knn(tree, Xmatrix, m.K)
preds = Vector{MLJBase.UnivariateFinite}(undef, length(idxs))
classes = MLJBase.classes(y[1])
probas = zeros(length(classes))
w_ = ones(m.K)
# go over each test record, and for each go over the k nearest entries
for i in eachindex(idxs)
idxs_ = idxs[i]
dists_ = dists[i]
labels = y[idxs_]
if w !== nothing
w_ = w[idxs_]
end
probas .*= 0.0
if m.weights == :uniform
for (k, label) in enumerate(labels)
probas[classes .== label] .+= 1.0 / m.K * w_[k]
end
else
for (k, label) in enumerate(labels)
probas[classes .== label] .+= 1.0 / dists_[k] * w_[k]
end
end
# normalize so that sum to 1
probas ./= sum(probas)
preds[i] = MLJBase.UnivariateFinite(classes, probas)
end
return [preds...]
end
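# Note on the regressor's distance-weighted prediction below: with
# `weights=:distance` each of the K neighbours contributes with weight
# proportional to `1 - d_k/sum(d)`, so nearer points count more. For unit
# sample weights these multipliers sum to K - 1, which is why the normalizer
# is `sum(w_) - 1`.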
function MLJBase.predict(m::KNNRegressor, (tree, y, w), Xmatrix)
idxs, dists = NN.knn(tree, Xmatrix, m.K)
preds = zeros(length(idxs))
w_ = ones(m.K)
for i in eachindex(idxs)
idxs_ = idxs[i]
dists_ = dists[i]
values = y[idxs_]
if w !== nothing
w_ = w[idxs_]
end
if m.weights == :uniform
preds[i] = sum(values .* w_) / sum(w_)
else
preds[i] = sum(values .* w_ .* (1.0 .- dists_ ./ sum(dists_))) / (sum(w_) - 1)
end
end
return preds
end
# ====
metadata_pkg.((KNNRegressor, KNNClassifier),
name="NearestNeighbors",
uuid="b8a86587-4115-5ab1-83bc-aa920d37bbce",
url="https://github.com/KristofferC/NearestNeighbors.jl",
julia=true,
license="MIT",
is_wrapper=false
)
metadata_model(KNNRegressor,
input=MLJBase.Table(MLJBase.Continuous),
target=AbstractVector{MLJBase.Continuous},
weights=true,
)
metadata_model(KNNClassifier,
input=MLJBase.Table(MLJBase.Continuous),
target=AbstractVector{<:MLJBase.Finite},
weights=true,
)
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 21282 | ## CONSTANTS
export FeatureSelector,
UnivariateStandardizer, Standardizer,
UnivariateBoxCoxTransformer,
OneHotEncoder, UnivariateDiscretizer, Averager
using Statistics
const N_VALUES_THRESH = 16 # for BoxCoxTransformation
const CategoricalValue = MLJBase.CategoricalValue
## DESCRIPTIONS (see also metadata at the bottom)
const FEATURE_SELECTOR_DESCR = "Filter features (columns) of a table by name."
const UNIVARIATE_STD_DESCR = "Standardize (whiten) univariate data."
const UNIVARIATE_DISCR_DESCR = "Discretize continuous variables via quantiles."
const STANDARDIZER_DESCR = "Standardize (whiten) data."
const UNIVARIATE_BOX_COX_DESCR = "Box-Cox transformation of univariate data."
const ONE_HOT_DESCR = "One-Hot-Encoding of the categorical data."
##
## FOR FEATURE (COLUMN) SELECTION
##
"""
FeatureSelector(features=Symbol[])
An unsupervised model for filtering features (columns) of a table.
Only those features encountered during fitting will appear in
transformed tables if `features` is empty (the default).
Alternatively, if a non-empty `features` is specified, then only the
specified features are used. Throws an error if a recorded or
specified feature is not present in the transformation input.
"""
mutable struct FeatureSelector <: MLJBase.Unsupervised
features::Vector{Symbol}
end
FeatureSelector(; features=Symbol[]) = FeatureSelector(features)
function MLJBase.fit(transformer::FeatureSelector, verbosity::Int, X)
namesX = collect(Tables.schema(X).names)
if isempty(transformer.features)
fitresult = namesX
else
all(e -> e in namesX, transformer.features) ||
throw(error("Attempting to select non-existent feature(s)."))
fitresult = transformer.features
end
report = nothing
return fitresult, nothing, report
end
MLJBase.fitted_params(::FeatureSelector, fitresult) = (features_to_keep=fitresult,)
function MLJBase.transform(transformer::FeatureSelector, features, X)
all(e -> e in Tables.schema(X).names, features) ||
throw(error("Supplied frame does not admit previously selected features."))
return MLJBase.selectcols(X, features)
end
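# Illustrative usage sketch (invented data; not part of the package). The
# fitresult is just the vector of feature names to keep:
#
#     X = (x1=rand(3), x2=rand(3), x3=rand(3))
#     selector = FeatureSelector(features=[:x1, :x3])
#     fitresult, _, _ = MLJBase.fit(selector, 0, X)
#     MLJBase.transform(selector, fitresult, X)  # table with columns :x1 and :x3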
##
## UNIVARIATE Discretizer
##
# helper functions
reftype(::CategoricalArray{<:Any,<:Any,R}) where R = R
"""
UnivariateDiscretizer(n_classes=512)
Returns an `MLJModel` for discretizing any continuous vector `v`
(`scitype(v) <: AbstractVector{Continuous}`), where `n_classes`
describes the resolution of the discretization.
Transformed output `w` is a vector of ordered factors (`scitype(w) <:
AbstractVector{<:OrderedFactor}`). Specifically, `w` is a
`CategoricalVector`, with element type
`CategoricalValue{R,R}`, where the reference type `R<:Unsigned` is optimized.
The transformation is chosen so that the vector on which the
transformer is fit has, in transformed form, an approximately uniform
distribution of values.
### Example
using MLJ
t = UnivariateDiscretizer(n_classes=10)
discretizer = machine(t, randn(1000))
fit!(discretizer)
v = rand(10)
w = transform(discretizer, v)
v_approx = inverse_transform(discretizer, w) # reconstruction of v from w
"""
mutable struct UnivariateDiscretizer <:MLJBase.Unsupervised
n_classes::Int
end
UnivariateDiscretizer(; n_classes=512) = UnivariateDiscretizer(n_classes)
struct UnivariateDiscretizerResult{C}
odd_quantiles::Vector{Float64}
even_quantiles::Vector{Float64}
element::C
end
function MLJBase.fit(transformer::UnivariateDiscretizer, verbosity::Int,X)
n_classes = transformer.n_classes
quantiles = quantile(X, Array(range(0, stop=1, length=2*n_classes+1)))
clipped_quantiles = quantiles[2:2*n_classes] # drop 0% and 100% quantiles
# odd_quantiles for transforming, even_quantiles used for
# inverse_transforming:
odd_quantiles = clipped_quantiles[2:2:(2*n_classes-2)]
even_quantiles = clipped_quantiles[1:2:(2*n_classes-1)]
# determine optimal reference type for encoding as categorical:
R = reftype(categorical(1:n_classes, true))
output_prototype = categorical(R(1):R(n_classes), true, ordered=true)
element = output_prototype[1]
cache = nothing
report = nothing
res = UnivariateDiscretizerResult(odd_quantiles, even_quantiles, element)
return res, cache, report
end
# acts on scalars:
function transform_to_int(
result::UnivariateDiscretizerResult{<:MLJBase.CategoricalValue},
r::R) where R <: Real
k = R(1)
for q in result.odd_quantiles
if r > q
k += R(1)
end
end
return k
end
# transforming scalars:
MLJBase.transform(::UnivariateDiscretizer, result, r::Real) =
transform(result.element, transform_to_int(result, r))
# transforming vectors:
function MLJBase.transform(::UnivariateDiscretizer, result, v)
w = [transform_to_int(result, r) for r in v]
return transform(result.element, w)
end
# inverse_transforming raw scalars:
function MLJBase.inverse_transform(
transformer::UnivariateDiscretizer, result , k::Integer)
k <= transformer.n_classes && k > 0 ||
error("Cannot transform an integer outside the range "*
"`[1, n_classes]`, where `n_classes = $(transformer.n_classes)`")
return result.even_quantiles[k]
end
# inverse transforming a categorical value:
function MLJBase.inverse_transform(
transformer::UnivariateDiscretizer, result, e::CategoricalValue)
k = MLJBase.unwrap(e)
return inverse_transform(transformer, result, k)
end
# inverse transforming raw vectors:
MLJBase.inverse_transform(transformer::UnivariateDiscretizer, result,
w::AbstractVector{<:Integer}) =
[inverse_transform(transformer, result, k) for k in w]
# inverse transforming vectors of categorical elements:
function MLJBase.inverse_transform(transformer::UnivariateDiscretizer, result,
wcat::AbstractVector{<:CategoricalValue})
w = MLJBase.int(wcat)
return [inverse_transform(transformer, result, k) for k in w]
end
## UNIVARIATE STANDARDIZATION
"""
UnivariateStandardizer()
Unsupervised model for standardizing (whitening) univariate data.
"""
mutable struct UnivariateStandardizer <: MLJBase.Unsupervised end
function MLJBase.fit(transformer::UnivariateStandardizer, verbosity::Int,
v::AbstractVector{T}) where T<:Real
std(v) > eps(Float64) ||
@warn "Extremely small standard deviation encountered "*
"in standardization."
fitresult = (mean(v), std(v))
cache = nothing
report = nothing
return fitresult, cache, report
end
# for transforming single value:
function MLJBase.transform(transformer::UnivariateStandardizer, fitresult, x::Real)
mu, sigma = fitresult
return (x - mu)/sigma
end
# for transforming vector:
MLJBase.transform(transformer::UnivariateStandardizer, fitresult, v) =
[transform(transformer, fitresult, x) for x in v]
# for single values:
function MLJBase.inverse_transform(transformer::UnivariateStandardizer, fitresult, y::Real)
mu, sigma = fitresult
return mu + y*sigma
end
# for vectors:
MLJBase.inverse_transform(transformer::UnivariateStandardizer, fitresult, w) =
[inverse_transform(transformer, fitresult, y) for y in w]
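# Illustrative round trip (invented data; not part of the package). The
# fitresult is the `(mean, std)` pair of the training vector:
#
#     v = [1.0, 2.0, 3.0, 4.0]
#     stand = UnivariateStandardizer()
#     fitresult, _, _ = MLJBase.fit(stand, 0, v)          # (2.5, std(v))
#     w = MLJBase.transform(stand, fitresult, v)          # zero mean, unit std
#     MLJBase.inverse_transform(stand, fitresult, w) ≈ v  # true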
## STANDARDIZATION OF ORDINAL FEATURES OF TABULAR DATA
"""
Standardizer(; features=Symbol[])
Unsupervised model for standardizing (whitening) the columns of
tabular data. If `features` is empty then all columns `v` for which
all elements have `Continuous` scitypes are standardized. For
different behaviour (e.g. standardizing counts as well), specify the
names of features to be standardized.
using DataFrames
X = DataFrame(x1=[0.2, 0.3, 1.0], x2=[4, 2, 3])
stand_model = Standardizer()
transform(fit!(machine(stand_model, X)), X)
3×2 DataFrame
│ Row │ x1 │ x2 │
│ │ Float64 │ Int64 │
├─────┼───────────┼───────┤
│ 1 │ -0.688247 │ 4 │
│ 2 │ -0.458831 │ 2 │
│ 3 │ 1.14708 │ 3 │
"""
mutable struct Standardizer <: MLJBase.Unsupervised
features::Vector{Symbol}
end
Standardizer(; features=Symbol[]) = Standardizer(features)
function MLJBase.fit(transformer::Standardizer, verbosity::Int, X::Any)
all_features = Tables.schema(X).names
mach_types = collect(eltype(selectcols(X, c)) for c in all_features)
# determine indices of all_features to be transformed
if isempty(transformer.features)
cols_to_fit = filter!(eachindex(all_features) |> collect) do j
mach_types[j] <: AbstractFloat
end
else
issubset(transformer.features, all_features) ||
@warn "Some specified features not present in table to be fit. "
cols_to_fit = filter!(eachindex(all_features) |> collect) do j
all_features[j] in transformer.features && mach_types[j] <: Real
end
end
fitresult_given_feature = Dict{Symbol,Tuple{Float64,Float64}}()
# fit each feature
verbosity < 2 || @info "Features standardized: "
for j in cols_to_fit
col_fitresult, cache, report =
fit(UnivariateStandardizer(), verbosity - 1, selectcols(X, j))
fitresult_given_feature[all_features[j]] = col_fitresult
verbosity < 2 ||
@info " :$(all_features[j]) mu=$(col_fitresult[1]) sigma=$(col_fitresult[2])"
end
fitresult = fitresult_given_feature
cache = nothing
report = (features_fit=keys(fitresult_given_feature),)
return fitresult, cache, report
end
MLJBase.fitted_params(::Standardizer, fitresult) = (mean_and_std_given_feature=fitresult,)
function MLJBase.transform(transformer::Standardizer, fitresult, X)
# `fitresult` is dict of column fitresults, keyed on feature names
features_to_be_transformed = keys(fitresult)
all_features = Tables.schema(X).names
all(e -> e in all_features, features_to_be_transformed) ||
error("Attempting to transform data with incompatible feature labels.")
col_transformer = UnivariateStandardizer()
cols = map(all_features) do ftr
if ftr in features_to_be_transformed
transform(col_transformer, fitresult[ftr], selectcols(X, ftr))
else
selectcols(X, ftr)
end
end
named_cols = NamedTuple{all_features}(tuple(cols...))
return MLJBase.table(named_cols, prototype=X)
end
##
## UNIVARIATE BOX-COX TRANSFORMATIONS
##
function standardize(v)
map(v) do x
(x - mean(v))/std(v)
end
end
function midpoints(v::AbstractVector{T}) where T <: Real
return [0.5*(v[i] + v[i + 1]) for i in 1:(length(v) -1)]
end
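# `normality(v)` scores how Gaussian `v` looks: it correlates the standardized,
# sorted data (replaced by midpoints) with approximate expected standard-normal
# order statistics, as in a normal probability plot; values near 1 indicate
# approximate normality.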
function normality(v)
n = length(v)
v = standardize(convert(Vector{Float64}, v))
# sort and replace with midpoints
v = midpoints(sort!(v))
# find the (approximate) expected values of the (n-1) order statistics for a
# standard normal:
d = Distributions.Normal(0,1)
w = map(collect(1:(n-1))/n) do x
quantile(d, x)
end
return cor(v, w)
end
function boxcox(lambda, c, x::Real)
c + x >= 0 || throw(DomainError)
if lambda == 0.0
c + x > 0 || throw(DomainError)
return log(c + x)
end
return ((c + x)^lambda - 1)/lambda
end
boxcox(lambda, c, v::AbstractVector{T}) where T <: Real =
[boxcox(lambda, c, x) for x in v]
"""
UnivariateBoxCoxTransformer(; n=171, shift=false)
Unsupervised model specifying a univariate Box-Cox
transformation of a single variable taking non-negative values, with a
possible preliminary shift. Such a transformation is of the form
x -> ((x + c)^λ - 1)/λ for λ not 0
x -> log(x + c) for λ = 0
On fitting to data `n` different values of the Box-Cox
exponent λ (between `-0.4` and `3`) are searched to find the value
maximizing normality. If `shift=true` and zero values are encountered
in the data then the transformation sought includes a preliminary
positive shift `c` of `0.2` times the data mean. If there are no zero
values, then no shift is applied.
"""
mutable struct UnivariateBoxCoxTransformer <: MLJBase.Unsupervised
n::Int
shift::Bool
end
UnivariateBoxCoxTransformer(; n=171, shift=false) =
UnivariateBoxCoxTransformer(n, shift)
function MLJBase.fit(transformer::UnivariateBoxCoxTransformer, verbosity::Int,
v::AbstractVector{T}) where T <: Real
m = minimum(v)
m >= 0 || error("Cannot perform a Box-Cox transformation on negative data.")
c = 0.0 # default
if transformer.shift
if m == 0
c = 0.2*mean(v)
end
else
m != 0 || error("Zero value encountered in data being Box-Cox transformed.\n"*
"Consider calling `fit!` with `shift=true`.")
end
lambdas = range(-0.4, stop=3, length=transformer.n)
scores = Float64[normality(boxcox(l, c, v)) for l in lambdas]
lambda = lambdas[argmax(scores)]
return (lambda, c), nothing, nothing
end
MLJBase.fitted_params(::UnivariateBoxCoxTransformer, fitresult) =
(λ=fitresult[1], c=fitresult[2])
# for X scalar or vector:
MLJBase.transform(transformer::UnivariateBoxCoxTransformer, fitresult, X) =
boxcox(fitresult..., X)
# scalar case:
function MLJBase.inverse_transform(transformer::UnivariateBoxCoxTransformer,
fitresult, x::Real)
lambda, c = fitresult
if lambda == 0
return exp(x) - c
else
return (lambda*x + 1)^(1/lambda) - c
end
end
# vector case:
function MLJBase.inverse_transform(transformer::UnivariateBoxCoxTransformer,
fitresult, w::AbstractVector{T}) where T <: Real
return [inverse_transform(transformer, fitresult, y) for y in w]
end
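# Illustrative usage sketch (invented data; not part of the package):
#
#     v = abs.(randn(100)) .+ 0.1
#     t = UnivariateBoxCoxTransformer(n=50)
#     fitresult, _, _ = MLJBase.fit(t, 0, v)          # fitresult = (λ, c)
#     w = MLJBase.transform(t, fitresult, v)          # more nearly normal
#     MLJBase.inverse_transform(t, fitresult, w) ≈ v  # true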
## ONE HOT ENCODING
"""
OneHotEncoder(; features=Symbol[], drop_last=false, ordered_factor=true)
Unsupervised model for one-hot encoding all features of `Finite`
scitype, within some table. If `ordered_factor=false` then
only `Multiclass` features are considered. The features encoded are
further restricted to those in `features`, when specified and
non-empty.
If `drop_last` is true, the column for the last level of each
categorical feature is dropped. New data to be transformed may lack
features present in the fit data, but no new features can be present.
*Warning:* This transformer assumes that the elements of a categorical
feature in new data to be transformed point to the same
CategoricalPool object encountered during the fit.
"""
mutable struct OneHotEncoder <: MLJBase.Unsupervised
features::Vector{Symbol}
drop_last::Bool
ordered_factor::Bool
end
OneHotEncoder(; features=Symbol[], drop_last=false, ordered_factor=true) =
OneHotEncoder(features, drop_last, ordered_factor)
# we store the categorical refs for each feature to be encoded and the
# corresponding feature labels generated (called
# "names"). `all_features` is stored to ensure no new features appear
# in new input data, causing potential name clashes.
struct OneHotEncoderResult <: MLJBase.MLJType
all_features::Vector{Symbol} # all feature labels
ref_name_pairs_given_feature::Dict{Symbol,Vector{Pair{<:Unsigned,Symbol}}}
end
# join feature and level into new label without clashing with anything
# in all_features:
function compound_label(all_features, feature, level)
label = Symbol(string(feature, "__", level))
# in the (rare) case `label` is not a new feature label:
while label in all_features
label = Symbol(string(label,"_"))
end
return label
end
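# For example, `compound_label((:x1, :x2), :x2, "yes")` returns `:x2__yes`,
# with underscores appended in the unlikely event of a clash with an existing
# feature label.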
function MLJBase.fit(transformer::OneHotEncoder, verbosity::Int, X)
all_features = Tables.schema(X).names # a tuple not vector
specified_features =
isempty(transformer.features) ? collect(all_features) : transformer.features
#
ref_name_pairs_given_feature = Dict{Symbol,Vector{Pair{<:Unsigned,Symbol}}}()
allowed_scitypes = ifelse(transformer.ordered_factor, Finite, Multiclass)
col_scitypes = schema(X).scitypes
# apply on each feature
for j in eachindex(all_features)
ftr = all_features[j]
col = MLJBase.selectcols(X,j)
T = col_scitypes[j]
if T <: allowed_scitypes && ftr in specified_features
ref_name_pairs_given_feature[ftr] = Pair{<:Unsigned,Symbol}[]
shift = transformer.drop_last ? 1 : 0
levels = MLJBase.classes(first(col))
if verbosity > 0
@info "Spawning $(length(levels)-shift) sub-features "*
"to one-hot encode feature :$ftr."
end
for level in levels[1:end-shift]
ref = MLJBase.int(level)
name = compound_label(all_features, ftr, level)
push!(ref_name_pairs_given_feature[ftr], ref => name)
end
end
end
fitresult = OneHotEncoderResult(collect(all_features),
ref_name_pairs_given_feature)
# get new feature names
d = ref_name_pairs_given_feature
new_features = Symbol[]
features_to_be_transformed = keys(d)
for ftr in all_features
if ftr in features_to_be_transformed
append!(new_features, last.(d[ftr]))
else
push!(new_features, ftr)
end
end
report = (features_to_be_encoded=
collect(keys(ref_name_pairs_given_feature)),
new_features=new_features)
cache = nothing
return fitresult, cache, report
end
# If v = categorical(['a', 'a', 'b', 'a', 'c']) and MLJBase.int(v[1]) = ref
# then `_hot(v, ref) = [true, true, false, true, false]`
_hot(v::AbstractVector{<:CategoricalValue}, ref) = map(v) do c
MLJBase.int(c) == ref
end
function MLJBase.transform(transformer::OneHotEncoder, fitresult, X)
features = Tables.schema(X).names # tuple not vector
d = fitresult.ref_name_pairs_given_feature
# check the features match the fit result
all(e -> e in fitresult.all_features, features) ||
error("Attempting to transform table with feature "*
"labels not seen in fit. ")
new_features = Symbol[]
new_cols = Vector[]
features_to_be_transformed = keys(d)
for ftr in features
col = MLJBase.selectcols(X, ftr)
if ftr in features_to_be_transformed
append!(new_features, last.(d[ftr]))
pairs = d[ftr]
refs = first.(pairs)
names = last.(pairs)
cols_to_add = map(refs) do ref
float.(_hot(col, ref))
end
append!(new_cols, cols_to_add)
else
push!(new_features, ftr)
push!(new_cols, col)
end
end
named_cols = NamedTuple{tuple(new_features...)}(tuple(new_cols...))
return MLJBase.table(named_cols, prototype=X)
end
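# Illustrative usage sketch (invented data; not part of the package):
#
#     X = (name=categorical(["Danesh", "Lee", "Mary"]), height=[1.85, 1.67, 1.5])
#     hot = OneHotEncoder()
#     fitresult, _, report = MLJBase.fit(hot, 0, X)
#     MLJBase.transform(hot, fitresult, X)
#     # table with columns :name__Danesh, :name__Lee, :name__Mary, :height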
"""
Averager(; mix = 0.5)
Static model for computing the weighted combination `(1 - mix)*y1 + mix*y2` of two vectors `y1` and `y2`.
"""
mutable struct Averager <: Static
mix::Float64
end
function MLJBase.clean!(model::Averager)
warning = ""
if model.mix > 1 || model.mix < 0
warning *= "Need `0 ≤ mix ≤ 1`. Resetting `mix=0.5`` "
model.mix = 0.5
end
return warning
end
function Averager(; mix=0.0)
model = Averager(mix)
message = MLJBase.clean!(model)
isempty(message) || @warn message
return model
end
function MLJBase.transform(m::Averager, _, y1, y2)
if !(y1 isa AbstractVector && y2 isa AbstractVector)
throw(
ArgumentError(
"Both inputs to the `Averager` transform must be "*
"instances of `AbstractVector`"
)
)
end
average = ((1 - m.mix) .* y1) .+ (m.mix .* y2)
return average
end
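# Illustrative usage sketch (invented data; not part of the package). Being
# `Static`, the averager has no training data, and the fitresult passed to
# `transform` is ignored:
#
#     avg = Averager(mix=0.25)
#     MLJBase.transform(avg, nothing, [0.0, 0.0], [4.0, 8.0])  # [1.0, 2.0]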
##
## Metadata for all built-in transformers
##
metadata_pkg.(
(
FeatureSelector,
UnivariateStandardizer,
UnivariateDiscretizer,
Standardizer,
UnivariateBoxCoxTransformer,
OneHotEncoder,
Averager
),
name="MLJBase",
julia=true,
license="MIT"
)
metadata_model(FeatureSelector,
input=MLJBase.Table,
output=MLJBase.Table,
weights=false,
path="MLJBase.FeatureSelector")
metadata_model(UnivariateDiscretizer,
input=AbstractVector{<:MLJBase.Continuous},
output=AbstractVector{<:MLJBase.OrderedFactor},
weights=false,
path="MLJBase.UnivariateDiscretizer")
metadata_model(UnivariateStandardizer,
input=AbstractVector{<:MLJBase.Infinite},
output=AbstractVector{MLJBase.Continuous},
weights=false,
path="MLJBase.UnivariateStandardizer")
metadata_model(Standardizer,
input=MLJBase.Table,
output=MLJBase.Table,
weights=false,
path="MLJBase.Standardizer")
metadata_model(UnivariateBoxCoxTransformer,
input=AbstractVector{MLJBase.Continuous},
output=AbstractVector{MLJBase.Continuous},
weights=false,
path="MLJBase.UnivariateBoxCoxTransformer")
metadata_model(OneHotEncoder,
input=MLJBase.Table,
output=MLJBase.Table,
weights=false,
path="MLJBase.OneHotEncoder")
metadata_model(Averager,
output=AbstractVector{MLJBase.Continuous},
weights=false,
path="MLJBase.Averager"
)
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 1485 | # an intercept-free ridge regressor for testing meta-algorithms
export FooBarRegressor
import MLJBase
using LinearAlgebra
using ScientificTypes
mutable struct FooBarRegressor <: MLJBase.Deterministic
lambda::Float64
end
function FooBarRegressor(; lambda=0.0)
simpleridgemodel = FooBarRegressor(lambda)
message = MLJBase.clean!(simpleridgemodel)
isempty(message) || @warn message
return simpleridgemodel
end
function MLJBase.clean!(model::FooBarRegressor)
warning = ""
if model.lambda < 0
warning *= "Need lambda ≥ 0. Resetting lambda=0. "
model.lambda = 0
end
return warning
end
function MLJBase.fitted_params(::FooBarRegressor, fitresult)
return (coefficients=fitresult,)
end
function MLJBase.fit(model::FooBarRegressor, verbosity::Int, X, y)
x = MLJBase.matrix(X)
fitresult = (x'x - model.lambda*I)\(x'y)
cache = nothing
report = nothing
return fitresult, cache, report
end
function MLJBase.predict(model::FooBarRegressor, fitresult, Xnew)
x = MLJBase.matrix(Xnew)
return x*fitresult
end
# metadata:
MLJBase.load_path(::Type{<:FooBarRegressor}) = "MLJBase.FooBarRegressor"
MLJBase.package_name(::Type{<:FooBarRegressor}) = "MLJBase"
MLJBase.package_uuid(::Type{<:FooBarRegressor}) = ""
MLJBase.is_pure_julia(::Type{<:FooBarRegressor}) = true
MLJBase.input_scitype(::Type{<:FooBarRegressor}) = Table(Continuous)
MLJBase.target_scitype(::Type{<:FooBarRegressor}) = AbstractVector{Continuous}
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 346 | module Models
using MLJModelInterface
import MLJBase # needed for UnivariateFinite in ConstantClassifier
const MMI = MLJModelInterface
include("Constant.jl")
include("DecisionTree.jl")
include("NearestNeighbors.jl")
include("MultivariateStats.jl")
include("Transformers.jl")
include("foobarmodel.jl")
include("simple_composite_model.jl")
end
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 2605 | export SimpleDeterministicNetworkCompositeModel,
SimpleProbabilisticNetworkCompositeModel
using MLJBase
const COMPOSITE_MODELS = [
:SimpleDeterministicNetworkCompositeModel,
:SimpleProbabilisticNetworkCompositeModel
]
const REGRESSORS = Dict(
:SimpleDeterministicNetworkCompositeModel => :DeterministicConstantRegressor,
:SimpleProbabilisticNetworkCompositeModel => :ConstantRegressor,
)
const REGRESSOR_SUPERTYPES = Dict(
:SimpleDeterministicNetworkCompositeModel => :Deterministic,
:SimpleProbabilisticNetworkCompositeModel => :Probabilistic,
)
const COMPOSITE_SUPERTYPES = Dict(
:SimpleDeterministicNetworkCompositeModel => :DeterministicNetworkComposite,
:SimpleProbabilisticNetworkCompositeModel => :ProbabilisticNetworkComposite,
)
for model in COMPOSITE_MODELS
regressor = REGRESSORS[model]
regressor_supertype = REGRESSOR_SUPERTYPES[model]
composite_supertype = COMPOSITE_SUPERTYPES[model]
quote
"""
$($(model))(; model=$($(regressor))(), transformer=FeatureSelector())
Construct a composite model consisting of a transformer
(`Unsupervised` model) followed by a `$($(regressor_supertype))` model.
Intended for internal testing only.
"""
mutable struct $(model){
L<:$(regressor_supertype),
T<:Unsupervised
} <: $(composite_supertype)
model::L
transformer::T
end
function $(model)(;
model=$(regressor)(), transformer=FeatureSelector()
)
composite = $(model)(model, transformer)
message = MLJBase.clean!(composite)
isempty(message) || @warn message
return composite
end
MLJBase.metadata_pkg(
$(model);
package_url = "https://github.com/JuliaAI/MLJBase.jl",
is_pure_julia = true,
is_wrapper = true
)
MLJBase.input_scitype(::Type{<:$(model){L,T}}) where {L,T} =
MLJBase.input_scitype(T)
MLJBase.target_scitype(::Type{<:$(model){L,T}}) where {L,T} =
MLJBase.target_scitype(L)
end |> eval
end
## FIT METHODS
for model in COMPOSITE_MODELS
@eval function MLJBase.prefit(
composite::$(model),
verbosity::Integer,
Xtrain,
ytrain
)
X = source(Xtrain) # instantiates a source node
y = source(ytrain)
t = machine(:transformer, X)
Xt = transform(t, X)
l = machine(:model, Xt, y)
yhat = predict(l, Xt)
(predict=yhat,)
end
end
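# Illustrative usage sketch (for internal testing only; assumes some table `X`
# and continuous vector `y` are available):
#
#     composite = SimpleDeterministicNetworkCompositeModel(
#         model=DeterministicConstantRegressor(),
#         transformer=FeatureSelector(),
#     )
#     mach = fit!(machine(composite, X, y), verbosity=0)
#     predict(mach, X)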
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 2209 | module TestLearningCompositesInspection
using Test
using MLJBase
using ..Models
KNNRegressor()
@constant X = source()
@constant y = source()
hot = OneHotEncoder()
hotM = machine(hot, X)
@constant W = transform(hotM, X)
knn = KNNRegressor()
knnM = machine(knn, W, y)
@constant yhat = predict(knnM, W)
@constant K = 2*X
@constant all = glb(yhat, K)
@test MLJBase.tree(yhat) == (operation = predict,
model = knn,
arg1 = (operation = transform,
model = hot,
arg1 = (source = X, ),
train_arg1 = (source = X, )),
train_arg1 = (operation = transform,
model = hot,
arg1 = (source = X, ),
train_arg1 = (source = X, )),
train_arg2 = (source = y,))
@test Set(MLJBase.models(yhat)) == Set([hot, knn])
@test Set(sources(yhat)) == Set([X, y])
@test Set(origins(yhat)) == Set([X,])
@test Set(machines(yhat)) == Set([knnM, hotM])
@test Set(MLJBase.args(yhat)) == Set([W, ])
@test Set(MLJBase.train_args(yhat)) == Set([W, y])
@test Set(MLJBase.children(X, all)) == Set([W, K])
@constant Q = 2X
@constant R = 3X
@constant S = glb(X, Q, R)
@test Set(MLJBase.children(X, S)) == Set([Q, R, S])
@test MLJBase.lower_bound([Int, Float64]) == Union{}
@test MLJBase.lower_bound([Int, Integer]) == Int
@test MLJBase.lower_bound([Int, Integer]) == Int
@test MLJBase.lower_bound([]) == Any
@test input_scitype(2X) == Unknown
@test input_scitype(yhat) == input_scitype(KNNRegressor())
W2 = transform(machine(UnivariateStandardizer(), X), X)
# @test input_scitype(X, glb(W, W2)) == Union{}
# @test input_scitype(X, glb(Q, W)) == Unknown
y1 = predict(machine(DecisionTreeRegressor(), X, y), X)
@test input_scitype(y1) == Table(Continuous, OrderedFactor, Count)
y2 = predict(machine(KNNRegressor(), X, y), X)
@test input_scitype(y2) == Table(Continuous)
# @test input_scitype(X, glb(y1, y2)) == Table(Continuous)
# @test input_scitype(X, glb(y1, y2, Q)) == Unknown
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 14383 | module TestLearningCompositesCore
# using Revise
using Test
using MLJBase
using ..Models
using ..TestUtilities
using CategoricalArrays
using StatisticalMeasures
import Random.seed!
seed!(1234)
N =100
X = (x1=rand(N), x2=rand(N), x3=rand(N));
y = 2X.x1 - X.x2 + 0.05*rand(N);
@testset "error messages for invalid learning networks" begin
Xs = source(X)
ys = source(y)
mach1 = machine(Standardizer(), Xs)
W = transform(mach1, Xs)
fit!(W, verbosity=0)
@test_logs((:error, r"Failed"), @test_throws Exception W(34))
mach2 = machine(DecisionTreeClassifier(), W, ys)
yhat = predict(mach2, W)
@test_logs((:error, r"Problem fitting"),
(:info, r"Running type checks"),
(:warn, MLJBase.alert_generic_scitype_mismatch(
scitype((X, y)),
MLJBase.fit_data_scitype(mach2.model),
typeof(mach2.model)
)),
(:info, r"^It seems an upstream"),
(:error, r"^Problem fitting"),
@test_throws Exception fit!(yhat, verbosity=-1))
Xs = source()
mach1 = machine(Standardizer(), Xs)
W = transform(mach1, Xs)
@test_logs((:error, r"Problem fitting"),
(:warn, r"^Some learning network source"),
(:info, r"Running type checks"),
(:warn, MLJBase.alert_generic_scitype_mismatch(
Tuple{Nothing},
MLJBase.fit_data_scitype(mach1.model),
typeof(mach1.model)
)),
(:info, r"^It seems an upstream"),
(:error, r"^Problem fitting"),
@test_throws Exception fit!(W, verbosity=-1))
end
@testset "network #1" begin
knn_ = KNNRegressor(K=7)
# split the rows:
allrows = eachindex(y);
train, valid, test = partition(allrows, 0.7, 0.15);
Xtrain = selectrows(X, train);
ytrain = y[train];
Xs = source(Xtrain);
ys = source(ytrain);
knn1 = machine(knn_, Xs, ys)
@test_mach_sequence fit_only!(knn1) [(:train, knn1),]
knn_.K = 5
@test_logs((:info, r"Training"),
fit_only!(knn1, rows=train[1:end-10], verbosity=2))
@test_logs (:info, r"Training") fit_only!(knn1, verbosity=2)
yhat = predict(knn1, Xs);
yhat(selectrows(X, test))
@test rms(yhat(selectrows(X, test)), y[test]) < 0.3
@test_mach_sequence fit!(yhat) [(:skip, knn1),]
pred = yhat();
end
@testset "network #2" begin
N =100
X = (x1=rand(N),
x2=rand(N),
x3=categorical(rand("yn",N)),
x4=categorical(rand("yn",N)));
y = 2X.x1 - X.x2 + 0.05*rand(N);
X = source(X)
y = source(y)
hot = OneHotEncoder()
hotM = machine(hot, X)
W = transform(hotM, X)
@test scitype(W) == CallableReturning{Table}
knn = KNNRegressor()
knnM = machine(knn, W, y)
yhat = predict(knnM, W)
@test scitype(yhat) ==
MLJBase.CallableReturning{AbstractVector{Continuous}}
@test_mach_sequence fit!(yhat) [(:train, hotM), (:train, knnM)]
@test_mach_sequence fit!(yhat) [(:skip, hotM), (:skip, knnM)]
hot.drop_last = true
@test_mach_sequence fit!(yhat) [(:update, hotM), (:train, knnM)]
@test_mach_sequence fit!(yhat) [(:skip, hotM), (:skip, knnM)]
knn.K = 17
@test_mach_sequence fit!(yhat) [(:skip, hotM), (:update, knnM)]
# change rows:
@test_mach_sequence fit!(yhat, rows=1:100) [(:train, hotM), (:train, knnM)]
# change rows again:
@test_mach_sequence fit!(yhat) [(:train, hotM), (:train, knnM)]
# force:
@test_mach_sequence fit!(yhat, force=true) [(:train, hotM), (:train, knnM)]
end
@testset "with parallel regressors and pre-processors" begin
N =100
X = (x1=rand(N),
x2=rand(N),
x3=categorical(rand("yn",N)),
x4=categorical(rand("yn",N)));
y = abs.(2X.x1 - X.x2 + 0.05*rand(N));
X = source(X)
y = source(y)
cox = UnivariateBoxCoxTransformer()
coxM = machine(cox, y, cache=true)
z = transform(coxM, y)
hot = OneHotEncoder()
hotM = machine(hot, X, cache=true)
W = transform(hotM, X)
knn = KNNRegressor()
knnM = machine(knn, W, z, cache=true)
zhat1 = predict(knnM, W)
@test_mach_sequence fit!(W) [(:train, hotM), ]
@test_mach_sequence fit!(z) [(:train, coxM), ]
evaluate!(knnM, measure=l2, verbosity=0);
fit!(knnM, verbosity=0)
cox.shift=true
@test_mach_sequence(fit!(zhat1),
[(:update, coxM), (:skip, hotM), (:train, knnM)],
[(:skip, hotM), (:update, coxM), (:train, knnM)])
# no training:
@test_mach_sequence(fit!(zhat1),
[(:skip, coxM), (:skip, hotM), (:skip, knnM)],
[(:skip, hotM), (:skip, coxM), (:skip, knnM)])
tree = DecisionTreeRegressor()
treeM = machine(tree, W, z, cache=true)
zhat2 = predict(treeM, W)
zhat = 0.5*zhat1 + 0.5*zhat2
@test elscitype(zhat) == Unknown
@test_mach_sequence(fit!(zhat),
[(:skip, coxM), (:skip, hotM),
(:skip, knnM), (:train, treeM)],
[(:skip, hotM), (:skip, coxM),
(:skip, knnM), (:train, treeM)],
[(:skip, coxM), (:skip, hotM),
(:train, treeM), (:skip, knnM)],
[(:skip, hotM), (:skip, coxM),
(:train, treeM), (:skip, knnM)])
yhat = inverse_transform(coxM, zhat)
@test_mach_sequence(fit!(yhat),
[(:skip, coxM), (:skip, hotM),
(:skip, knnM), (:skip, treeM)],
[(:skip, hotM), (:skip, coxM),
(:skip, knnM), (:skip, treeM)],
[(:skip, coxM), (:skip, hotM),
(:skip, treeM), (:skip, knnM)],
[(:skip, hotM), (:skip, coxM),
(:skip, treeM), (:skip, knnM)])
# error handling:
MLJBase.rebind!(X, "junk5")
@test_logs((:error, r""),
(:error, r""),
(:error, r""),
(:error, r""),
@test_throws Exception fit!(yhat, verbosity=-1, force=true))
end
@testset "network #3" begin
N =100
X = (x1=rand(N), x2=rand(N), x3=rand(N))
y = 2X.x1 - X.x2 + 0.05*rand(N)
XX = source(X)
yy = source(y)
# construct a transformer to standardize the target:
uscale_ = UnivariateStandardizer()
uscale = machine(uscale_, yy)
# get the transformed inputs, as if `uscale` were already fit:
z = transform(uscale, yy)
# construct a transformer to standardize the inputs:
scale_ = Standardizer()
scale = machine(scale_, XX) # no need to fit
# get the transformed inputs, as if `scale` were already fit:
Xt = transform(scale, XX)
# do nothing to the DataFrame
Xa = node(identity, Xt)
# choose a learner and make it machine:
knn_ = KNNRegressor(K=7) # just a container for hyperparameters
knn = machine(knn_, Xa, z) # no need to fit
# get the predictions, as if `knn` already fit:
zhat = predict(knn, Xa)
# inverse transform the target:
yhat = inverse_transform(uscale, zhat)
# fit-through training:
@test_mach_sequence(fit!(yhat, rows=1:50),
[(:train, uscale), (:train, scale), (:train, knn)],
[(:train, scale), (:train, uscale), (:train, knn)])
@test_mach_sequence(fit!(yhat, rows=1:50),
[(:skip, uscale), (:skip, scale), (:skip, knn)],
[(:skip, scale), (:skip, uscale), (:skip, knn)])
# change rows:
@test_mach_sequence(fit!(yhat),
[(:train, uscale), (:train, scale), (:train, knn)],
[(:train, scale), (:train, uscale), (:train, knn)])
knn_.K =67
@test_mach_sequence(fit!(yhat),
[(:skip, uscale), (:skip, scale), (:update, knn)],
[(:skip, scale), (:skip, uscale), (:update, knn)])
end
@testset "network with machines sharing one model" begin
N =100
X = (x1=rand(N), x2=rand(N), x3=rand(N))
y = 2X.x1 - X.x2 + 0.05*rand(N)
XX = source(X)
yy = source(y)
# construct a transformer to standardize the target:
uscale_ = UnivariateStandardizer()
uscale = machine(uscale_, yy)
# get the transformed inputs, as if `uscale` were already fit:
z = transform(uscale, yy)
# construct a transformer to standardize the inputs:
xscale_ = Standardizer()
xscale = machine(xscale_, XX) # no need to fit
# get the transformed inputs, as if `scale` were already fit:
Xt = transform(xscale, XX)
# choose a learner and make two machines from it:
knn_ = KNNRegressor(K=7) # just a container for hyperparameters
knn1 = machine(knn_, Xt, z) # no need to fit
knn2 = machine(knn_, Xt, z) # no need to fit
# get the predictions, as if `knn` already fit:
zhat1 = predict(knn1, Xt)
zhat2 = predict(knn2, Xt)
zhat = zhat1 + zhat2
# inverse transform the target:
yhat = inverse_transform(uscale, zhat)
fit!(yhat, verbosity=0)
θ = fitted_params(yhat)
end
mutable struct PermuteArgs <: MLJBase.Static
permutation::NTuple{N,Int} where N
end
MLJBase.transform(p::PermuteArgs, _, args...) =
Tuple([args[i] for i in p.permutation])
MLJBase.inverse_transform(p::PermuteArgs, _, args...) =
Tuple([args[i] for i in sortperm(p.permutation |> collect)])
@testset "nodes wrapping Static transformers can be called with rows" begin
y1 = source(10*ones(Int, 3))
y2 = source(20*ones(Int, 3))
y3 = source(30*ones(Int, 3))
permuter = PermuteArgs((2, 3, 1))
mach = machine(permuter)
y = transform(mach, y1, y2, y3)
@test y(rows=1:2) == (20*ones(Int, 2), 30*ones(Int, 2), 10*ones(Int, 2))
end
@testset "overloading methods for AbstractNode" begin
A = rand(3,7)
As = source(A)
@test MLJBase.matrix(MLJBase.table(As))() == A
X = (x1 = [1,2,3], x2=[10, 20, 30], x3=[100, 200, 300])
Xs = source(X)
@test selectrows(Xs, 1)() == selectrows(X, 1)
@test selectrows(Xs, 2:3)() == selectrows(X, 2:3)
@test selectcols(Xs, 1)() == selectcols(X, 1)
@test selectcols(Xs, 2:3)() == selectcols(X, 2:3)
@test selectcols(Xs, :x1)() == selectcols(X, :x1)
@test selectcols(Xs, [:x1, :x3])() == selectcols(X, [:x1, :x3])
@test nrows(Xs)() == 3
y = rand(4)
ys = source(y)
@test vcat(ys, ys)() == vcat(y, y)
@test hcat(ys, ys)() == hcat(y, y)
@test log(ys)() == log.(y)
@test exp(ys)() == exp.(y)
Z = (rand(4), rand(4), rand(4))
Zs = source(Z)
@test mean(Zs)() == mean.(Z)
@test mode(Zs)() == mode.(Z)
@test median(Zs)() == median.(Z)
a, b, λ = rand(4), rand(4), rand()
as, bs = source(a), source(b)
@test (as + bs)() == a + b
@test (λ * bs)() == λ * b
X = source(1)
y = source(7)
@test glb(X, y)() == (1, 7)
glb_node = @tuple X y
@test glb_node() == (1, 7)
X = source(1:10)
Y = node(X->selectrows(X, 3:4), X)
@test nrows_at_source(Y) == 10
@test nrows(Y)() == 2
end
@testset "reformat logic - with upstream transformer learning from data" begin
X = (x1=rand(5), x2=rand(5))
y = categorical(collect("abaaa"))
Xs = source(X)
ys = source(y)
std = Standardizer()
mach1 = machine(std, Xs)
W = transform(mach1, Xs)
# a classifier with reformat front-end:
clf = ConstantClassifier(testing=true)
mach2 = machine(clf, W, ys; cache=true)
yhat = predict(mach2, W)
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(yhat, verbosity=0, rows=1:3))
# training network with new rows changes upstream state of
# classifier and hence retriggers reformatting of data:
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(yhat, verbosity=0, rows=1:2))
# however just changing classifier hyper-parameter avoids
# reformatting and resampling:
clf.bogus = 123
@test_logs fit!(yhat, verbosity=0, rows=1:2)
end
mutable struct Scale <: MLJBase.Static
scaling::Float64
end
function MLJBase.transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X * s.scaling
MLJBase.table(s.scaling * MLJBase.matrix(X), prototype=X)
end
function MLJBase.inverse_transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X / s.scaling
MLJBase.table(MLJBase.matrix(X) / s.scaling, prototype=X)
end
@testset "reformat logic - with upstream Static transformer" begin
X = (x1=ones(5), x2=ones(5))
y = categorical(collect("abaaa"))
Xs = source(X)
ys = source(y)
scaler = Scale(2.0)
mach1 = machine(scaler)
W = transform(mach1, Xs)
# a classifier with reformat front-end:
clf = ConstantClassifier(testing=true)
mach2 = machine(clf, W, ys, cache=true)
yhat = predict(mach2, W)
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(yhat, verbosity=0, rows=1:3))
# training network with new rows does not change upstream state of
# classifier, because `scaler isa Static` and no reformatting of
# data:
@test_logs((:info, "resampling X, y"),
fit!(yhat, verbosity=0, rows=1:2)
)
# however changing an upstream hyperparameter forces reformatting:
scaler.scaling = 3.0
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(yhat, verbosity=0, rows=1:2))
# however just changing classifier hyper-parameter avoids
# reformatting and resampling:
clf.bogus = 123
@test_logs fit!(yhat, verbosity=0, rows=1:2)
end
@testset "@node" begin
X1 = source(4)
X2 = source(5)
Xp = source(1:10)
add(a, b, c) = a + b + c
N = @node add(X1, 1, X2)
@test N() == 10
N = @node tuple(X1, 5, X1)
@test N() == (4, 5, 4)
Y = @node selectrows(Xp, 3:4)
@test Y() == 3:4
@test Y([:one, :two, :three, :four]) == [:three, :four]
end
x = source(3)
y = source(7)
@testset "syntactic sugar for nodes" begin
for op in [:(+), :(*), :(/)]
quote
@test $op(x, y)() == $op(x(), y())
@test $op(2, x)() == $op(2, x())
@test $op(x, 2)() == $op(x(), 2)
end |> eval
end
end
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 4368 | module TestReplace
using Test
using ..Models
using ..TestUtilities
using MLJBase
using Tables
using StableRNGs
using Serialization
rng = StableRNG(616161)
# build a learning network:
x1 = map(n -> mod(n,3), rand(rng, UInt8, 100)) |> categorical;
x2 = randn(rng, 100);
X = (x1=x1, x2=x2);
y = x2.^2;
Xs = source(X)
ys = source(y)
z = log(ys)
stand = UnivariateStandardizer()
standM = machine(stand, z)
u = transform(standM, z)
hot = OneHotEncoder()
hotM = machine(hot, Xs)
W = transform(hotM, Xs)
knn = KNNRegressor()
knnM = machine(knn, W, u)
oak = DecisionTreeRegressor()
oakM = machine(oak, W, u)
uhat = 0.5*(predict(knnM, W) + predict(oakM, W))
zhat = inverse_transform(standM, uhat)
yhat = exp(zhat)
enode = @node mae(ys, yhat)
@testset "replace() method; $(typeof(accel))" for accel in (CPU1(), CPUThreads())
fit!(yhat, verbosity=0, acceleration=accel)
# test nested reporting:
r = MLJBase.report(yhat)
d = r.report_given_machine
ms = machines(yhat)
@test ms == r.machines |> reverse
@test all(mach -> report(mach) == d[mach], ms)
hot2 = deepcopy(hot)
knn2 = deepcopy(knn)
# duplicate the network with `yhat` as glb:
yhat_clone = replace(
yhat,
hot=>hot2,
knn=>knn2,
ys=>source(42);
copy_unspecified_deeply=false,
)
# test models and sources duplicated correctly:
models_clone = MLJBase.models(yhat_clone)
@test models_clone[1] === stand
@test models_clone[2] === knn2
@test models_clone[3] === hot2
sources_clone = sources(yhat_clone)
@test sources_clone[1]() == X
@test sources_clone[2]() === 42
# test serializable option:
fit!(yhat, verbosity=0)
yhat_ser = replace(yhat; serializable=true)
machines_ser = machines(yhat_ser)
mach4 = machines_ser[4]
@test mach4.state == -1
@test all(isempty, sources(yhat_ser))
# duplicate a signature:
signature = (predict=yhat, report=(mae=enode,)) |> MLJBase.Signature
signature_clone = replace(
signature,
hot=>hot2,
knn=>knn2,
ys=>source(2*y);
copy_unspecified_deeply=false,
)
glb_node = glb(signature_clone)
models_clone = MLJBase.models(glb_node)
@test models_clone[1] === stand
@test models_clone[2] === knn2
@test models_clone[3] === hot2
sources_clone = sources(glb_node)
@test sources_clone[1]() == X
@test sources_clone[2]() == 2*y
# warning thrown
@test_logs(
(:warn, r"No replacement"),
replace(
signature,
hot=>hot2,
knn=>knn2,
ys=>source(2*y);
),
)
yhat2 = MLJBase.operation_nodes(signature_clone).predict
## EXTRA TESTS FOR TRAINING SEQUENCE
# pickout the newly created machines:
standM2 = machines(yhat2, stand) |> first
oakM2 = machines(yhat2, oak) |> first
knnM2 = machines(yhat2, knn) |> first
hotM2 = machines(yhat2, hot) |> first
@test_mach_sequence(fit!(yhat2, force=true, acceleration=accel),
[(:train, standM2), (:train, hotM2),
(:train, knnM2), (:train, oakM2)],
[(:train, hotM2), (:train, standM2),
(:train, knnM2), (:train, oakM2)],
[(:train, standM2), (:train, hotM2),
(:train, oakM2), (:train, knnM2)],
[(:train, hotM2), (:train, standM2),
(:train, oakM2), (:train, knnM2)])
@test length(MLJBase.machines(yhat)) == length(MLJBase.machines(yhat2))
@test MLJBase.models(yhat) == MLJBase.models(yhat2)
@test 2yhat() ≈ yhat2()
# this change should trigger retraining of all machines except the
# univariate standardizer:
hot2.drop_last = true
@test_mach_sequence(fit!(yhat2, acceleration=accel),
[(:skip, standM2), (:update, hotM2),
(:train, knnM2), (:train, oakM2)],
[(:update, hotM2), (:skip, standM2),
(:train, knnM2), (:train, oakM2)],
[(:skip, standM2), (:update, hotM2),
(:train, oakM2), (:train, knnM2)],
[(:update, hotM2), (:skip, standM2),
(:train, oakM2), (:train, knnM2)])
end
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 4961 | module TestSignatures
using ..Models
using MLJBase
using StableRNGs
using Tables
using Test
using MLJModelInterface
using OrderedCollections
using StatisticalMeasures
@testset "signatures - accessor functions" begin
a = source(:a)
b = source(:b)
W = source(:W)
yhat = source(:yhat)
s = (
transform=W,
report=(a=a, b=b),
fitted_params=(c=W,),
predict=yhat,
acceleration=CPUThreads(),
) |> MLJBase.Signature
@test MLJBase.report_nodes(s) == (a=a, b=b)
@test MLJBase.fitted_params_nodes(s) == (c=W,)
@test MLJBase.operation_nodes(s) == (transform=W, predict=yhat)
@test MLJBase.operations(s) == (:transform, :predict)
@test MLJBase.acceleration(s) == CPUThreads()
s = (
transform=W,
predict=yhat,
) |> MLJBase.Signature
@test MLJBase.report_nodes(s) == NamedTuple()
@test MLJBase.acceleration(s) == CPU1()
end
# # DUMMY CLUSTERER THAT DOES NOT GENERALIZE TO NEW DATA
mutable struct OneShotClusterer <: Static
nclusters
rng
end
# X is a n x p matrix
function MLJModelInterface.predict(model::OneShotClusterer, ::Nothing, X)
rng = copy(model.rng)
Xmat = Tables.matrix(X)
labels = map(i -> Char(64 + i), 1:model.nclusters)
Xout = categorical(rand(rng, labels, size(Xmat, 1)); levels=labels)
report = (; labels)
return Xout, report
end
MLJModelInterface.reporting_operations(::OneShotClusterer) = (:predict,)
# Some complicated learning network:
Xs = source(first(make_blobs(10)))
mach0 = machine(:clusterer)
ytrain = predict(mach0, Xs)
mach1 = machine(:classifier1, Xs, ytrain)
# two machines pointing to same model:
mach2a = machine(:classifier2, Xs, ytrain)
mach2b = machine(:classifier2, Xs, ytrain)
y1 = predict(mach1, Xs) # probabilistic predictions
junk = node(fitted_params, mach1)
y2a = predict(mach2a, Xs) # probabilistic predictions
y2b = predict(mach2b, Xs) # probabilistic predictions
loss = node(
(y1, y2) -> brier_loss(y1, mode.(y2)) + brier_loss(y2, mode.(y1)) |> mean,
y1,
y2a,
)
λ = 0.3
ymix = λ*y1 + (1 - λ)*(0.2*y2a + 0.8*y2b)
yhat = mode(ymix)
signature = (;
predict=yhat,
report=(; loss=loss),
fitted_params=(; junk),
) |> MLJBase.Signature
glb1 = glb(signature)
glb2 = glb(yhat, loss, junk)
clusterer = OneShotClusterer(3, StableRNG(123))
composite = (
clusterer = clusterer,
classifier1 = KNNClassifier(),
classifier2 = ConstantClassifier(),
)
@testset "signature methods: glb, report, age, output_and_report" begin
fit!(glb1; composite, verbosity=0)
fit!(glb2; composite, verbosity=0)
@test glb1() == glb2()
r = MLJBase.report(signature)
# neither classifier has a contribution to the report:
@test isnothing(report(mach1))
@test isnothing(report(mach2a))
@test isnothing(report(mach2b))
@test r == (clusterer = report(mach0), loss=loss())
fr = MLJBase.fitted_params(signature)
@test keys(fr) == (:classifier1, :classifier2, :junk)
@test sum(MLJBase.age.(machines(glb1))) == MLJBase.age(signature)
output, r = MLJBase.output_and_report(signature, :predict, selectrows(Xs(), 1:2))
@test output == yhat(selectrows(Xs(), 1:2))
@test r == (clusterer = (labels = ['A', 'B', 'C'],),)
end
@testset "signature helper: tuple_keyed_on_model" begin
d = OrderedDict(:model1 => [:mach1a, :mach1b], :model2 => [:mach2,])
f(mach) = mach == :mach2 ? nothing : 42
g(mach) = mach in [:mach1a, :mach1b] ? nothing : 24
@test MLJBase.tuple_keyed_on_model(f, d) == (model1=[42, 42],)
@test MLJBase.tuple_keyed_on_model(f, d; scalarize=false) == (model1=[42, 42],)
@test MLJBase.tuple_keyed_on_model(f, d; drop_nothings=false) ==
(model1=[42, 42], model2=nothing)
@test MLJBase.tuple_keyed_on_model(f, d; drop_nothings=false, scalarize=false) ==
(model1=[42, 42], model2=[nothing,])
@test MLJBase.tuple_keyed_on_model(g, d) == (model2=24,)
@test MLJBase.tuple_keyed_on_model(g, d; scalarize=false) == (model2=[24,],)
@test MLJBase.tuple_keyed_on_model(g, d; drop_nothings=false) ==
(model1=[nothing, nothing], model2=24)
@test MLJBase.tuple_keyed_on_model(g, d; drop_nothings=false, scalarize=false) ==
(model1=[nothing, nothing], model2=[24,])
end
@testset "signature helper: machines_given_model" begin
d = MLJBase.machines_given_model(glb1)
@test d[:clusterer] == Any[mach0,]
@test d[:classifier1] == Any[mach1,]
@test d[:classifier2] == Any[mach2a, mach2b]
@test length(keys(d)) == 3
end
@testset "signature helper: call_and_copy" begin
@test_throws MLJBase.ERR_CALL_AND_COPY MLJBase.call_and_copy(42) == 42
x = Ref(3)
n = source(x)
frozen_x = MLJBase.call_and_copy(n)
@test frozen_x[] == 3
x[] = 5
@test frozen_x[] == 3
y = source(7)
@test MLJBase.call_and_copy((a=source(20), b=y)) == (a=20, b=7)
end
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 2252 | module TestCompositionModelsInspection
using Test
using MLJBase
using Tables
import MLJBase
using ..Models
using CategoricalArrays
using OrderedCollections
using Statistics
X = (x1=rand(3), x2=rand(3), x3=rand(3))
y = float.([1, 2, 3])
mutable struct Bar <: DeterministicNetworkComposite
scale::Float64
rgs
input_stand
target_stand
end
function MLJBase.prefit(model::Bar, verbosity, X, y)
Xs = source(X)
ys = source(y)
y1 = model.scale*ys
mach1 = machine(:input_stand, Xs)
X1 = transform(mach1, Xs)
mach2 = machine(:target_stand, y1)
z = transform(mach2, ys)
mach3 = machine(:rgs, X1, z)
zhat = predict(mach3, X1)
yhat = inverse_transform(mach2, zhat)
(; predict=yhat)
end
scale=0.97
rgs = KNNRegressor()
input_stand = Standardizer()
target_stand = UnivariateStandardizer()
model = Bar(scale, rgs, input_stand, target_stand)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
@testset "user-friendly inspection of reports and fitted params" begin
# mutating the models should not affect the result:
model.scale = 42.3
model.rgs.K = 42
model.input_stand.features=[:x1,]
r = report(mach)
keys(r) == (:input_stand,)
@test Set(r.input_stand.features_fit) == Set([:x1, :x2, :x3])
fp = fitted_params(mach)
@test fp.rgs isa NamedTuple{(:tree,)}
@test fp.input_stand.mean_and_std_given_feature[:x1] |> collect ≈
[mean(X.x1), std(X.x1)]
@test fp.target_stand.fitresult |> collect ≈
[mean(0.97*y), std(0.97*y)] # scale = 0.97 at fit! call
end
mutable struct Mixer <: DeterministicNetworkComposite
model1
model2
misc::Int
end
@testset "#549" begin
function MLJBase.prefit(model::Mixer, verbosity, X, y)
Xs = source(X)
ys = source(y)
mach1 = machine(:model1, Xs, ys)
mach2 = machine(:model2, Xs, ys)
yhat1 = predict(mach1, Xs)
yhat2 = predict(mach2, Xs)
yhat = 0.5*yhat1 + 0.5*yhat2
(; predict=yhat)
end
model = Mixer(KNNRegressor(), KNNRegressor(), 42)
mach = fit!(machine(model, make_regression(10, 3)...), verbosity=0)
fp = fitted_params(mach)
@test !(fp.model1 isa Vector)
@test !(fp.model2 isa Vector)
end
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 26550 | module TestNetoworkComposite
using Test
using MLJBase
using ..Models
using ..TestUtilities
using StableRNGs
using Tables
using MLJModelInterface
using CategoricalArrays
using OrderedCollections
using StatisticalMeasures
using Serialization
const MMI = MLJModelInterface
const rng = StableRNG(123)
X, _ = make_moons(10, rng=StableRNG(123))
Xnew = selectrows(X, 5:10)
# # DUMMY CLUSTERER THAT DOES NOT GENERALIZE TO NEW DATA
mutable struct OneShotClusterer <: Static
nclusters
rng
report_labels::Bool
end
function MMI.predict(model::OneShotClusterer, ::Nothing, X)
rng = copy(model.rng)
Xmat = Tables.matrix(X)
labels = map(i -> Char(64 + i), 1:model.nclusters)
Xout = categorical(rand(rng, labels, size(Xmat, 1)); levels=labels)
report = model.report_labels ? (; labels) : nothing
return Xout, report
end
MMI.reporting_operations(::OneShotClusterer) = (:predict,)
# # DUMMY STATIC TRANSFORMER THAT REPORTS
mutable struct ReportingScaler <: Static
alpha::Float64
end
MLJBase.reporting_operations(::Type{<:ReportingScaler}) = (:transform, )
MLJBase.transform(model::ReportingScaler, _, X) = (
Tables.table(model.alpha*Tables.matrix(X)),
(; nrows = size(MLJBase.matrix(X))[1]),
)
# # ANOTHER DUMMY STATIC TRANSFORMER THAT REPORTS
mutable struct ReportingClassSwapper <: Static
shift::Int64
end
MLJBase.reporting_operations(::Type{<:ReportingClassSwapper}) = (:transform, )
MLJBase.transform(model::ReportingClassSwapper, _, y) = (
MLJBase.int.(y) .+ model.shift,
(nrows = length(y),)
)
# # BASIC TESTS
mutable struct WatermelonComposite <: UnsupervisedNetworkComposite
scaler
clusterer
classifier1
classifier2
mix::Float64
finalizer
end
function MLJBase.prefit(composite::WatermelonComposite, verbosity, X)
Xs = source(X)
len = node(Xs) do X
size(Tables.matrix(X), 1)
end
W = transform(machine(:scaler), Xs)
ytrain = predict(machine(:clusterer), Xs)
mach1 = machine(:classifier1, W, ytrain)
junk = node(m->fitted_params(m), mach1) # a fitted_params node
# two machines pointing to same model:
mach2a = machine(:classifier2, W, ytrain)
mach2b = machine(:classifier2, W, ytrain)
y1 = predict(mach1, W)
y2a = predict(mach2a, W)
y2b = predict(mach2b, W)
training_loss = node(
(y1, y2) -> brier_loss(y1, mode.(y2)) + brier_loss(y2, mode.(y1)) |> mean,
y1,
y2a,
)
λ = composite.mix
ymix = λ*y1 + (1 - λ)*(0.2*y2a + 0.8*y2b)
yhat = transform(machine(:finalizer), mode(ymix))
return (; predict=yhat, report=(;training_loss, len), fitted_params=(; junk))
end
composite = WatermelonComposite(
ReportingScaler(3.0),
OneShotClusterer(3, StableRNG(123), true),
DecisionTreeClassifier(),
ConstantClassifier(),
0.5,
ReportingClassSwapper(0),
)
@testset "fitted parameters and reports" begin
f, c, fitr = MLJBase.fit(composite, 0, X)
# check fitted_params:
fp = @test_logs fitted_params(composite, f)
@test Set(keys(fp)) == Set([:classifier1, :classifier2, :junk])
@test :tree_or_leaf in keys(fp.classifier1)
constant_fps = fp.classifier2
@test length(constant_fps) == 2
@test all(constant_fps) do fp
:target_distribution in keys(fp)
end
@test haskey(fp.junk, :tree_or_leaf)
@test fp.junk.tree_or_leaf.featim == fp.classifier1.tree_or_leaf.featim
# check fit report (which omits key :finalizer):
@test Set(keys(fitr)) ==
Set([:scaler, :clusterer, :classifier1, :training_loss, :len])
@test fitr.scaler == (nrows=10,)
@test fitr.clusterer == (labels=['A', 'B', 'C'],)
@test Set(keys(fitr.classifier1)) == Set([:classes_seen, :print_tree, :features])
@test fitr.training_loss isa Real
@test fitr.len == 10
o, predictr = predict(composite, f, selectrows(X, 1:5))
# the above should have no effect on learned parameters:
fp = fitted_params(composite, f)
@test Set(keys(fp)) == Set([:classifier1, :classifier2, :junk])
@test :tree_or_leaf in keys(fp.classifier1)
constant_fps = fp.classifier2
@test length(constant_fps) == 2
@test all(constant_fps) do fp
:target_distribution in keys(fp)
end
# check predict report (which excludes reports from "supplementary" nodes)
@test Set(keys(predictr)) ==
Set([:scaler, :clusterer, :classifier1, :finalizer])
@test predictr.scaler == (nrows=5,)
@test predictr.clusterer == (labels=['A', 'B', 'C'],)
@test Set(keys(predictr.classifier1)) == Set([:classes_seen, :print_tree, :features])
@test predictr.finalizer == (nrows=5,)
o, predictr = predict(composite, f, selectrows(X, 1:2))
# after second predict, predict report should update:
@test Set(keys(predictr)) ==
Set([:scaler, :clusterer, :classifier1, :finalizer])
@test predictr.scaler == (nrows=2,) # <----------- different
@test predictr.clusterer == (labels=['A', 'B', 'C'],)
@test Set(keys(predictr.classifier1)) == Set([:classes_seen, :print_tree, :features])
@test predictr.finalizer == (nrows=2,) # <---------- different
r = MMI.report(composite, Dict(:fit => fitr, :predict=> predictr))
@test keys(r) == (:classifier1, :scaler, :clusterer, :training_loss, :len, :predict)
@test r.predict == predictr
@test r == merge(fitr, (predict=predictr,))
end
@testset "logic for composite model update - start_over() method" begin
old_composite = deepcopy(composite)
glb_node = MLJBase.prefit(composite, 0, X) |> MLJBase.Signature |> MLJBase.glb
# don't start over if composite unchanged:
@test !MLJBase.start_over(composite, old_composite, glb_node)
# don't start over if a component is mutated:
composite.scaler.alpha = 5.0
@test !MLJBase.start_over(composite, old_composite, glb_node)
# don't start over if a component is replaced:
composite.classifier2 = KNNClassifier()
@test !MLJBase.start_over(composite, old_composite, glb_node)
# do start over if a non-model field is changed:
composite.mix = 0.17
@test MLJBase.start_over(composite, old_composite, glb_node)
end
@testset "basic serialization" begin
composite = WatermelonComposite(
ReportingScaler(3.0),
OneShotClusterer(3, StableRNG(123), false),
ConstantClassifier(),
ConstantClassifier(),
0.5,
ReportingClassSwapper(0),
)
X, _ = make_moons(10, rng=StableRNG(123))
filename = "composite_mach.jls"
mach = machine(composite, X)
fit!(mach, verbosity=0)
# `serializable` function:
smach = MLJBase.serializable(mach)
TestUtilities.generic_tests(mach, smach)
@test keys(fitted_params(smach)) == keys(fitted_params(mach))
@test keys(report(smach)) == keys(report(mach))
# Check data has been wiped out from models at the first level of composition
_machines = machines(glb(mach.fitresult))
_smachines = machines(glb(smach.fitresult))
@test length(_machines) == length(_smachines)
for submach in _smachines
TestUtilities.test_data(submach)
end
# end-to-end:
MLJBase.save(filename, mach)
smach = machine(filename)
@test predict(smach, X) == predict(mach, X)
rm(filename)
# file size does not scale with size of data:
filesizes = []
for n in [100, 500, 1000]
filename = "serialized_temp_$n.jls"
X, _ = make_moons(n, rng=StableRNG(123))
mach = machine(composite, X)
fit!(mach, verbosity=0)
MLJBase.save(filename, mach)
push!(filesizes, filesize(filename))
rm(filename)
end
@test all(x==filesizes[1] for x in filesizes)
# What happens if no `serializable` step was applied:
filename = "full_of_data.jls"
X, _ = make_moons(1000, rng=StableRNG(123))
mach = machine(composite, X)
fit!(mach, verbosity=0)
serialize(filename, mach)
@test filesize(filename) > filesizes[1]
@test_logs (:warn, MLJBase.warn_bad_deserialization(mach.state)) machine(filename)
end
# # SOME INTEGRATION TESTS
N = 50
Xin = (a=rand(N), b=rand(N), c=rand(N));
yin = rand(N);
train, test = partition(eachindex(yin), 0.7);
Xtrain = MLJBase.selectrows(Xin, train);
ytrain = yin[train];
ridge_model = FooBarRegressor(lambda=0.1)
selector_model = FeatureSelector()
@testset "first integration test" begin
composite = SimpleDeterministicNetworkCompositeModel(model=ridge_model,
transformer=selector_model)
fitresult, cache, rep = MLJBase.fit(composite, 0, Xtrain, ytrain);
# to check internals:
d = MLJBase.machines_given_model(glb(fitresult))
ridge = only(d[:model])
selector = only(d[:transformer])
ridge_old_fitresult = deepcopy(fitted_params(ridge))
selector_old_fitresult = deepcopy(fitted_params(selector))
# this should trigger no retraining:
fitresult, cache, rep =
@test_logs(
(:info, r"^Not"),
(:info, r"^Not"),
MLJBase.update(composite, 2, fitresult, cache, Xtrain, ytrain));
@test fitted_params(ridge) == ridge_old_fitresult
@test fitted_params(selector) == selector_old_fitresult
# this should trigger update of selector and training of ridge:
selector_model.features = [:a, :b]
fitresult, cache, rep =
@test_logs(
(:info, r"^Updating"),
(:info, r"^Training"),
MLJBase.update(composite, 2, fitresult, cache, Xtrain, ytrain));
@test fitted_params(ridge) != ridge_old_fitresult
@test fitted_params(selector) != selector_old_fitresult
ridge_old_fitresult = deepcopy(fitted_params(ridge))
selector_old_fitresult = deepcopy(fitted_params(selector))
# this should trigger updating of ridge only:
ridge_model.lambda = 1.0
fitresult, cache, rep =
@test_logs(
(:info, r"^Not"),
(:info, r"^Updating"),
MLJBase.update(composite, 2, fitresult, cache, Xtrain, ytrain));
@test fitted_params(ridge) != ridge_old_fitresult
@test fitted_params(selector) == selector_old_fitresult
# smoke tests
predict(composite, fitresult, MLJBase.selectrows(Xin, test));
Xs = source(Xtrain)
ys = source(ytrain)
mach = machine(composite, Xs, ys)
yhat = predict(mach, Xs)
fit!(yhat, verbosity=0)
composite.transformer.features = [:b, :c]
fit!(yhat, verbosity=0)
fit!(yhat, rows=1:20, verbosity=0)
yhat(MLJBase.selectrows(Xin, test));
end
mutable struct WrappedRidge <: DeterministicNetworkComposite
ridge
end
function MLJBase.prefit(model::WrappedRidge, verbosity::Integer, X, y)
Xs = source(X)
ys = source(y)
stand = Standardizer()
standM = machine(stand, Xs)
W = transform(standM, Xs)
boxcox = UnivariateBoxCoxTransformer()
boxcoxM = machine(boxcox, ys)
z = transform(boxcoxM, ys)
ridgeM = machine(:ridge, W, z)
zhat = predict(ridgeM, W)
yhat = inverse_transform(boxcoxM, zhat)
return (predict=yhat,)
end
MLJBase.input_scitype(::Type{<:WrappedRidge}) =
Table(Continuous)
MLJBase.target_scitype(::Type{<:WrappedRidge}) =
AbstractVector{<:Continuous}
@testset "second integration test" begin
ridge = FooBarRegressor(lambda=0.1)
model_ = WrappedRidge(ridge)
mach = machine(model_, Xin, yin)
fit!(mach, verbosity=0)
yhat=predict(mach, Xin);
ridge.lambda = 1.0
fit!(mach, verbosity=0)
@test predict(mach, Xin) != yhat
end
# A dummy clustering model:
mutable struct DummyClusterer <: Unsupervised
n::Int
end
DummyClusterer(; n=3) = DummyClusterer(n)
function MLJBase.fit(model::DummyClusterer, verbosity::Int, X)
Xmatrix = Tables.matrix(X)
n = min(size(Xmatrix, 2), model.n)
centres = Xmatrix[1:n, :]
levels = categorical(1:n)
report = (centres=centres,)
fitresult = levels
return fitresult, nothing, report
end
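# As per the MLJ model API, `fit` returns the triple (fitresult, cache, report); here
# the fitresult is just the pool of cluster labels, and the cluster centres are
# exposed through the report.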
MLJBase.transform(model::DummyClusterer, fitresult, Xnew) =
selectcols(Xnew, 1:length(fitresult))
MLJBase.predict(model::DummyClusterer, fitresult, Xnew) =
[fill(fitresult[1], nrows(Xnew))...]
# A wrapper of the above model:
mutable struct WrappedClusterer <: UnsupervisedNetworkComposite
model
end
WrappedClusterer(; model=DummyClusterer()) =
WrappedClusterer(model)
function MLJBase.prefit(model::WrappedClusterer, verbosity::Int, X)
Xs = source(X)
W = transform(machine(OneHotEncoder(), Xs), Xs)
m = machine(:model, W)
yhat = predict(m, W)
Wout = transform(m, W)
foo = node(η -> first(η), yhat)
return (predict=yhat, transform=Wout, report=(foo=foo,))
end
@testset "third integration test" begin
X, _ = make_regression(10, 5);
model = WrappedClusterer(model=DummyClusterer(n=2))
mach = fit!(machine(model, X), verbosity=0)
model.model.n = 3
@test_logs fit!(mach, verbosity=0)
@test transform(mach, X) == selectcols(X, 1:3)
r = report(mach)
@test r.model.centres == MLJBase.matrix(X)[1:3,:]
@test r.foo == predict(mach, rows=:)[1]
fp = fitted_params(mach)
@test :model in keys(fp)
levs = fp.model.fitresult
@test predict(mach, X) == fill(levs[1], 10)
end
## NETWORK WITH MULTIPLE REPORT NODES
mutable struct TwoStages <: DeterministicNetworkComposite
model1
model2
model3
end
function MLJBase.prefit(m::TwoStages, verbosity, X, y)
Xs = source(X)
ys = source(y)
mach1 = machine(:model1, Xs, ys)
mach2 = machine(:model2, Xs, ys)
ypred1 = MLJBase.predict(mach1, Xs)
ypred2 = MLJBase.predict(mach2, Xs)
Y = MLJBase.table(hcat(ypred1, ypred2))
mach3 = machine(:model3, Y, ys)
ypred3 = MLJBase.predict(mach3, Y)
μpred = node(x->mean(x), ypred3)
σpred = node((x, μ)->mean((x.-μ).^2), ypred3, μpred)
return (predict=ypred3, report=(μpred=μpred, σpred=σpred))
end
@testset "multiple report nodes and retraining" begin
X, y = make_regression(100, 3)
model3 = FooBarRegressor(lambda=1)
twostages = TwoStages(FooBarRegressor(lambda=0.1),
FooBarRegressor(lambda=10), model3)
mach = machine(twostages, X, y)
fit!(mach, verbosity=0)
rep = report(mach)
signature = mach.fitresult
_glb = glb(signature)
mach1 = only(machines(_glb, :model1))
mach2 = only(machines(_glb, :model2))
mach3 = only(machines(_glb, :model3))
# All machines have been fitted once
@test mach1.state == mach2.state == mach3.state
# Retrieve current values of interest
μpred = rep.μpred
σpred = rep.σpred
# Change model3 and refit
model3.lambda = 10
fit!(mach, verbosity=0)
rep = report(mach)
# Machines 1,2 have been fitted once and machine 3 twice
@test mach1.state == mach2.state == 1
@test mach3.state == 2
# The new values have been updated
@test rep.μpred != μpred
@test rep.σpred != σpred
end
## COMPOSITE WITH COMPONENT MODELS STORED IN NTUPLE
# `modelnames` is a tuple of `Symbol`s, one for each `model` in `models`:
mutable struct Averager{modelnames} <: DeterministicNetworkComposite
models::NTuple{<:Any,Deterministic}
weights::Vector{Float64}
Averager(modelnames, models, weights) =
new{modelnames}(models, weights)
end
# special kw constructor, allowing one to specify the property names
# to be attributed to each component model (see below):
function Averager(; weights=Float64[], named_models...)
nt = NamedTuple(named_models)
modelnames = keys(nt)
models = values(nt)
return Averager(modelnames, models, weights)
end
# for example:
averager = Averager(weights=[1, 1],
model1=KNNRegressor(K=3),
model2=RidgeRegressor())
# so we can do `averager.model1` and `averager.model2`:
Base.propertynames(::Averager{modelnames}) where modelnames =
tuple(:weights, modelnames...)
function Base.getproperty(averager::Averager{modelnames},
name::Symbol) where modelnames
name === :weights && return getfield(averager, :weights)
models = getfield(averager, :models)
for j in eachindex(modelnames)
name === modelnames[j] && return models[j]
end
error("type Averager has no field $name")
end
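# With the overloads above, component models are accessed using the names passed to
# the keyword constructor: for the `averager` defined above, `averager.model1`
# returns the `KNNRegressor` and `averager.model2` the `RidgeRegressor`, even though
# both are stored in the `models` ntuple.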
# overload multiplication of a node by a matrix:
import Base.*
*(preds::Node, weights) = node(p->p*weights, preds)
# learning network wrapped in a prefit method:
function MLJBase.prefit(averager::Averager{modelnames},
verbosity,
X,
y) where modelnames
Xs = source(X)
ys = source(y)
weights = averager.weights
machines = [machine(name, Xs, ys) for
name in modelnames]
predictions = hcat([predict(mach, Xs) for mach in machines]...)
yhat = (1/sum(weights))*(predictions*weights)
return (; predict=yhat)
end
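# So the exported model predicts the weighted average
#     yhat = (w1*yhat1 + ... + wn*yhatn)/(w1 + ... + wn),
# with the matrix-by-weights product lifted to nodes by the `*` overload above.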
@testset "composite with component models stored in ntuple" begin
X, y = make_regression(10, 3);
mach = machine(averager, X, y)
fit!(mach, verbosity=0)
fp = fitted_params(mach)
@test keys(fp.model1) == (:tree, )
@test keys(fp.model2) == (:coefficients, :intercept)
@test isnothing(report(mach))
@test iterator(range(averager, :(model1.K), lower=1, upper=10), 10) == 1:10
end
## DATA FRONT-END IN AN EXPORTED LEARNING NETWORK
mutable struct Scale <: MLJBase.Static
scaling::Float64
end
function MLJBase.transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X * s.scaling
MLJBase.table(s.scaling * MLJBase.matrix(X), prototype=X)
end
function MLJBase.inverse_transform(s::Scale, _, X)
X isa AbstractVecOrMat && return X / s.scaling
MLJBase.table(MLJBase.matrix(X) / s.scaling, prototype=X)
end
mutable struct ElephantModel <: ProbabilisticNetworkComposite
scaler
clf
cache::Bool
end
function MLJBase.prefit(model::ElephantModel, verbosity, X, y)
Xs = source(X)
ys = source(y)
mach1 = machine(:scaler, cache=model.cache)
W = transform(mach1, Xs)
# a classifier with reformat front-end:
mach2 = machine(:clf, W, ys, cache=model.cache)
yhat = predict(mach2, W)
return (; predict=yhat)
end
@testset "reformat/selectrows logic in composite model" begin
X = (x1=ones(5), x2=ones(5))
y = categorical(collect("abaaa"))
model = ElephantModel(Scale(2.0),
ConstantClassifier(testing=true, bogus=1.0),
true)
mach = machine(model, X, y, cache=false)
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(mach, verbosity=0, rows=1:3)
)
@test mach.state == 1
# new clf hyperparameter (same rows) means no reformatting or resampling:
model.clf.bogus = 10
@test_logs fit!(mach, verbosity=0, rows=1:3)
@test mach.state == 2
# however changing an upstream hyperparameter forces reformatting
# and resampling:
model.scaler.scaling = 3.1
@test_logs((:info, "reformatting X, y"),
(:info, "resampling X, y"),
fit!(mach, verbosity=0, rows=1:3))
end
@testset "operation nodes that are source nodes" begin
mutable struct MontenegroComposite <: UnsupervisedNetworkComposite
stand
end
MontenegroComposite(; stand=Standardizer()) = MontenegroComposite(stand)
function MLJBase.prefit(model::MontenegroComposite, verbosity, X)
Xs = source(X)
mach1 = machine(:stand, Xs)
X2 = transform(mach1, Xs)
# node for the inverse_transform:
return (transform=X2, inverse_transform=Xs)
end
X = (x = Float64[1, 2, 3],)
mach = machine(MontenegroComposite(), X)
fit!(mach, verbosity=0, force=true)
@test transform(mach, X).x ≈ Float64[-1, 0, 1]
@test inverse_transform(mach, X) == X
end
# # STATIC MODEL WITH MULTIPLE INPUTS
mutable struct Balancer <: Static end
MLJBase.transform(::Balancer, _, X, y) = (selectrows(X, 1:2), selectrows(y, 1:2))
struct ThinWrapper <: StaticNetworkComposite
balancer
end
function MLJBase.prefit(wrapper::ThinWrapper, verbosity)
data = source() # empty source because there is no training data
Xs = first(data)
ys = last(data)
mach=machine(:balancer)
output = transform(mach, Xs, ys)
(; transform = output)
end
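# A `StaticNetworkComposite` has no training data, so `prefit` receives none. The
# empty `source()` above acts as a placeholder for the `(X, y)` tuple supplied when
# `transform(mach, X, y)` is eventually called, and `first`/`last` split it into the
# two arguments expected by the balancer.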
balancer = Balancer()
wrapper = ThinWrapper(balancer)
X, y = make_blobs()
mach = machine(wrapper)
Xunder, yunder = transform(mach, X, y)
@test Xunder == selectrows(X, 1:2)
@test yunder == selectrows(y, 1:2)
# # MACHINE INTEGRATION TESTS
X = (x1=rand(3), x2=rand(3), x3=rand(3))
y = float.([1, 2, 3])
mutable struct Bar <: DeterministicNetworkComposite
scale::Float64
rgs
input_stand
target_stand
end
function MLJBase.prefit(model::Bar, verbosity, X, y)
Xs = source(X)
ys = source(y)
y1 = model.scale*ys
mach1 = machine(:input_stand, Xs)
X1 = transform(mach1, Xs)
mach2 = machine(:target_stand, y1)
z = transform(mach2, ys)
mach3 = machine(:rgs, X1, z)
zhat = predict(mach3, X1)
yhat = inverse_transform(mach2, zhat)
return (; predict=yhat)
end
@testset "user-friendly inspection of reports and fitted params" begin
scale=0.97
rgs = KNNRegressor()
input_stand = Standardizer()
target_stand = UnivariateStandardizer()
model = Bar(scale, rgs, input_stand, target_stand)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
# mutating the models should not affect the result:
model.scale = 42.3
model.rgs.K = 42
model.input_stand.features=[:x1,]
r = report(mach)
@test only(keys(r)) == :input_stand
@test Set(r.input_stand.features_fit) == Set([:x1, :x2, :x3])
fp = fitted_params(mach)
@test fp.rgs isa NamedTuple{(:tree,)}
@test fp.input_stand.mean_and_std_given_feature[:x1] |> collect ≈
[mean(X.x1), std(X.x1)]
@test fp.target_stand.fitresult |> collect ≈
[mean(0.97*y), std(0.97*y)] # scale = 0.97 at fit! call
end
mutable struct Mixer <: DeterministicNetworkComposite
model1
model2
misc::Int
end
function MLJBase.prefit(model::Mixer, verbosity, X, y)
Xs = source(X)
ys = source(y)
mach1 = machine(:model1, Xs, ys)
mach2 = machine(:model2, Xs, ys)
yhat1 = predict(mach1, Xs)
yhat2 = predict(mach2, Xs)
yhat = 0.5*yhat1 + 0.5*yhat2
return (; predict=yhat)
end
@testset "#549" begin
model = Mixer(KNNRegressor(), KNNRegressor(), 42)
mach = fit!(machine(model, make_regression(10, 3)...), verbosity=0)
fp = fitted_params(mach)
@test !(fp.model1 isa Vector)
@test !(fp.model2 isa Vector)
end
# ## SERIALIZATION
@testset "Test serializable of composite machines" begin
filename = "stack_mach.jls"
X, y = make_regression(100, 1)
model = Stack(
metalearner = DecisionTreeRegressor(),
tree1 = DecisionTreeRegressor(min_samples_split=3),
tree2 = DecisionTreeRegressor(),
measures=rmse)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
# Check serializable function
smach = MLJBase.serializable(mach)
TestUtilities.generic_tests(mach, smach)
# Check data has been wiped out from models at the first level of composition
submachines = machines(glb(mach.fitresult))
ssubmachines = machines(glb(smach.fitresult))
@test length(submachines) == length(ssubmachines)
for submach in ssubmachines
TestUtilities.test_data(submach)
end
# Testing extra report field: the cv_report is shared with the original machine (same object)
@test report(smach).cv_report === report(mach).cv_report
@test smach.fitresult isa MLJBase.Signature
Serialization.serialize(filename, smach)
smach = Serialization.deserialize(filename)
MLJBase.restore!(smach)
@test MLJBase.predict(smach, X) == MLJBase.predict(mach, X)
@test keys(fitted_params(smach)) == keys(fitted_params(mach))
@test keys(report(smach)) == keys(report(mach))
rm(filename)
# End to end
MLJBase.save(filename, mach)
smach = machine(filename)
@test predict(smach, X) == predict(mach, X)
rm(filename)
end
@testset "Test serializable of nested composite machines" begin
filename = "nested_stack_mach.jls"
X, y = make_regression(100, 1)
pipe = (X -> coerce(X, :x₁=>Continuous)) |> DecisionTreeRegressor()
model = @test_logs (:warn, r"") Stack(
metalearner = DecisionTreeRegressor(),
pipe = pipe)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
MLJBase.save(filename, mach)
smach = machine(filename)
@test predict(smach, X) == predict(mach, X)
# Test data has been erased at the first and second levels of composition
for submach in machines(glb(smach.fitresult))
TestUtilities.test_data(submach)
if submach isa Machine{<:NetworkComposite}
for subsubmach in machines(glb(submach.fitresult))
TestUtilities.test_data(subsubmach)
end
end
end
rm(filename)
end
struct DummyRangeCV
cv
end
torange(x::UnitRange) = x
torange(x) = minimum(x):maximum(x)
function MLJBase.train_test_pairs(dcv::DummyRangeCV, rows, X, y)
ttp = MLJBase.train_test_pairs(dcv.cv, rows)
return [(torange(t),torange(e)) for (t,e) in ttp]
end
@testset "Test serialized filesize does not increase with datasize" begin
# At the moment it is necessary to resort to a custom resampling strategy
# for this test. This is because partial functions working on nodes,
# such as `selectrows` in learning networks, store data.
# A basic CV would store index vectors which would grow in size as the dataset grows.
dcv = DummyRangeCV(CV(nfolds=3))
model = Stack(
metalearner = FooBarRegressor(lambda=1.),
resampling = dcv,
model_1 = DeterministicConstantRegressor(),
model_2=ConstantRegressor()
)
filesizes = []
for n in [100, 500, 1000]
filename = "serialized_temp_$n.jls"
X, y = make_regression(n, 1)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
MLJBase.save(filename, mach)
push!(filesizes, filesize(filename))
rm(filename)
end
@test all(x==filesizes[1] for x in filesizes)
# What happens if no `serializable` step was applied:
filename = "full_of_data.jls"
X, y = make_regression(1000, 1)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
serialize(filename, mach)
@test filesize(filename) > filesizes[1]
@test_logs (:warn, MLJBase.warn_bad_deserialization(mach.state)) machine(filename)
rm(filename)
end
end # module
true
module TestPipelines2
using MLJBase
using Test
using ..Models
using ..TestUtilities
using StableRNGs
using Tables
import MLJBase: Pred, Trans
rng = StableRNG(698790187)
@testset "helpers" begin
@test MLJBase.individuate([:x, :y, :x, :z, :y, :x]) ==
[:x, :y, :x2, :z, :y2, :x3]
end
# # DUMMY MODELS
mutable struct MyTransformer2 <: Static
ftr::Symbol
end
MLJBase.transform(transf::MyTransformer2, verbosity, X) =
fill(:st, nrows(X))
mutable struct MyDeterministic <: Deterministic
x::Symbol
end
MLJBase.fit(::MyDeterministic, args...) = nothing, nothing, (; tlosses=ones(3))
MLJBase.transform(m::MyDeterministic, ::Any, Xnew) = fill(:dt, nrows(Xnew))
MLJBase.predict(m::MyDeterministic, ::Any, Xnew) = fill(:dp, nrows(Xnew))
MLJBase.supports_training_losses(::Type{<:MyDeterministic}) = true
MLJBase.iteration_parameter(::Type{<:MyDeterministic}) = :x
MLJBase.training_losses(::MyDeterministic, report) = report.tlosses
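# The trait declarations above are what let a pipeline wrapping `MyDeterministic`
# advertise support for training losses and expose a nested iteration parameter;
# this is checked in the "training_losses" testset further below.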
mutable struct MyProbabilistic <: Probabilistic
x::Symbol
end
mutable struct MyUnsupervised <: Unsupervised
x::Symbol
end
MLJBase.fit(::MyUnsupervised, args...) = nothing, nothing, nothing
MLJBase.transform(m::MyUnsupervised, ::Any, Xnew) = fill(:ut, nrows(Xnew))
MLJBase.predict(m::MyUnsupervised, ::Any, Xnew) = fill(:up, nrows(Xnew))
mutable struct MyInterval <: Interval
x::Symbol
end
mutable struct StaticKefir <: Static
alpha::Float64 # non-zero to be invertible
end
MLJBase.reporting_operations(::Type{<:StaticKefir}) = (:transform, :inverse_transform)
# piece-wise linear function that is linear only for `alpha=1`:
kefir(x, alpha) = x > 0 ? x * alpha : x / alpha
MLJBase.transform(model::StaticKefir, _, X) = (
broadcast(kefir, X, model.alpha),
(; first = first(X)),
)
MLJBase.inverse_transform(model::StaticKefir, _, W) = (
broadcast(kefir, W, 1/(model.alpha)),
(; last = last(W)),
)
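# Sanity check of the algebra: for x > 0, kefir(x, alpha) = x*alpha > 0, so
# kefir(kefir(x, alpha), 1/alpha) = x*alpha*(1/alpha) = x; the x <= 0 branch works
# similarly. Hence, for alpha > 0, `transform` and `inverse_transform` are mutual
# inverses, as exercised in the "inverse transform for pipes with static components"
# testset below.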
d = MyDeterministic(:d)
p = MyProbabilistic(:p)
u = MyUnsupervised(:u)
s = MyTransformer2(:s) # Static
i = MyInterval(:i)
m = MLJBase.matrix
t = MLJBase.table
@testset "pipe_named_tuple" begin
@test_throws MLJBase.ERR_EMPTY_PIPELINE MLJBase.pipe_named_tuple((),())
@test_throws(MLJBase.ERR_TOO_MANY_SUPERVISED,
MLJBase.pipe_named_tuple((:foo, :foo, :foo), (d, u, p)))
_names = (:trf, :fun, :fun, :trf, :clf)
components = (u, m, t, u, d)
@test MLJBase.pipe_named_tuple(_names, components) ==
NamedTuple{(:trf, :fun, :fun2, :trf2, :clf),
Tuple{Unsupervised,
Any,
Any,
Unsupervised,
Deterministic}}(components)
end
@testset "public constructor" begin
# un-named components:
flute = Pipeline(m, t, u)
@test flute isa UnsupervisedPipeline
@test MLJBase.constructor(flute) == Pipeline
@test Pipeline(m, t, u, p) isa ProbabilisticPipeline
@test Pipeline(m, t, u, p, operation=predict_mean) isa DeterministicPipeline
@test Pipeline(u, p, u, operation=predict_mean) isa DeterministicPipeline
@test Pipeline(m, t) isa StaticPipeline
@test Pipeline(m, t, s) isa StaticPipeline
@test Pipeline(m, t, s, d) isa DeterministicPipeline
@test Pipeline(m, t, i) isa IntervalPipeline
@test_logs((:info, MLJBase.INFO_TREATING_AS_DETERMINISTIC),
@test Pipeline(m, t, u, p, u) isa DeterministicPipeline)
@test_logs((:info, MLJBase.INFO_TREATING_AS_DETERMINISTIC),
@test Pipeline(m, t, u, i, u) isa DeterministicPipeline)
# if "hidden" supervised model is already deterministic,
# no need for warning:
@test_logs @test Pipeline(m, t, u, d, u) isa DeterministicPipeline
# named components:
@test Pipeline(c1=m, c2=t, c3=u) isa UnsupervisedPipeline
@test Pipeline(c1=m, c2=t, c3=u, c5=p) isa ProbabilisticPipeline
@test Pipeline(c1=m, c2=t) isa StaticPipeline
@test Pipeline(c1=m, c2=t, c6=s) isa StaticPipeline
@test Pipeline(c1=m, c2=t, c6=s, c7=d) isa DeterministicPipeline
@test Pipeline(c1=m, c2=t, c8=i) isa IntervalPipeline
@test_logs((:info, MLJBase.INFO_TREATING_AS_DETERMINISTIC),
@test Pipeline(c1=m, c2=t, c3=u, c5=p, c4=u) isa
DeterministicPipeline)
@test(Pipeline(c1=m, c2=t, c3=u, c5=p, c4=u, prediction_type=:interval) isa
IntervalPipeline)
@test(Pipeline(c1=m, c2=t, c3=u, c5=p, c4=u,
prediction_type=:probabilistic) isa
ProbabilisticPipeline)
@test_logs((:info, MLJBase.INFO_TREATING_AS_DETERMINISTIC),
@test Pipeline(c1=m, c2=t, c3=u, c8=i, c4=u) isa
DeterministicPipeline)
# if "hidden" supervised model is already deterministic,
# no need for warning:
@test_logs(@test Pipeline(c1=m, c2=t, c3=u, c7=d, c4=u) isa
DeterministicPipeline)
# errors and warnings:
@test_throws MLJBase.ERR_MIXED_PIPELINE_SPEC Pipeline(m, mymodel=p)
@test_throws(MLJBase.ERR_INVALID_OPERATION,
Pipeline(u, s, operation=cos))
@test_throws(MLJBase.ERR_INVALID_PREDICTION_TYPE,
Pipeline(u=u, s=s, prediction_type=:ostrich))
@test_logs((:warn, MLJBase.WARN_IGNORING_PREDICTION_TYPE),
Pipeline(m, t, u, prediction_type=:deterministic))
@test_throws(MLJBase.err_prediction_type_conflict(d, :probabilistic),
Pipeline(m, t, d, prediction_type=:probabilistic))
end
@testset "property access" begin
pipe = Pipeline(m, u, u, s)
# property names:
@test propertynames(pipe) ===
(:f, :my_unsupervised, :my_unsupervised2, :my_transformer2, :cache)
# getproperty:
@test pipe.my_unsupervised == u
@test pipe.my_unsupervised2 == u
@test pipe.cache == true
# replacing a component with one whose abstract supertype is the same
# or smaller:
pipe.my_unsupervised = s
@test pipe.my_unsupervised == s
# attempting to replace a component with one whose abstract supertype
# is bigger:
@test_throws MethodError pipe.my_transformer2 = u
# mutating the components themselves:
pipe.my_unsupervised.ftr = :z
@test pipe.my_unsupervised.ftr == :z
# or using MLJBase's recursive getproperty:
MLJBase.recursive_setproperty!(pipe, :(my_unsupervised.ftr), :bonzai)
@test pipe.my_unsupervised.ftr == :bonzai
# more errors:
@test_throws(MLJBase.err_pipeline_bad_property(pipe, :mount_fuji),
pipe.mount_fuji)
@test_throws(MLJBase.err_pipeline_bad_property(pipe, :mount_fuji),
pipe.mount_fuji = 42)
end
@testset "show" begin
io = IOBuffer()
pipe = Pipeline(x-> x^2, m, t, p)
show(io, MIME("text/plain"), pipe)
end
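# The next testset exercises pipeline internals: `MLJBase.Front` holds the current
# (predict, transform) node pair while a pipeline's network is being built, and
# `MLJBase.extend` grows that front one component at a time, updating whichever node
# the new component acts on.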
@testset "Front and extend" begin
composite = (;
u1=u,
u2=u,
u3=u,
s1=s,
s2=s,
d=d,
callable=x->string.(x),
)
Xs = source(rand(3))
ys = source(rand(3))
front = MLJBase.Front(Xs, ys, Pred())
@test front.predict() == Xs()
@test front.transform() == ys()
@test MLJBase.active(front)() == Xs()
front = MLJBase.Front(Xs, Xs, Trans())
front = MLJBase.extend(front, u, :u1, false, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == [:up, :up, :up]
@test tnode() == [:ut, :ut, :ut]
front = MLJBase.extend(front, s, :s1, true, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == [:up, :up, :up]
@test tnode() == [:st, :st, :st]
front = MLJBase.extend(front, u, :u2, true, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == [:up, :up, :up]
@test tnode() == [:ut, :ut, :ut]
front = MLJBase.extend(front, d, :d, true, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == [:dp, :dp, :dp]
@test tnode() == [:dt, :dt, :dt]
front = MLJBase.extend(front, x->string.(x), :callable, true, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == string.([:dp, :dp, :dp])
@test tnode() == [:dt, :dt, :dt]
front = MLJBase.extend(front, u, :u3, true, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == [:ut, :ut, :ut]
@test tnode() == [:dt, :dt, :dt]
front = MLJBase.extend(front, s, :s2, true, predict, ys)
pnode, tnode = front.predict, front.transform
fit!(pnode, verbosity=0, composite=composite)
fit!(tnode, verbosity=0, composite=composite)
@test pnode() == [:st, :st, :st]
@test tnode() == [:dt, :dt, :dt]
end
NN = 7
X = MLJBase.table(rand(rng, NN, 3));
y = 2X.x1 - X.x2 + 0.05*rand(rng,NN);
Xs = source(X); ys = source(y)
broadcast_mode(v) = mode.(v)
doubler(y) = 2*y
@testset "pipeline_network_interface" begin
t = MLJBase.table
m = MLJBase.matrix
f = FeatureSelector()
h = OneHotEncoder()
k = KNNRegressor()
u = UnivariateStandardizer()
c = ConstantClassifier()
component_name_pairs = [f => :f, k => :k]
interface = MLJBase.pipeline_network_interface(
true,
predict,
component_name_pairs,
Xs,
ys,
)
tree = interface.predict |> MLJBase.tree
@test tree.operation == predict
@test tree.model == :k
@test tree.arg1.operation == transform
@test tree.arg1.model == :f
@test tree.arg1.arg1.source == Xs
@test tree.arg1.train_arg1.source == Xs
@test tree.train_arg1 == tree.arg1
@test tree.train_arg2.source == ys
component_name_pairs = [f => :f, h => :h]
interface = MLJBase.pipeline_network_interface(
true,
predict,
component_name_pairs,
Xs,
)
tree = interface.transform |> MLJBase.tree
@test tree.operation == transform
@test tree.model == :h
@test tree.arg1.operation == transform
@test tree.arg1.model == :f
@test tree.arg1.arg1.source == Xs
@test tree.arg1.train_arg1.source == Xs
@test tree.train_arg1 == tree.arg1
component_name_pairs = [m => :m, t => :t]
interface = MLJBase.pipeline_network_interface(
true,
predict,
component_name_pairs,
Xs,
)
tree = interface.transform |> MLJBase.tree
@test tree.operation == t
@test tree.model == nothing
@test tree.arg1.operation == m
@test tree.arg1.model == nothing
@test tree.arg1.arg1.source == Xs
# check a probabilistic case:
component_name_pairs = [f => :f, c => :c]
interface = MLJBase.pipeline_network_interface(
true,
predict,
component_name_pairs,
Xs,
ys,
)
# check a static case:
component_name_pairs = [m => :m, t => :t]
interface = MLJBase.pipeline_network_interface(
true,
predict,
component_name_pairs,
Xs,
ys,
)
# An integration test...
# build a linear network and interface:
component_name_pairs = [f => :f, k => :k]
interface = MLJBase.pipeline_network_interface(
true,
predict,
component_name_pairs,
Xs,
ys,
)
yhat1 = interface.predict
# build the same network by hand:
fM = machine(:f, Xs)
Xt = transform(fM, Xs)
kM = machine(k, Xt, ys)
yhat2 = predict(kM, Xt)
# compare predictions
composite = (; f, k)
verbosity = 0
fit!(yhat1; verbosity, composite);
fit!(yhat2; verbosity, composite);
@test yhat1() ≈ yhat2()
k.K = 3; f.features = [:x3,]
fit!(yhat1; verbosity, composite);
fit!(yhat2; verbosity, composite);
@test yhat1() ≈ yhat2()
global hand_built = yhat1()
end
struct FooCarrot <: Deterministic end
@testset "iteration parameter - nothing passes through" begin
pipe = FeatureSelector() |> FooCarrot()
@test iteration_parameter(pipe) === nothing
end
@testset "training_losses" begin
model = MyDeterministic(:bla)
pipe = Standardizer() |> model
# test helpers:
@test MLJBase.supervised_component_name(pipe) == :my_deterministic
@test MLJBase.supervised_component(pipe) == model
@test supports_training_losses(pipe)
_, _, rp = MLJBase.fit(pipe, 0, X, y)
@test training_losses(pipe, rp) == ones(3)
@test iteration_parameter(pipe) ==
:(my_deterministic.x)
end
# # INTEGRATION TESTS
@testset "integration 1" begin
# check a simple pipeline prediction agrees with prediction of
# hand-built learning network built earlier:
p = Pipeline(FeatureSelector,
KNNRegressor,
prediction_type=:deterministic)
p.knn_regressor.K = 3; p.feature_selector.features = [:x3,]
mach = machine(p, X, y)
fit!(mach, verbosity=0)
@test MLJBase.tree(MLJBase.unwrap(mach.fitresult).predict).model == :knn_regressor
@test MLJBase.tree(MLJBase.unwrap(mach.fitresult).predict).arg1.model ==
:feature_selector
@test predict(mach, X) ≈ hand_built
# Check target_scitype of a supervised pipeline is the same as the supervised component
@test target_scitype(p) == target_scitype(KNNRegressor())
# test cache is set correctly internally:
machs = machines(glb(mach.fitresult))
@test all(machs) do m
MLJBase._cache_status(m) == "caches model-specific representations of data"
end
# test correct error thrown for inverse_transform:
@test_throws(MLJBase.err_unsupported_operation(:inverse_transform),
inverse_transform(mach, 3))
end
@testset "integration 2" begin
# a simple probabilistic classifier pipeline:
X = MLJBase.table(rand(rng,7,3));
y = categorical(collect("ffmmfmf"));
Xs = source(X)
ys = source(y)
p = Pipeline(OneHotEncoder, ConstantClassifier, cache=false)
mach = machine(p, X, y)
fit!(mach, verbosity=0)
@test p isa ProbabilisticNetworkComposite
@test pdf(predict(mach, X)[1], 'f') ≈ 4/7
# test cache is set correctly internally:
machs = machines(glb(mach.fitresult))
@test all(machs) do m
MLJBase._cache_status(m) == "does not cache data"
end
# test invalid replacement of classifier with regressor throws
# informative error message:
p.constant_classifier = ConstantRegressor()
@test_logs((:error, r"^Problem"),
(:info, r"^Running type"),
(:warn, MLJBase.alert_generic_scitype_mismatch(
scitype((X, y)),
MLJBase.fit_data_scitype(ConstantRegressor()),
typeof(ConstantRegressor())
)),
(:info, r"It seems"),
(:error, r"Problem"),
@test_throws Exception fit!(mach, verbosity=-1))
end
@testset "integration 3" begin
# test a simple deterministic classifier pipeline:
X = MLJBase.table(rand(rng,7,3))
y = categorical(collect("ffmmfmf"))
Xs = source(X)
ys = source(y)
p = Pipeline(OneHotEncoder, ConstantClassifier, broadcast_mode,
prediction_type=:probabilistic)
mach = machine(p, X, y)
fit!(mach, verbosity=0)
@test predict(mach, X) == fill('f', 7)
# Check target_scitype of a supervised pipeline is the same as the supervised component
@test target_scitype(p) == target_scitype(ConstantClassifier())
# test pipelines with weights:
w = map(y) do η
η == 'm' ? 100 : 1
end
mach = machine(p, X, y, w)
fit!(mach, verbosity=0)
@test predict(mach, X) == fill('m', 7)
end
age = [23, 45, 34, 25, 67]
X = (age = age,
gender = categorical(['m', 'm', 'f', 'm', 'f']))
height = [67.0, 81.5, 55.6, 90.0, 61.1]
mutable struct MyTransformer3 <: Static
ftr::Symbol
end
MLJBase.transform(transf::MyTransformer3, verbosity, X) =
selectcols(X, transf.ftr)
@testset "integration 4" begin
# static transformers in pipelines
p99 = Pipeline(X -> coerce(X, :age=>Continuous),
OneHotEncoder,
MyTransformer3(:age))
mach = fit!(machine(p99, X), verbosity=0)
@test transform(mach, X) == float.(X.age)
# Check target_scitype of an unsupervised pipeline is Unknown
@test target_scitype(p99) == Unknown
end
@testset "integration 5" begin
# pure static pipeline:
p = Pipeline(X -> coerce(X, :age=>Continuous),
MyTransformer3(:age))
mach = fit!(machine(p), verbosity=0) # no training arguments!
@test transform(mach, X) == X.age
# and another:
p = Pipeline(exp, log, x-> 2*x)
mach = fit!(machine(p), verbosity=0)
@test transform(mach, 20) ≈ 40
end
@testset "integration 6" begin
# operation different from predict:
p = Pipeline(OneHotEncoder,
ConstantRegressor,
operation=predict_mean)
@test p isa Deterministic
mach = fit!(machine(p, X, height), verbosity=0)
@test scitype(predict(mach, X)) == AbstractVector{Continuous}
end
@testset "integration 7" begin
# inverse transform:
p = Pipeline(UnivariateBoxCoxTransformer,
UnivariateStandardizer)
@test !target_in_fit(p)
xtrain = rand(rng, 10)
mach = machine(p, xtrain)
fit!(mach, verbosity=0)
x = rand(rng, 5)
y = transform(mach, x)
x̂ = inverse_transform(mach, y)
@test isapprox(x, x̂)
# Check target_scitype of an unsupervised pipeline is Unknown
@test target_scitype(p) == Unknown
end
# A dummy clustering model:
mutable struct DummyClusterer <: Unsupervised
n::Int
end
DummyClusterer(; n=3) = DummyClusterer(n)
function MLJBase.fit(model::DummyClusterer, verbosity::Int, X)
Xmatrix = Tables.matrix(X)
n = min(size(Xmatrix, 2), model.n)
centres = Xmatrix[1:n, :]
levels = categorical(1:n)
report = (centres=centres,)
fitresult = levels
return fitresult, nothing, report
end
MLJBase.transform(model::DummyClusterer, fitresult, Xnew) =
selectcols(Xnew, 1:length(fitresult))
MLJBase.predict(model::DummyClusterer, fitresult, Xnew) =
[fill(fitresult[1], nrows(Xnew))...]
@testset "integration 8" begin
# calling predict on unsupervised pipeline
# https://github.com/JuliaAI/MLJClusteringInterface.jl/issues/10
N = 20
X = (a = rand(N), b = rand(N))
p = Pipeline(PCA, DummyClusterer)
mach = machine(p, X)
fit!(mach, verbosity=0)
y = predict(mach, X)
@test y == fill(categorical(1:2)[1], N)
end
@testset "syntactic sugar" begin
# recall u, s, p, m, are defined way above
# unsupervised model |> static model:
pipe1 = u |> s
@test pipe1 == Pipeline(u, s)
# unsupervised model |> supervised model:
pipe2 = u |> p
@test pipe2 == Pipeline(u, p)
# pipe |> pipe:
hose = pipe1 |> pipe2
@test hose == Pipeline(u, s, u, p)
# pipe |> model:
@test Pipeline(u, s) |> p == Pipeline(u, s, p)
# model |> pipe:
@test u |> Pipeline(s, p) == Pipeline(u, s, p)
# pipe |> function:
@test Pipeline(u, s) |> m == Pipeline(u, s, m)
# function |> pipe:
@test m |> Pipeline(s, p) == Pipeline(m, s, p)
# model |> function:
@test u |> m == Pipeline(u, m)
# function |> model:
@test t |> u == Pipeline(t, u)
@test_logs((:info, MLJBase.INFO_AMBIGUOUS_CACHE),
Pipeline(u, cache=false) |> p)
# with types
@test PCA |> Standardizer() |> KNNRegressor ==
Pipeline(PCA(), Standardizer(), KNNRegressor())
end
@testset "miscelleneous coverage" begin
@test MLJBase.as_type(:unsupervised) == Unsupervised
end
@testset "inverse transform for pipes with static components" begin
X = randn(rng, 20)
pipe = StaticKefir(3) |> UnivariateStandardizer() |>
StaticKefir(5) |> UnivariateStandardizer()
mach = machine(pipe, X)
fit!(mach, verbosity=0)
@test inverse_transform(mach, transform(mach, X)) ≈ X
@test transform(mach, inverse_transform(mach, X)) ≈ X
end
@testset "accessing reports generated by Static models" begin
X = Float64[4, 5, 6]
pipe = UnivariateStandardizer() |> StaticKefir(3)
mach = machine(pipe, X)
fit!(mach, verbosity=0)
@test isnothing(report(mach)) # transform has not been called yet
transform(mach, X) # adds to report of mach, ie mutates mach
r = report(mach).static_kefir
@test report(mach).static_kefir.first == -1
transform(mach, [5, 6]) # mutates `mach`
r = report(mach).static_kefir
@test keys(r) == (:first, )
@test r.first == 0
inverse_transform(mach, [1, 2, 3])
r = report(mach)
@test r.inverse_transform.static_kefir.first == 0.0
@test r.inverse_transform.static_kefir.last == 3
end
@testset "Test serializable of pipeline" begin
filename = "pipe_mach.jls"
X, y = make_regression(100, 1)
pipe = Standardizer() |> KNNRegressor()
mach = machine(pipe, X, y)
fit!(mach, verbosity=0)
# Check serializable function
smach = MLJBase.serializable(mach)
TestUtilities.generic_tests(mach, smach)
@test keys(fitted_params(smach)) == keys(fitted_params(mach))
@test keys(report(smach)) == keys(report(mach))
# Check data has been wiped out from models at the first level of composition
submachines = machines(glb(mach.fitresult))
ssubmachines = machines(glb(smach.fitresult))
@test length(submachines) == length(ssubmachines)
for submach in ssubmachines
TestUtilities.test_data(submach)
end
# End to end
MLJBase.save(filename, mach)
smach = machine(filename)
@test predict(smach, X) == predict(mach, X)
rm(filename)
end
@testset "feature importances" begin
# the DecisionTreeClassifier in /test/_models/ supports feature importances.
pipe = Standardizer |> DecisionTreeClassifier()
@test reports_feature_importances(pipe)
X, y = @load_iris
fitresult, _, report = MLJBase.fit(pipe, 0, X, y)
features = first.(feature_importances(pipe, fitresult, report))
@test Set(features) == Set(keys(X))
end
struct SupervisedTransformer <: Unsupervised end
MLJBase.fit(::SupervisedTransformer, verbosity, X, y) = (mean(y), nothing, nothing)
MLJBase.transform(::SupervisedTransformer, fitresult, X) =
fitresult*MLJBase.matrix(X) |> MLJBase.table
MLJBase.target_in_fit(::Type{<:SupervisedTransformer}) = true
struct DummyTransformer <: Unsupervised end
MLJBase.fit(::DummyTransformer, verbosity, X) = (nothing, nothing, nothing)
MLJBase.transform(::DummyTransformer, fitresult, X) = X
@testset "supervised transformers in a pipeline" begin
X = MLJBase.table((a=fill(10.0, 3),))
y = fill(2, 3)
pipe = SupervisedTransformer() |> DeterministicConstantRegressor()
@test target_in_fit(pipe)
mach = machine(pipe, X, y)
fit!(mach, verbosity=0)
@test predict(mach, X) == fill(2.0, 3)
pipe2 = DummyTransformer |> pipe
@test target_in_fit(pipe2)
mach = machine(pipe2, X, y)
fit!(mach, verbosity=0)
@test predict(mach, X) == fill(2.0, 3)
pipe3 = DummyTransformer |> SupervisedTransformer |> DummyTransformer
@test target_in_fit(pipe3)
mach = machine(pipe3, X, y)
fit!(mach, verbosity=0)
@test transform(mach, X).x1 == fill(20.0, 3)
end
end # module
true
module TestStacking
using Test
using MLJBase
using StatisticalMeasures
using MLJModelInterface
using ..Models
using Random
using StableRNGs
import Distributions
rng = StableRNGs.StableRNG(1234)
function model_evaluation(models::NamedTuple, X, y; measure=rmse)
cv = CV(;nfolds=3)
results = []
for model in models
mach = machine(model, X, y)
ev = evaluate!(mach; resampling=cv, verbosity=0, measure=measure, check_measure=false)
push!(results, ev.measurement[1])
end
results
end
function test_internal_evaluation(internalreport, std_evaluation, modelnames)
for model in modelnames
model_ev = internalreport[model]
std_ev = std_evaluation[model]
@test model_ev isa PerformanceEvaluation
@test model_ev.per_fold == std_ev.per_fold
@test model_ev.measurement == std_ev.measurement
@test model_ev.per_observation[1] == std_ev.per_observation[1]
@test model_ev.per_observation[2] == std_ev.per_observation[2]
@test model_ev.operation == std_ev.operation
@test model_ev.report_per_fold == std_ev.report_per_fold
@test model_ev.train_test_rows == std_ev.train_test_rows
end
end
@testset "Testing Stack on Continuous target" begin
X, y = make_regression(500, 5; rng=rng)
@testset "Testing Deterministic Stack" begin
# Testing performance
# The dataset is generated by a simple regression model with an intercept.
# No model in the stack can recover the true model on its own;
# indeed, FooBarRegressor has no intercept.
# By combining models, the stack can generalize better than any submodel
# and optimize the rmse.
models = (constant=DeterministicConstantRegressor(),
decisiontree=DecisionTreeRegressor(),
ridge_lambda=FooBarRegressor(;lambda=0.1),
ridge=FooBarRegressor(;lambda=0))
mystack = Stack(;metalearner=FooBarRegressor(),
resampling=CV(;nfolds=3),
models...)
results = model_evaluation((stack=mystack, models...), X, y)
@test argmin(results) == 1
# Mixing Probabilistic and Deterministic models as members of the stack
models = (constant=ConstantRegressor(),
decisiontree=DecisionTreeRegressor(),
ridge_lambda=FooBarRegressor(;lambda=0.1),
ridge=FooBarRegressor(;lambda=0))
mystack = Stack(;metalearner=FooBarRegressor(),
resampling=CV(;nfolds=3),
models...)
# Testing attribute access of the stack
@test propertynames(mystack) == (
:metalearner,
:resampling,
:measures,
:cache,
:acceleration,
:constant,
:decisiontree,
:ridge_lambda,
:ridge,
)
@test mystack.decisiontree isa DecisionTreeRegressor
@test target_scitype(mystack) == target_scitype(FooBarRegressor())
@test input_scitype(mystack) == input_scitype(FooBarRegressor())
# Testing that fitted_params results are easily accessible for each
# submodel. They are in the order of the cross-validation procedure:
# here 3 folds give 3 machines, plus the final fit on all the data.
mach = machine(mystack, X, y)
fit!(mach, verbosity=0)
fp = fitted_params(mach)
@test nrows(getfield(fp, :constant)) == 4
@test nrows(getfield(fp, :decisiontree)) == 4
@test nrows(getfield(fp, :ridge)) == 4
@test nrows(getfield(fp, :ridge_lambda)) == 4
# The metalearner has been fit and has one coefficient
# for each model in the library (No intercept)
@test fp.metalearner isa Vector{Float64}
@test nrows(fp.metalearner) == 4
# Testing prediction is Deterministic
@test predict(mach) isa Vector{Float64}
end
@testset "Testing ProbabilisticStack" begin
models = (constant=ConstantRegressor(),
decisiontree=DecisionTreeRegressor(),
ridge_lambda=FooBarRegressor(;lambda=0.1),
ridge=FooBarRegressor(;lambda=0))
# The type of the stack is determined by the type of the metalearner
metalearner = ConstantRegressor(;distribution_type=Distributions.Cauchy)
mystack = Stack(;metalearner=metalearner,
resampling=CV(;nfolds=3),
models...)
@test target_scitype(mystack) == target_scitype(metalearner)
@test input_scitype(mystack) == input_scitype(FooBarRegressor())
mach = machine(mystack, X, y)
fit!(mach, verbosity=0)
@test predict(mach) isa Vector{Distributions.Cauchy{Float64}}
end
end
@testset "Testing ProbabilisticStack on Finite target" begin
X, y = make_blobs(;rng=rng, shuffle=false)
models = (constant=ConstantClassifier(),
decisiontree=DecisionTreeClassifier(),
knn=KNNClassifier())
mystack = Stack(;metalearner=DecisionTreeClassifier(),
resampling=StratifiedCV(;nfolds=3),
models...)
# Check input and target scitypes
@test target_scitype(mystack) == target_scitype(DecisionTreeClassifier())
# Here the greatest lower bound is the scitype of the knn
@test input_scitype(mystack) == input_scitype(KNNClassifier())
mach = machine(mystack, X, y)
fit!(mach, verbosity=0)
@test predict(mach) isa Vector{<:MLJBase.UnivariateFinite}
end
@testset "Stack constructor valid argument checks" begin
# baselearner cannot be a deterministic classifier:
@test_throws(
MLJBase.ERR_BAD_BASEMODEL(DeterministicConstantClassifier()),
Stack(metalearner=ConstantClassifier(), mymodel=DeterministicConstantClassifier()),
)
# metalearner should be `Deterministic` or `Probablisitic`:
@test_throws(
MLJBase.ERR_BAD_METALEARNER,
Stack(;metalearner=Standardizer(), constant=ConstantClassifier()),
)
# must specify a metalearner:
@test_throws(
MLJBase.ERR_NO_METALEARNER,
Stack(;constant=ConstantRegressor()),
)
# informative error for spelling mistakes (#796):
@test_throws(
MLJBase.err_expecting_model(CPU1(), spelling=true),
Stack(metalearner=ConstantRegressor(), knn=ConstantRegressor(), acclraton=CPU1()),
)
# informative error if a type is used in place of an instance (in a base model):
@test_throws(
MLJBase.err_expecting_model(ConstantRegressor),
Stack(metalearner=ConstantRegressor(), knn=ConstantRegressor),
)
# informative error if a type is used in place of an instance (in metalearner):
@test_throws(
MLJBase.err_expecting_model(ConstantRegressor),
Stack(metalearner=ConstantRegressor, knn=ConstantRegressor()),
)
end
@testset "Misc" begin
# Test setproperty! behaviour
models = (constant=DeterministicConstantRegressor(),
decisiontree=DecisionTreeRegressor(),
ridge_lambda=FooBarRegressor(;lambda=0.1))
mystack = Stack(;metalearner=FooBarRegressor(),
measures=rmse,
resampling=CV(;nfolds=3),
models...)
@test MLJBase.constructor(mystack) == Stack
@test mystack.ridge_lambda.lambda == 0.1
@test mystack.metalearner isa FooBarRegressor
@test mystack.resampling isa CV
mystack.ridge_lambda = FooBarRegressor(;lambda=0.2)
@test mystack.ridge_lambda.lambda == 0.2
mystack.metalearner = DecisionTreeRegressor()
@test mystack.metalearner isa DecisionTreeRegressor
mystack.resampling = StratifiedCV()
@test mystack.resampling isa StratifiedCV
# Test measures accepts a single measure
@test mystack.measures == [rmse,]
# the inner constructor accepts :resampling and :metalearner
# as modelnames
modelnames = (:resampling, :metalearner)
models = [DeterministicConstantRegressor(), FooBarRegressor(;lambda=0)]
metalearner = DeterministicConstantRegressor()
resampling = CV()
cache = true
acceleration = CPU1()
@test_logs MLJBase.DeterministicStack(
modelnames,
models,
metalearner,
resampling,
nothing,
cache,
acceleration)
# Test input_target_scitypes with non matching target_scitypes
models = [KNNRegressor()]
metalearner = KNNClassifier()
inp_scitype, tg_scitype = MLJBase.input_target_scitypes(models, metalearner)
@test tg_scitype == Unknown
@test inp_scitype == Table{<:AbstractVector{<:Continuous}}
# Test input_target_scitypes with matching target_scitypes
models = [ConstantClassifier(), DecisionTreeClassifier()]
metalearner = KNNClassifier()
inp_scitype, tg_scitype = MLJBase.input_target_scitypes(models, metalearner)
@test tg_scitype == AbstractVector{<:Finite}
@test inp_scitype == Table{<:Union{AbstractVector{<:Continuous},
AbstractVector{<:Count},
AbstractVector{<:OrderedFactor}}}
# Changing a model to one with an incompatible target scitype
initial_stack = Stack(;metalearner=FooBarRegressor(),
resampling=CV(;nfolds=3),
constant = DeterministicConstantRegressor(),
fb=FooBarRegressor())
initial_stack.constant = ConstantClassifier()
@test_throws DomainError clean!(initial_stack)
# Test check_stack_measures with
# probabilistic measure and deterministic model
measures =[log_loss]
stack = Stack(;metalearner=FooBarRegressor(),
resampling=CV(;nfolds=3),
measure=measures,
constant=ConstantRegressor(),
fb=FooBarRegressor())
X, y = make_regression()
@test_logs((:error, r"Problem fitting"),
(:info, r"Running type"),
(:info, r"Type checks okay"),
@test_throws ArgumentError fit!(machine(stack, X, y), verbosity=0))
@test_throws ArgumentError MLJBase.check_stack_measures(stack, 0, measures, y)
# This will not raise
stack.measures = nothing
fit!(machine(stack, X, y), verbosity=0)
end
@testset "function oos_set" begin
X = (x = Float64[1, 1, 2, 2, 3, 3],)
y = coerce(['a', 'b', 'b', 'c', 'a', 'a'], Multiclass)
n = nrows(y)
model1 = KNNClassifier(K=2)
model2 = ConstantClassifier()
judge = KNNClassifier(K=3)
stack = Stack(metalearner=judge,
model1=model1,
model2=model2,
resampling=CV(;nfolds=3, shuffle=true, rng=StableRNG(123)))
Xs = source(X)
ys = source(y)
ttp = MLJBase.train_test_pairs(stack.resampling, 1:n, X, y)
Zval, yval, folds_evaluations = MLJBase.oos_set(stack, Xs, ys, ttp)
# No internal measure has been provided so the resulting
# folds_evaluations contain nothing
@test all(x === nothing for x in folds_evaluations)
# To be accessed, the machines need to be trained
fit!(Zval, verbosity=0, composite=stack)
# Each model in the library outputs a 3-column block of class probabilities;
# concatenated, these give a table of shape (nrows, 6), here with nrows=6,
# for future use by the metalearner
sc = schema(Zval())
@test nrows(Zval()) == 6
@test sc.names == (:x1, :x2, :x3, :x4, :x5, :x6)
# The rows of yval should match the reordering indices
# of the original y (reordering given by the folds node)
reordering = vcat([x[2] for x in ttp]...)
@test yval() == y[reordering]
# And the same is true for Zval; let's check this for model1's output
# on the first fold, i.e. the first 2 rows and first 3 columns.
# First we need to train the model
trainingrows = ttp[1][1]
Xtrain = selectrows(X, trainingrows)
ytrain = selectrows(y, trainingrows)
mach = machine(model1, Xtrain, ytrain)
fit!(mach, verbosity=0)
# Then predict on the validation rows
Xpred = selectrows(X, ttp[1][2])
Zval_expected_dist = predict(mach, Xpred)
# These are distributions, so we need to apply the appropriate transformation
Zval_expected = pdf(Zval_expected_dist, levels(first(Zval_expected_dist)))
@test matrix(Zval())[1:2, 1:3] == Zval_expected
end
@testset "An integration test for stacked classification" begin
# We train a stack by hand and compare with the canned version
# `Stack(...)`. There are two base learners, with 3-fold
# cross-validation used to construct the out-of-sample base model
# predictions.
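# Concretely: with 3 folds, each base model is trained on two folds and predicts the
# held-out fold; stitching those held-out predictions together gives the
# out-of-sample features on which the metalearner is trained. Each base model is
# then refit on all the data to provide the features used at prediction time.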
probs(y) = pdf(y, levels(first(y)))
# data:
N = 200
X = (x = rand(rng, 3N), )
y = coerce(rand("abc", 3N), Multiclass)
# row splits:
test1 = 1:N
test2 = (N + 1):2N
test3 = (2N + 1):3N
train1 = (N + 1):3N
train2 = vcat(1:N, (2N + 1):3N)
train3 = 1:2N
# base `model1`:
model1 = KNNClassifier(K=2)
mach1 = machine(model1, X, y)
fit!(mach1, rows=train1, verbosity=0)
y11 = predict(mach1, rows=test1) |> probs
mach1 = machine(model1, X, y)
fit!(mach1, rows=train2, verbosity=0)
y12 = predict(mach1, rows=test2) |> probs
mach1 = machine(model1, X, y)
fit!(mach1, rows=train3, verbosity=0)
y13 = predict(mach1, rows=test3) |> probs
y1_oos = vcat(y11, y12, y13)
mach1_full = machine(model1, X, y)
fit!(mach1_full, verbosity=0)
y1 = predict(mach1_full, X) |> probs
# base `model2`:
model2 = DecisionTreeClassifier()
mach2 = machine(model2, X, y)
fit!(mach2, rows=train1, verbosity=0)
y21 = predict(mach2, rows=test1) |> probs
mach2 = machine(model2, X, y)
fit!(mach2, rows=train2, verbosity=0)
y22 = predict(mach2, rows=test2) |> probs
mach2 = machine(model2, X, y)
fit!(mach2, rows=train3, verbosity=0)
y23 = predict(mach2, rows=test3) |> probs
y2_oos = vcat(y21, y22, y23)
mach2_full = machine(model2, X, y)
fit!(mach2_full, verbosity=0)
y2 = predict(mach2_full, X) |> probs
# metalearner (`judge`):
X_oos = MLJBase.table(hcat(y1_oos, y2_oos))
judge = KNNClassifier(K=3)
m_judge = machine(judge, X_oos, y)
fit!(m_judge, verbosity=0)
X_judge = MLJBase.table(hcat(y1, y2))
yhat_matrix = predict(m_judge, X_judge) |> probs
# alternatively, use stack:
stack = Stack(metalearner=judge,
model1=model1,
model2=model2,
resampling=CV(nfolds=3))
mach = machine(stack, X, y)
fit!(mach, verbosity=0)
yhat_matrix_stack = predict(mach, X) |> probs
# compare:
@test yhat_matrix_stack ≈ yhat_matrix
end
@testset "Test store_for_evaluation" begin
X, y = make_blobs(;rng=rng, shuffle=false)
Xs, ys = source(X), source(y)
mach = machine(KNNClassifier(), Xs, ys)
fit!(mach, verbosity=0)
measures = [accuracy, log_loss]
mach_, Xtest, ytest = MLJBase.store_for_evaluation(mach, Xs, ys, measures)()
@test Xtest == X
@test ytest == y
@test mach_ == mach
# check fallback
@test MLJBase.store_for_evaluation(mach, Xs, ys, nothing) === nothing
end
@testset "Test internal_stack_report" begin
n = 500
X, y = make_regression(n, 5; rng=rng)
resampling = CV(;nfolds=2)
measures = [rms, l2]
constant = ConstantRegressor()
ridge = FooBarRegressor()
mystack = Stack(;metalearner=ridge,
resampling=resampling,
measures=measures,
constant=constant,
ridge=ridge)
std_evaluation = (
constant=evaluate(constant, X, y, resampling=resampling, measures=measures, verbosity=0),
ridge=evaluate(ridge, X, y, resampling=resampling, measures=measures, verbosity=0)
)
ttp = MLJBase.train_test_pairs(resampling, 1:nrows(y), X, y)
# Testing internal_stack_report default with nothing
@test MLJBase.internal_stack_report(mystack, 0, ttp, nothing, nothing) == NamedTuple{}()
# Simulate the evaluation nodes which consist of
# - The fold machine
# - Xtest
# - ytest
evaluation_nodes = []
for (train, test) in MLJBase.train_test_pairs(resampling, 1:n, y)
for model in getfield(mystack, :models)
mach = machine(model, X, y)
fit!(mach, verbosity=0, rows=train)
Xtest = selectrows(X, test)
ytest = selectrows(y, test)
push!(evaluation_nodes, source((mach, Xtest, ytest)))
end
end
internalreport = MLJBase.internal_stack_report(
mystack,
0,
ttp,
evaluation_nodes...
).report.cv_report()
test_internal_evaluation(internalreport, std_evaluation, (:constant, :ridge))
test_internal_evaluation(internalreport, std_evaluation, (:constant, :ridge))
@test std_evaluation.constant.fitted_params_per_fold ==
internalreport.constant.fitted_params_per_fold
@test std_evaluation.ridge.fitted_params_per_fold ==
internalreport.ridge.fitted_params_per_fold
end
@testset "Test internal evaluation of the stack in regression mode" begin
X, y = make_regression(500, 5; rng=rng)
resampling = CV(;nfolds=3)
measures = [rms, l2]
constant = ConstantRegressor()
ridge = FooBarRegressor()
mystack = Stack(;metalearner=FooBarRegressor(),
resampling=resampling,
measure=measures,
ridge=ridge,
constant=constant)
mach = machine(mystack, X, y)
fit!(mach, verbosity=0)
internalreport = report(mach).cv_report
# evaluate constant and ridge outside the stack and check results match
std_evaluation = (
constant = evaluate(constant, X, y,
measure=measures,
resampling=resampling,
verbosity=0),
ridge = evaluate(ridge, X, y, measure=measures, resampling=resampling, verbosity=0)
)
test_internal_evaluation(internalreport, std_evaluation, (:constant, :ridge))
@test std_evaluation.constant.fitted_params_per_fold ==
internalreport.constant.fitted_params_per_fold
@test std_evaluation.ridge.fitted_params_per_fold ==
internalreport.ridge.fitted_params_per_fold
end
@testset "Test internal evaluation of the stack in classification mode" begin
X, y = make_blobs(;rng=rng, shuffle=false)
resampling = StratifiedCV(;nfolds=3)
measures = [accuracy, log_loss]
constant = ConstantClassifier()
knn = KNNClassifier()
mystack = Stack(;metalearner=DecisionTreeClassifier(),
resampling=resampling,
constant=constant,
knn=knn,
measures=measures)
mach = machine(mystack, X, y)
fit!(mach, verbosity=0)
internalreport = report(mach).cv_report
# evaluate decisiontree and ridge out of stack and check results match
std_evaluation = (
constant = evaluate(constant, X, y,
measure=measures,
resampling=resampling,
verbosity=0),
knn = evaluate(knn, X, y, measure=measures, resampling=resampling, verbosity=0)
)
test_internal_evaluation(internalreport, std_evaluation, (:knn, :constant))
# Test fitted_params
for i in 1:mystack.resampling.nfolds
std_constant_fp = std_evaluation.constant.fitted_params_per_fold[i]
intern_constant_fp = internalreport.constant.fitted_params_per_fold[i]
@test std_constant_fp.target_distribution ≈ intern_constant_fp.target_distribution
std_knn_fp = std_evaluation.knn.fitted_params_per_fold[i]
intern_knn_fp = internalreport.knn.fitted_params_per_fold[i]
@test std_knn_fp.tree.data == intern_knn_fp.tree.data
end
end
@testset "Test Holdout CV" begin
X, y = make_regression(100, 3; rng=rng)
resampling = Holdout()
constant = ConstantRegressor()
ridge = FooBarRegressor()
mystack = Stack(;metalearner=FooBarRegressor(),
resampling=resampling,
measures=[rmse],
ridge=ridge,
constant=constant)
mach = machine(mystack, X, y)
fit!(mach, verbosity=0)
for modelname in (:ridge, :constant)
model_perf = getproperty(report(mach).cv_report, modelname)
@test length(model_perf.per_fold) == 1
@test length(model_perf.train_test_rows) == 1
end
end
@testset "Test cache is forwarded to submodels" begin
X, y = make_regression(100, 3; rng=rng)
constant = ConstantRegressor()
ridge = FooBarRegressor()
mystack = Stack(;metalearner=FooBarRegressor(),
cache=false,
ridge=ridge,
constant=constant)
mach = machine(mystack, X, y)
fit!(mach, verbosity = 0)
# The data and resampled_data have not been populated
for mach in machines(glb(mach.fitresult))
@test !isdefined(mach, :data)
@test !isdefined(mach, :resampled_data)
end
end
# a regression `Stack` which has `model` as one of the base models:
function _stack(model, resource)
models = (constant=DeterministicConstantRegressor(),
ridge_lambda=FooBarRegressor(;lambda=0.1),
model=model)
Stack(;
metalearner=FooBarRegressor(;lambda=0.05),
resampling=CV(;nfolds=3),
acceleration=resource,
models...
)
end
# return a nested stack in which `model` appears at two levels, with
# both layers accelerated using `resource`:
_double_stack(model, resource) =
_stack(_stack(model, resource), resource)
@testset "Test multithreaded version" begin
X, y = make_regression(100, 5; rng=StableRNG(1234))
stack = _double_stack(FooBarRegressor(;lambda=0.07), CPU1())
mach = machine(stack, X, y)
fit!(mach, verbosity=0)
cpu_fp = fitted_params(mach)
cpu_ypred = predict(mach)
stack = _double_stack(FooBarRegressor(;lambda=0.07), CPUThreads())
mach = machine(stack, X, y)
fit!(mach, verbosity=0)
thread_fp = fitted_params(mach)
thread_ypred = predict(mach)
@test cpu_ypred ≈ thread_ypred
@test cpu_fp.metalearner ≈ thread_fp.metalearner
@test cpu_fp.ridge_lambda ≈ thread_fp.ridge_lambda
end
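# `Parsnip` is a dummy probabilistic classifier that tolerates `missing`
# values in the training target; it is used below to test stacks whose
# adjudicator (metalearner) supports missings (issue #816).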
mutable struct Parsnip <: MLJBase.Probabilistic
bogus::Int
end
function MLJBase.fit(::Parsnip, verbosity::Int, A, y)
y1 = skipmissing(y) |> collect
fitresult = MLJBase.Distributions.fit(MLJBase.UnivariateFinite, y1)
cache = nothing
report = NamedTuple()
return fitresult, cache, report
end
MLJBase.predict(::Parsnip, fitresult, Xnew) =
fill(fitresult, nrows(Xnew))
MLJBase.target_scitype(::Type{<:Parsnip}) = AbstractVector{<:Union{Missing,Finite}}
MLJBase.input_scitype(::Type{<:Parsnip}) = Table(Union{Missing,Continuous})
@testset "Adjudicators that support missings works #816" begin
# get a data set with missings in target
X, y0 = @load_crabs
y = vcat([missing, ], y0[2:end])
stack = Stack(
metalearner=Parsnip(1),
model1 = Parsnip(2),
model2 = Parsnip(3),
)
mach = machine(stack, X, y)
fit!(mach, verbosity=0)
end
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 3651 | module TestWrappedFunctions
# using Revise
using Test
using MLJBase
using ..Models
using CategoricalArrays
using StatisticalMeasures
import Random.seed!
seed!(1234)
struct PlainTransformer <: Static
ftr::Symbol
end
MLJBase.transform(transf::PlainTransformer, verbosity, X) =
selectcols(X, transf.ftr)
@testset "machine constructor for static transformers" begin
X = (x1=rand(3), x2=[1, 2, 3]);
mach = machine(PlainTransformer(:x2))
@test transform(mach, X) == [1, 2, 3]
end
struct YourTransformer <: Static
ftr::Symbol
end
MLJBase.reporting_operations(::Type{<:YourTransformer}) = (:transform,)
# returns `(output, report)`:
MLJBase.transform(transf::YourTransformer, verbosity, X) =
(selectcols(X, transf.ftr), (; nrows=nrows(X)))
MLJBase.predict(transf::YourTransformer, verbosity, X) =
collect(1:nrows(X)) |> reverse
@testset "nodal machine constructor for static transformers" begin
X = (x1=rand(3), x2=[1, 2, 3]);
mach = machine(YourTransformer(:x2))
@test transform(mach, X) == [1, 2, 3]
@test_throws MLJBase.ERR_ROWS_NOT_ALLOWED transform(mach, rows=:)
@test predict(mach, X) == [3, 2, 1]
@test report(mach).nrows == 3
transform(mach, (x2=["a", "b"],))
@test report(mach).nrows == 2
end
x1 = rand(30)
x2 = rand(30)
x3 = rand(30)
y = exp.(x1 - x2 -2x3 + 0.1*rand(30))
X = (x1=x1, x2=x2, x3=x3)
f(X) = (a=selectcols(X, :x1), b=selectcols(X, :x2))
knn = KNNRegressor()
# 1. function in a pipeline:
knn_target = TransformedTargetModel(knn, transformer=UnivariateBoxCoxTransformer())
comp1 = f |> Standardizer() |> knn_target
e = evaluate(comp1, X, y, measure=mae, resampling=Holdout(), verbosity=0)
# 2. function with parameters in a pipeline:
mutable struct GreatTransformer <: Static
ftr::Symbol
end
MLJBase.transform(transf::GreatTransformer, verbosity, X) =
(a=selectcols(X, transf.ftr), b=selectcols(X, :x2))
comp2 = GreatTransformer(:x3) |> Standardizer() |> knn_target
comp2.great_transformer.ftr = :x1 # change the parameter
e2 = evaluate(comp2, X, y, measure=mae, resampling=Holdout(), verbosity=0)
@test e2.measurement[1] ≈ e.measurement[1]
# 3. function in a `NetworkComposite`:
mutable struct Comp3 <: DeterministicNetworkComposite
rgs
end
f(X::AbstractNode) = node(f, X)
function MLJBase.prefit(::Comp3, verbosity, X, y)
Xs = source(X)
ys = source(y)
X2 = f(Xs) # f defined in global scope
W = transform(machine(Standardizer(), X2), X2)
box_mach = machine(UnivariateBoxCoxTransformer(), ys)
z = transform(box_mach, ys)
knn_mach = machine(:rgs, W, z)
zhat = predict(knn_mach, W)
yhat = inverse_transform(box_mach, zhat)
return (; predict=yhat)
end
comp3 = Comp3(knn)
e3 = evaluate(comp3, X, y, measure=mae, resampling=Holdout(), verbosity=0)
@test e3.measurement[1] ≈ e.measurement[1]
# 4. function with parameters in `NetworkComposite`:
mutable struct CC <: DeterministicNetworkComposite
transf
rgs
end
function MLJBase.prefit(::CC, verbosity, X, y)
Xs = source(X)
ys = source(y)
inserter_mach = machine(:transf)
X2 = transform(inserter_mach, Xs)
W = transform(machine(Standardizer(), X2), X2)
box_mach = machine(UnivariateBoxCoxTransformer(), ys)
z = transform(box_mach, ys)
knn_mach = machine(:rgs, W, z)
zhat = predict(knn_mach, W)
yhat = inverse_transform(box_mach, zhat)
return (; predict=yhat)
end
inserter = GreatTransformer(:x3)
comp4 = CC(inserter, knn)
comp4.transf.ftr = :x1 # change the parameter
e4 = evaluate(comp4, X, y, measure=mae, resampling=Holdout(), verbosity=0)
@test e4.measurement[1] ≈ e.measurement[1]
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 6459 | module TestTransformedTargetModel
using MLJBase
using Test
using ..Models
using StableRNGs
const MMI = MLJBase.MLJModelInterface
rng = StableRNG(698790187)
atom = DeterministicConstantRegressor()
p_atom = ConstantRegressor()
whitener = UnivariateStandardizer()
@testset "constructor and clean!" begin
model = @test_throws(
MLJBase.ERR_TRANSFORMER_UNSPECIFIED,
TransformedTargetModel(atom),
)
@test_logs TransformedTargetModel(atom, transformer=UnivariateStandardizer)
model = @test_logs TransformedTargetModel(atom, transformer=whitener)
@test model.model == atom
@test model.inverse == nothing
@test model.transformer == whitener
@test model.cache
model = @test_logs TransformedTargetModel(model=atom, transformer=whitener)
@test model.model == atom
@test model.inverse == nothing
model = @test_logs(
TransformedTargetModel(atom, transformer=whitener, inverse=identity))
@test model.model == atom
@test model.inverse == identity
@test_throws(MLJBase.ERR_MODEL_UNSPECIFIED,
TransformedTargetModel(transformer=whitener))
@test_throws(MLJBase.ERR_TOO_MANY_ARGUMENTS,
TransformedTargetModel(atom, whitener))
@test_throws(MLJBase.err_tt_unsupported(whitener),
TransformedTargetModel(whitener, transformer=whitener))
model = @test_logs((:warn, MLJBase.WARN_IDENTITY_INVERSE),
TransformedTargetModel(p_atom, transformer=whitener))
@test model.inverse == identity
model = @test_logs((:warn, MLJBase.WARN_MISSING_INVERSE),
TransformedTargetModel(atom, transformer=y->log.(y)))
@test model.inverse == identity
end
# a function for transforming and its inverse:
f(y) = sin.(y)
g(y) = asin.(y)
# implemented as a static model:
mutable struct Nonlinear <: Static
λ::Float64 # ignored
end
Nonlinear(; λ=1) = Nonlinear(λ)
MMI.transform(::Nonlinear, _, y) = f(y)
MMI.inverse_transform(::Nonlinear, _, z) = g(z)
# some data:
X, _ = make_regression(5)
z = rand(rng, 5)
y = asin.(z)
@test sin.(y) ≈ z
# average of y on untransformed scale:
avg = mean(y)
# average of y on the non-linear scale defined by f:
avg_nonlinear = g(mean(f(y))) # = g(mean(z))
@testset "fit and predict" begin
# Remember that `atom` predicts the constant mean of the training
# target on all new observations. Let's
# check it's expected behaviour before wrapping:
fr, _, _ = MMI.fit(atom, 0, X, y)
@test predict(atom, fr, X) ≈ fill(avg, 5)
# Test wrapping using f and g:
model = TransformedTargetModel(atom, transformer=f, inverse=g)
@test MLJBase.constructor(model) == TransformedTargetModel
fr1, _, _ = MMI.fit(model, 0, X, y)
@test first(predict(model, fr1, X)) ≈ fill(avg_nonlinear, 5)
# Test wrapping using a `Static` transformer:
model = TransformedTargetModel(atom, transformer=Nonlinear())
fr1, _, _ = MMI.fit(model, 0, X, y)
@test first(predict(model, fr1, X)) ≈ fill(avg_nonlinear, 5)
# Test wrapping using a non-static `Unsupervised` model:
model = TransformedTargetModel(atom, transformer=whitener)
fr1, _, _ = MMI.fit(model, 0, X, y)
@test first(predict(model, fr1, X)) ≈ fill(avg, 5) # whitener is linear
# Test with `inverse=identity`:
model = TransformedTargetModel(atom, transformer=Nonlinear(), inverse=identity)
fr1, _, _ = MMI.fit(model, 0, X, y)
@test first(predict(model, fr1, X)) ≈ fill(mean(z), 5)
# Test a probabilistic model:
model = TransformedTargetModel(p_atom, transformer=whitener, inverse=identity)
fr1, _, _ = MMI.fit(model, 0, X, y)
yhat = predict(model, fr1, X) |> first
@test isapprox(first(yhat).μ, 0, atol=1e-15)
end
MMI.iteration_parameter(::Type{DeterministicConstantRegressor}) = :n
@testset "traits" begin
model = TransformedTargetModel(atom, transformer=Nonlinear())
@test input_scitype(model) == input_scitype(atom)
@test target_scitype(model) == target_scitype(atom)
@test is_wrapper(model)
@test iteration_parameter(model) == :(model.n)
@test package_name(model) == "MLJBase"
@test occursin("2229d", package_uuid(model))
@test package_license(model) == "MIT"
@test package_url(model) == "https://github.com/JuliaAI/MLJBase.jl"
end
@testset "integration 1" begin
model = TransformedTargetModel(atom, transformer=Nonlinear())
mach = machine(model, X, y)
fit!(mach, verbosity=0)
@test predict(mach, X) ≈ fill(avg_nonlinear, 5)
@test issubset([:model,], keys(fitted_params(mach)))
@test fitted_params(mach).model.fitresult ≈ mean(z)
end
@testset "integration 2" begin
model = TransformedTargetModel(atom, transformer=UnivariateBoxCoxTransformer())
mach = machine(model, X, y)
fit!(mach, verbosity=2)
@test predict(mach, X) isa Vector
@test issubset([:model, :transformer], keys(fitted_params(mach)))
@test issubset([:λ, :c], keys(fitted_params(mach).transformer))
end
@testset "integration 3" begin
model = TransformedTargetModel(atom, transformer=v->log.(v), inverse=v->exp.(v))
mach = machine(model, X, y)
fit!(mach, verbosity=0)
@test predict(mach, X) isa Vector
@test keys(fitted_params(mach)) == (:model,)
end
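# A dummy deterministic model that reports per-observation training losses,
# used below to check that `TransformedTargetModel` exposes training losses
# and the iteration parameter of the atomic model.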
mutable struct FooModel46 <: Deterministic
epochs
end
MMI.fit(::FooModel46, verbosity, X, y) =
nothing, nothing, (training_losses=ones(length(y)),)
MMI.predict(::FooModel46, fitresult, Xnew) = ones(nrows(Xnew))
MMI.supports_training_losses(::Type{<:FooModel46}) = true
MMI.iteration_parameter(::Type{<:FooModel46}) = :epochs
MMI.training_losses(::FooModel46, report) = report.training_losses
X = rand(5)
y = rand(5)
@testset "training_losses" begin
atom = FooModel46(10)
model = TransformedTargetModel(atom, transformer=Nonlinear())
@test supports_training_losses(model)
@test iteration_parameter(model) == :(model.epochs)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
rep = report(mach)
@test rep.model.training_losses == ones(5)
@test training_losses(mach) == ones(5)
end
@testset "feature_importances" begin
X, y = @load_iris
atom = DecisionTreeClassifier()
model = TransformedTargetModel(atom, transformer=identity, inverse=identity)
@test reports_feature_importances(model)
fitresult, _, rpt = MMI.fit(model, 0, X, y)
@test Set(first.(feature_importances(model, fitresult, rpt))) == Set(keys(X))
end
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 9069 | module TestData
using Test
#using DataFrames
import TypedTables
using CategoricalArrays
import Tables
using ScientificTypes
using Random
using StableRNGs
import StableRNGs.StableRNG
rng = StableRNG(55511)
import MLJBase
import MLJBase: decoder, int, classes, partition, unpack, selectcols, matrix,
CategoricalValue, selectrows, select, table, nrows, restrict,
corestrict, complement, transform
@testset "partition" begin
train, test = partition(1:100, 0.9)
@test collect(train) == collect(1:90)
@test collect(test) == collect(91:100)
rng = StableRNG(666)
train, test = partition(1:100, 0.9, shuffle=true, rng=rng)
@test length(train) == 90
@test length(test) == 10
@test train[1:8] == [49, 75, 98, 99, 47, 59, 65, 12]
rng = StableRNG(888)
train, test = partition(1:100, 0.9, rng=rng)
rng = StableRNG(888)
train2, test2 = partition(1:100, 0.9, shuffle=true, rng=rng)
@test train2 == train
@test test2 == test
train, test = partition(1:100, 0.9, shuffle=false, rng=1)
@test collect(train) == collect(1:90)
@test collect(test) == collect(91:100)
# Matrix
X = collect(reshape(1:10, 5, 2))
@test partition(X, 0.2, 0.4) == ([1 6], [2 7; 3 8], [4 9; 5 10])
rng = StableRNG(42)
@test partition(X, 0.2, 0.4; shuffle=true, rng=rng) == ([5 10], [3 8; 4 9], [1 6; 2 7])
# Table
rows = Tables.rows((a=collect(1:5), b=collect(6:10)))
@test partition(rows, 0.6, 0.2) ==
((a = [1, 2, 3], b = [6, 7, 8]), (a = [4], b = [9]), (a = [5], b = [10]))
rng = StableRNG(123)
@test partition(rows, 0.6, 0.2; shuffle=true, rng=rng) ==
((a = [3, 1, 5], b = [8, 6, 10]), (a = [2], b = [7]), (a = [4], b = [9]))
# Not a vector/matrix/table
@test_throws MLJBase.ERR_PARTITION_UNSUPPORTED partition(1, 0.4)
# bad `fractions`:
@test_throws DomainError partition(1:10, 1.5)
# with stratification
y = ones(Int, 1000)
y[end-100:end] .= 0; # 90%
train1, test1 =
partition(eachindex(y), 0.8, stratify=categorical(y), rng=34)
train, test = partition(eachindex(y), 0.8, stratify=y, rng=34)
@test train == train1
@test test == test1
@test isapprox(sum(y[train])/length(train), 0.9, rtol=1e-2)
@test isapprox(sum(y[test])/length(test), 0.9, rtol=1e-2)
s1, s2, s3 = partition(eachindex(y), 0.3, 0.6, stratify=y, rng=345)
@test isapprox(sum(y[s1])/length(s1), 0.9, rtol=1e-2)
@test isapprox(sum(y[s2])/length(s2), 0.9, rtol=1e-2)
@test isapprox(sum(y[s3])/length(s3), 0.9, rtol=1e-2)
y = ones(Int, 1000)
y[end-500:end-200] .= 2
y[end-200+1:end] .= 3
p1 = sum(y .== 1) / length(y) # 0.5
p2 = sum(y .== 2) / length(y) # 0.3
p3 = sum(y .== 3) / length(y) # 0.2
s1, s2, s3 = partition(eachindex(y), 0.3, 0.6, stratify=y, rng=111)
# overkill test...
for s in (s1, s2, s3)
for (i, p) in enumerate((p1, p2, p3))
@test isapprox(sum(y[s] .== i)/length(s), p, rtol=1e-2)
end
end
# it should work with missing values though maybe not recommended...
y = ones(Union{Missing,Int}, 1000)
y[end-600:end-550] .= missing
y[end-500:end-200] .= 2
y[end-200+1:end] .= 3
p1 = sum(skipmissing(y) .== 1) / length(y) # 0.45
p2 = sum(skipmissing(y) .== 2) / length(y) # 0.3
p3 = sum(skipmissing(y) .== 3) / length(y) # 0.2
pm = sum(ismissing.(y)) / length(y) # 0.05
s1, s2 = partition(eachindex(y), 0.7, stratify=y, rng=11)
for s in (s1, s2)
for (i, p) in enumerate((p1, p2, p3))
@test isapprox(sum(y[s] .=== i)/length(s), p, rtol=1e-2)
end
@test isapprox(sum(ismissing.(y[s]))/length(s), pm, rtol=1e-1)
end
# test ordering is preserved if no shuffle
s1, s2 = partition(eachindex(y), 0.7, stratify=y)
@test issorted(s1)
@test issorted(s2)
s1, s2 = partition(eachindex(y), 0.7, stratify=y, shuffle=true)
@test !issorted(s1)
@test !issorted(s2)
# parallel partitions:
X = rand(20, 2)
y = rand(20)
Xtrain, Xtest, Xvalid = partition(X, 0.1, 0.3, rng=StableRNG(123))
ytrain, ytest, yvalid = partition(y, 0.1, 0.3, rng=StableRNG(123))
@test partition((X, y), 0.1, 0.3, rng=StableRNG(123), multi=true) ==
((Xtrain, Xtest, Xvalid), (ytrain, ytest, yvalid))
y = rand(21)
@test_throws(MLJBase.ERR_PARTITION_DIMENSION_MISMATCH,
partition((X, y), 0.2, multi=true))
end
@testset "unpack" begin
channing = TypedTables.Table(
Sex = categorical(["Female", "Male", "Female"]),
Entry = Int32[965, 988, 850],
Exit = Int32[1088, 1045, 940],
Time = Int32[123, 57, 90],
Cens = Int32[0, 0, 1],
weights = [1,2,5])
w, y, X, rest = unpack(channing,
==(:weights),
==(:Exit),
x -> x != :Time;
:Exit=>Continuous,
:Entry=>Continuous,
:Cens=>Multiclass)
@test w == selectcols(channing, :weights)
@test y == selectcols(channing, :Exit)
@test X == selectcols(channing, [:Sex, :Entry, :Cens])
@test rest === selectcols(channing, :Time)
@test scitype_union(y) <: Continuous
@test scitype_union(selectcols(X, :Cens)) <: Multiclass
w, y, X = unpack(channing,
==(:weights),
==(:Exit),
x -> x != :Time;
wrap_singles=true,
:Exit=>Continuous,
:Entry=>Continuous,
:Cens=>Multiclass)
@test selectcols(w, 1) == selectcols(channing, :weights)
@test selectcols(y, 1) == selectcols(channing, :Exit)
@test X == selectcols(channing, [:Sex, :Entry, :Cens])
@test_throws(Exception, unpack(channing,
==(:weights),
==(:Exit),
==(:weights),
x -> x != :Time;
:Exit=>Continuous,
:Entry=>Continuous,
:Cens=>Multiclass))
# shuffling:
small = (x=collect(1:5), y = collect("abcde"))
x, y, w = unpack(small, ==(:x), ==(:y); rng=StableRNG(123))
@test x == [3, 1, 5, 2, 4]
@test y == ['c', 'a', 'e', 'b', 'd']
@test isempty(w)
@test unpack(small, ==(:x), ==(:y); shuffle=true, rng=StableRNG(66)) ==
unpack(small, ==(:x), ==(:y); rng=StableRNG(66))
end
@testset "restrict and corestrict" begin
f = ([1], [2, 3], [4, 5, 6, 7], [8, 9, 10])
@test complement(f, 1) == [2, 3, 4, 5, 6, 7, 8, 9, 10]
@test complement(f, 2) == [1, 4, 5, 6, 7, 8, 9, 10]
@test complement(f, 3) == [1, 2, 3, 8, 9, 10]
@test complement(f, 4) == [1, 2, 3, 4, 5, 6, 7]
X = 10:10:100
@test restrict(X, f, 3) == 40:10:70
@test corestrict(X, f, 3) == [10, 20, 30, 80, 90, 100]
end
@testset "coverage" begin
@test_throws DomainError partition(1:10, 1.5)
@test_throws MethodError selectrows(Val(:other), (1,), (1,))
@test_throws MethodError selectcols(Val(:other), (1,), (1,))
@test_throws MethodError select(Val(:other), (1,), (1,), (1,))
@test_throws MethodError nrows(Val(:other), (1,))
nt = (a=5, b=7)
@test MLJBase.project(nt, :) == (a=5, b=7)
@test MLJBase.project(nt, :a) == (a=5, )
@test MLJBase.project(nt, 1) == (a=5, )
X = MLJBase.table((x=[1,2,3], y=[4,5,6]))
@test select(X, 1, :y) == 4
end
@testset "transforming from raw values and categorical values" begin
values = vcat([missing, ], collect("asdfjklqwerpoi"))
Xraw = rand(rng, values, 15, 10)
X = categorical(Xraw)
element = skipmissing(X) |> first
@test transform(element, missing) |> ismissing
raw = first(skipmissing(Xraw))
c = transform(element, raw)
@test Set(classes(c)) == Set(classes(X))
@test c == first(skipmissing(X))
RAW = Xraw[2:end-1,2:end-1]
C = transform(element, RAW)
@test Set(classes(C)) == Set(classes(X))
@test identity.(skipmissing(C)) ==
identity.(skipmissing(X[2:end-1,2:end-1]))
raw = first(skipmissing(Xraw))
c = transform(X, raw)
@test Set(classes(c)) == Set(classes(X))
@test c == first(skipmissing(X))
RAW = Xraw[2:end-1,2:end-1]
C = transform(X, RAW)
@test Set(classes(C)) == Set(classes(X))
@test identity.(skipmissing(C)) ==
identity.(skipmissing(X[2:end-1,2:end-1]))
end
@testset "skipinvalid" begin
w = rand(5)
@test MLJBase.skipinvalid([1, 2, missing, 3, NaN], [missing, 5, 6, 7, 8]) ==
([2, 3], [5, 7])
@test(
MLJBase._skipinvalid([1, 2, missing, 3, NaN],
[missing, 5, 6, 7, 8],
w) ==
([2, 3], [5, 7], w[[2,4]]))
@test(
MLJBase._skipinvalid([1, 2, missing, 3, NaN],
[missing, 5, 6, 7, 8],
nothing) ==
([2, 3], [5, 7], nothing))
end
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 2572 | module TestDatasets
# using Revise
using Test
using MLJBase
X, y = @load_boston
@test schema(X).names == (:Crim, :Zn, :Indus, :NOx, :Rm, :Age,
:Dis, :Rad, :Tax, :PTRatio, :Black, :LStat)
@test scitype(y) <: AbstractVector{Continuous}
X, y = @load_ames
schema(X).names == (:MSSubClass, :MSZoning, :LotFrontage, :LotArea,
:Street, :LotShape, :LandContour, :LotConfig,
:LandSlope, :Neighborhood, :Condition1,
:Condition2, :BldgType, :HouseStyle,
:OverallQual, :OverallCond, :YearBuilt,
:YearRemodAdd, :RoofStyle, :RoofMatl,
:Exterior1st, :Exterior2nd, :MasVnrType,
:MasVnrArea, :ExterQual, :ExterCond,
:Foundation, :BsmtQual, :BsmtCond, :BsmtExposure,
:BsmtFinType1, :BsmtFinSF1, :BsmtFinType2,
:BsmtFinSF2, :BsmtUnfSF, :TotalBsmtSF, :Heating,
:HeatingQC, :CentralAir, :Electrical, :x1stFlrSF,
:x2ndFlrSF, :LowQualFinSF, :GrLivArea, :BsmtFullBath,
:BsmtHalfBath, :FullBath, :HalfBath,
:BedroomAbvGr, :KitchenAbvGr, :KitchenQual,
:TotRmsAbvGrd, :Functional, :Fireplaces,
:FireplaceQu, :GarageType, :GarageYrBlt,
:GarageFinish, :GarageCars, :GarageArea,
:GarageQual, :GarageCond, :PavedDrive,
:WoodDeckSF, :OpenPorchSF, :EnclosedPorch,
:x3SsnPorch, :ScreenPorch, :PoolArea, :MiscVal,
:MoSold, :YrSold, :SaleType, :SaleCondition)
@test scitype(y) <: AbstractVector{Continuous}
X, y = @load_reduced_ames
schema(X).names == (:OverallQual, :GrLivArea, :Neighborhood,
:x1stFlrSF, :TotalBsmtSF, :BsmtFinSF1, :LotArea,
:GarageCars, :MSSubClass, :GarageArea, :YearRemodAdd,
:YearBuilt)
@test scitype(y) <: AbstractVector{Continuous}
X, y = @load_iris
@test schema(X).names == (:sepal_length, :sepal_width, :petal_length,
:petal_width)
@test scitype(y) <: AbstractVector{<:Multiclass}
X, y = @load_crabs
@test schema(X).names == (:FL, :RW, :CL, :CW, :BD)
@test scitype(y) <: AbstractVector{<:Multiclass}
X, y = @load_smarket
@test schema(X).names == (:Year, :Lag1, :Lag2, :Lag3, :Lag4, :Lag5, :Volume, :Today)
@test scitype(y) == AbstractVector{Multiclass{2}}
X = @load_sunspots
@test schema(X).names == (:sunspot_number, )
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 3654 | module TestDatasetsSynthetic
using Test
using MLJBase
using Random
using Statistics
using CategoricalArrays
using StableRNGs
@testset "make_blobs" begin
# Standard behaviour
n, p, centers = 110, 2, 3
X, y = make_blobs(n, p; centers=centers)
@test (n, p) == size(MLJBase.matrix(X))
@test n == length(y)
@test centers == length(unique(y))
@test y isa CategoricalVector
# Specific arguments
rng = StableRNG(600)
n, p = 5000, 3
centers = randn(rng,4, p)
stds = [1.0, 2.0, 3.0, 7.0]
X, y = make_blobs(n, p; centers=centers, shuffle=false,
center_box=-5. => 5.,
cluster_std=stds, rng=534,
as_table=false, eltype=Float32)
@test size(X) == (n, p)
@test eltype(X) == Float32
@test isapprox(std((X[y .== 1, :])), 1.0, rtol=0.2) # roughly 1
@test isapprox(std((X[y .== 4, :])), 7.0, rtol=0.2) # roughly 7
# Errors
@test_throws ArgumentError make_blobs(0, 0)
@test_throws ArgumentError make_blobs(;center_box=5=>2)
@test_throws ArgumentError make_blobs(n, p; centers=randn(rng,4, p+1))
@test_throws ArgumentError make_blobs(n, p; centers=3, cluster_std=[1,1])
@test_throws ArgumentError make_blobs(n, p; centers=2, cluster_std=[0,1])
end
@testset "make_circles" begin
n = 55
X, y = make_circles(n)
@test (n, 2) == size(MLJBase.matrix(X))
@test n == length(y)
@test 2 == length(unique(y))
@test y isa CategoricalVector
# specific arguments
X, y = make_circles(150; shuffle=false, noise=0.01, factor=0.2,
rng=55, as_table=false, eltype=Float32)
@test eltype(X) == Float32
rs = sqrt.(sum(X.^2, dims=2))
@test all(0.15 .< rs[y.==0] .< 0.25)
@test all(0.95 .< rs[y.==1] .< 1.05)
# Errors
@test_throws ArgumentError make_circles(-1)
@test_throws ArgumentError make_circles(; noise=-1)
@test_throws ArgumentError make_circles(; factor=5)
@test_throws ArgumentError make_circles(; factor=0)
end
@testset "make_moons" begin
n = 55
X, y = make_moons(n)
@test (n, 2) == size(MLJBase.matrix(X))
@test n == length(y)
@test 2 == length(unique(y))
# specific arguments
X, y = make_moons(50; shuffle=false, noise=0.5, xshift=0.3, yshift=0.2,
rng=455, as_table=false, eltype=Float32)
@test length(unique(y)) == 2
@test eltype(X) == Float32
# Errors
@test_throws ArgumentError make_moons(-1)
@test_throws ArgumentError make_moons(noise=-1)
end
@testset "make_regression" begin
n, p = 100, 5
X, y = make_regression(n, p)
Xm = MLJBase.matrix(X)
@test size(Xm) == (n, p)
@test length(y) == n
# specific arguments
X, y = make_regression(150, 3; intercept=false, sparse=0.3, noise=0.5,
outliers=0.5, as_table=false,
eltype=Float32, rng=155)
@test eltype(X) == Float32
@test size(X) == (150, 3)
@test length(y) == 150
# binary
X, y = make_regression(150, 3; binary=true)
@test y isa CategoricalVector
# errors
@test_throws ArgumentError make_regression(-5, 2)
@test_throws ArgumentError make_regression(2, -2)
@test_throws ArgumentError make_regression(noise=-1)
@test_throws ArgumentError make_regression(sparse=-1)
@test_throws ArgumentError make_regression(outliers=-1)
X, y = make_regression(n, p; n_targets = 4)
@test MLJBase.Tables.istable(y) == true
@test MLJBase.Tables.columnnames(y) == [:target1, :target2, :target3, :target4]
end
end # module
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 8556 | module TestOneDimensionalRangeIterators
using Test
using MLJBase
using Random
import Distributions
using Statistics
using StableRNGs
rng = StableRNG(66600099)
stable_rng() = StableRNG(123)
const Dist = Distributions
mutable struct DummyModel <: Deterministic
K::Int
metric::Float64
kernel::Char
end
dummy_model = DummyModel(4, 9.5, 'k')
mutable struct SuperModel <: Deterministic
lambda::Float64
model1::DummyModel
model2::DummyModel
end
dummy1 = DummyModel(1, 9.5, 'k')
dummy2 = DummyModel(2, 9.5, 'k')
super_model = SuperModel(0.5, dummy1, dummy2)
z1 = range(dummy_model, :K, lower=1, upper=10)
z2 = range(dummy_model, :K, lower=10, origin=10^6, unit=10^5)
z3 = range(dummy_model, :K, upper=-10, origin=-10^6, unit=10^5)
z4 = range(super_model, :lambda, lower=1, upper=10)
z5 = range(dummy_model, :K, origin=10, unit=20)
p1 = range(dummy_model, :K, lower=1, upper=10, scale=:log10)
p2 = range(dummy_model, :kernel, values=['c', 'd'])
p3 = range(super_model, :lambda, lower=0.1, upper=1, scale=:log2)
p4 = range(dummy_model, :K, lower=1, upper=3, scale=x->2x)
[p4, p4]
# Starting from julia v"1.7.0-DEV.1233", the default RNG has changed
# create a function giving julia version dependent default RNG with seed.
@static if VERSION >= v"1.7.0-DEV.1230"
_default_rng(seed) = (rng = TaskLocalRNG(); Random.seed!(rng, seed))
else
_default_rng(seed) = MersenneTwister(seed)
end
@testset "scale transformations" begin
@test transform(MLJBase.Scale, scale(:log), ℯ) == 1
@test inverse_transform(MLJBase.Scale, scale(:log), 1) == float(ℯ)
end
@testset "inspecting scales of ranges" begin
@test scale(p1) == :log10
@test scale(p2) == :none
@test scale(p3) == :log2
@test scale(p4) == :custom
@test scale(sin) === sin
end
@testset "iterators" begin
@test iterator(p1, 5) == [1, 2, 3, 6, 10]
@test iterator(p2) == collect(p2.values)
u = 2^(log2(0.1)/2)
@test iterator(p3, 3) ≈ [0.1, u, 1]
@test iterator(p4, 3) == [2, 4, 6]
# semi-unbounded ranges:
v = Int.(round.(exp.([(1-t)*log(10) + t*log(10+2e5)
for t in 0:(1/3):1]))) |> unique
@test iterator(z2, 4) == v
@test iterator(z3, 4) == reverse(-v)
# doubly-unbounded ranges:
@test iterator(z5, 4) ==
iterator(range(Int, :foo, lower=-10, upper=30), 4)
# truncated nominal range iteration:
rr = range(Char, :foo, values=collect("abcdefg"))
@test iterator(rr, 3) == ['a', 'b', 'c']
# random:
rng = StableRNG(66); @test iterator(rng, p1, 5) == [10, 2, 3, 6, 1]
rng = StableRNG(22); @test iterator(rng, p1, 5) != [10, 2, 3, 6, 1]
rng = StableRNG(33); @test iterator(rng, rr) == ['b', 'c', 'a', 'g',
'f', 'd', 'e']
rng = StableRNG(44); @test iterator(rng, rr) != ['b', 'c', 'a', 'g',
'f', 'd', 'e']
rng = StableRNG(88); @test iterator(rng, rr, 3) == ['b', 'c', 'a']
rng = StableRNG(99); @test iterator(rng, rr, 3) != ['a', 'c', 'b']
# with callable as scale:
r = range(Int, :dummy, lower=1, upper=2, scale=x->10^x)
expecting = map(x->round(Int,10^x), range(1, stop= 2, length=10))
@test iterator(r, 10) == expecting
end
@testset "fitting distributions to NumericRange objects" begin
# characterizations
l = rand(rng)
u = max(l, rand(rng)) + 1
r = range(Int, :dummy, lower=l, upper=u)
for D in [:Arcsine, :Uniform, :Biweight, :Cosine, :Epanechnikov,
:SymTriangularDist, :Triweight]
eval(quote
d = Dist.fit(Dist.$D, $r)
@test minimum(d) ≈ $l
@test maximum(d) ≈ $u
end
)
end
o = randn(rng)
s = rand(rng)
r = range(Int, :dummy, lower=-Inf, upper=Inf, origin=o, unit=s)
for D in [:Cauchy, :Gumbel, :Normal, :Laplace]
eval(quote
d = Dist.fit(Dist.$D, $r)
@test Dist.location(d) ≈ $o
@test Dist.scale(d) ≈ $s
end
)
end
o = rand(rng)
s = o/(1 + rand(rng))
r = range(Int, :dummy, lower=-Inf, upper=Inf, origin=o, unit=s)
for D in [:Normal, :Gamma, :InverseGaussian, :LogNormal, :Logistic]
eval(quote
d = Dist.fit(Dist.$D, $r)
@test mean(d) ≈ $o
@test std(d) ≈ $s
end
)
end
r = range(Float64, :dummy, lower=-Inf, upper=Inf, unit=s, origin=o,)
d = Dist.fit(Dist.Poisson, r)
@test mean(d) ≈ s
# truncation
r = range(Int, :dummy, lower=l, upper=u)
d = Dist.fit(Dist.Normal, r)
@test minimum(d) == l
@test maximum(d) == u
# unsupported distributions
@test_throws ArgumentError Dist.fit(Dist.Beta, r)
end
@testset "NumericSampler - distribution instance specified" begin
@testset "integers" begin
r = range(Int, :dummy, lower=11, upper=13)
d = Dist.Uniform(1, 20)
s = MLJBase.sampler(r, d)
rng = StableRNG(0)
dict = Dist.countmap(rand(rng, s, 1000))
eleven, twelve, thirteen = map(x -> dict[x], 11:13)
@test eleven == 252 && twelve == 514 && thirteen == 234
rng = StableRNG(999);
dict = Dist.countmap(rand(rng, s, 1000))
eleven, twelve, thirteen = map(x -> dict[x], 11:13)
@test eleven == 236 && twelve == 494 && thirteen == 270
end
@testset "right-unbounded floats" begin
r = range(Float64, :dummy, lower=0.2, upper = Inf,
origin=5, unit=1) # origin and unit not relevant here
s = MLJBase.sampler(r, Dist.Normal())
rng = stable_rng()
v = rand(rng, s, 1000)
@test all(x >= 0.2 for x in v)
@test abs(minimum(v)/0.2 - 1) <= 0.02
rng = stable_rng()
@test rand(rng, s, 1000) == v
q = quantile(v, 0.0:0.1:1.0)
v2 = filter(x -> x>=0.2, rand(stable_rng(), Dist.Normal(), 3000))[1:1000]
q2 = quantile(v2, 0.0:0.1:1.0)
@test all(x -> x≈1.0, q ./ q2)
end
@testset "sampler using callable scale" begin
r = range(Int, :dummy, lower=1, upper=2, scale=x->10^x)
s = sampler(r, Dist.Uniform)
Random.seed!(123)
v = rand(s, 10000)
@test issubset(v, 10:100)
rng = _default_rng(123)
@test rand(rng, s, 10000) == v
r = range(Float64, :dummy, lower=1, upper=2, scale=x->10^x)
s = sampler(r, Dist.Uniform)
Random.seed!(1)
v = rand(s, 10000)
@test abs(minimum(v) - 10) < 0.02
@test abs(maximum(v) - 100) < 0.02
rng = _default_rng(1)
@test rand(rng, s, 10000) == v
end
end
@testset "NumericSampler - distribution type specified" begin
r = range(Int, :k, lower=2, upper=6, origin=4.5, unit=1.2)
s = MLJBase.sampler(r, Dist.Normal)
v1 = rand(MersenneTwister(1), s, 50)
d = Dist.truncated(Dist.Normal(r.origin, r.unit), r.lower, r.upper)
v2 = map(x -> round(Int, x), rand(MersenneTwister(1), d, 50))
@test v1 == v2
end
@testset "NominalSampler" begin
r = range(Char, :(model.dummy), values=collect("cab"))
N = 10000
# to compute half-width of 95% confidence intervals, for counts of a Bernoulli process
# with probability `p`, sampled `N` times:
halfwidth(p, N) = 1.96*sqrt(p*(1 - p))*sqrt(N)
@testset "probability vector specified" begin
p = Dict('c'=>0.1, 'a'=>0.2, 'b'=>0.7)
rng = StableRNG(660)
s = MLJBase.sampler(r, [p[class] for class in "cab"])
counts = Dist.countmap(rand(rng,s, N))
for class in "abc"
μ = p[class]*N
@test abs(counts[class] - μ) < halfwidth(p[class], N)
end
end
@testset "probability vector unspecified (uniform)" begin
s = MLJBase.sampler(r)
rng = StableRNG(660)
counts = Dist.countmap(rand(rng,s, N))
for class in "abc"
μ = N/3
@test abs(counts[class] - μ) < halfwidth(1/3, N)
end
end
end
struct MySampler end
Base.rand(rng::AbstractRNG, ::MySampler) = rand(rng)
@testset "scale(s) for s a sampler" begin
@test scale(MySampler()) == :none
r = range(Char, :(model.dummy), values=collect("cab"))
@test scale(MLJBase.sampler(r, [0.1, 0.2, 0.7])) == :none
r1 = range(Int, :dummy, lower=1, upper=2, scale=x->10^x)
@test scale(MLJBase.sampler(r1, Dist.Uniform)) == :custom
r2 = range(Int, :k, lower=2, upper=6, origin=4.5, unit=1.2, scale=:log2)
@test scale(MLJBase.sampler(r2, Dist.Normal)) == :log2
end
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 4546 | module TestOneDimensionalRanges
using Test
using MLJBase
mutable struct DummyModel <: Deterministic
K::Int
metric::Float64
kernel::Char
end
dummy_model = DummyModel(4, 9.5, 'k')
mutable struct AnyModel <: Deterministic
any
end
mutable struct SuperModel <: Deterministic
lambda::Float64
model1::DummyModel
model2::DummyModel
end
dummy1 = DummyModel(1, 9.5, 'k')
dummy2 = DummyModel(2, 9.5, 'k')
any1 = AnyModel(1)
super_model = SuperModel(0.5, dummy1, dummy2)
@testset "constructors" begin
@test_throws ArgumentError range(dummy_model, :K, lower=Inf,
origin=1, unit=1)
@test_throws ArgumentError range(dummy_model, :K, upper=-Inf,
origin=1, unit=1)
@test_throws DomainError range(dummy_model, :K, lower=1)
@test_throws DomainError range(dummy_model, :K, lower=1, upper=Inf)
@test_throws DomainError range(dummy_model, :K, upper=1)
@test_throws DomainError range(dummy_model, :K, upper=1, lower=-Inf)
@test_throws DomainError range(dummy_model, :K, lower=1, origin=2)
@test_throws DomainError range(dummy_model, :K, lower=1, upper=Inf,
origin=2)
@test_throws DomainError range(dummy_model, :K, upper=1, origin=2)
@test_throws DomainError range(dummy_model, :K, upper=1, lower=-Inf,
origin=2)
@test_throws DomainError range(dummy_model, :K,
lower=3, unit=0, origin=4)
@test_throws DomainError range(dummy_model, :K,
lower=3, unit=1, origin=2)
@test_throws DomainError range(dummy_model, :K, origin=2)
@test_throws DomainError range(dummy_model, :K, unit=1)
@test_throws ArgumentError range(dummy_model, :kernel)
@test_throws ArgumentError range(dummy_model, :K, values=['c', 'd'])
@test_throws ArgumentError range(Int, :K, values=['c', 'd'])
@test range(dummy_model, :K, values=[1, 7]) ==
range(Int, :K, values=[1, 7])
z1 = range(dummy_model, :K, lower=1, upper=10)
@test z1.origin == 5.5
@test z1.unit == 4.5
@test z1.scale == :linear
z2 = range(dummy_model, :K, lower=10, origin=10^6, unit=10^5)
@test z2.origin == 10^6
@test z2.unit == 10^5
@test z2.upper == Inf
@test z2.scale == :log10
z3 = range(dummy_model, :K, upper=-10, origin=-10^6, unit=10^5)
@test z3.origin == -10^6
@test z3.unit == 10^5
@test z3.lower == -Inf
@test z3.scale == :log10minus
z4 = range(super_model, :lambda, lower=1, upper=10)
@test z4.origin == 5.5
@test z4.unit == 4.5
@test z4.scale == :linear
z5 = range(dummy_model, :K, origin=10, unit=20)
@test z5.scale == :linear
p1 = range(dummy_model, :K, lower=1, upper=10, scale=:log10)
p2 = range(dummy_model, :kernel, values=['c', 'd'])
p3 = range(super_model, :lambda, lower=0.1, upper=1, scale=:log2)
p4 = range(dummy_model, :K, lower=1, upper=3, scale=x->2x)
# test that you can replace model with type:
@test z1 == range(Int, :K, lower=1, upper=10)
@test z4 == range(Float64, :lambda, lower=1, upper=10)
@test p2 == range(Char, :kernel, values=['c', 'd'])
end
@testset "range constructors for nested parameters" begin
p1 = range(dummy_model, :K, lower=1, upper=10, scale=:log10)
q1 = range(super_model, :(model1.K) , lower=1, upper=10, scale=:log10)
@test iterator(q1, 5) == iterator(p1, 5)
q2 = range
end
@testset "warnings and errors" begin
# unambiguous union should work
@test range(Union{Nothing, Float64}, :any, lower=1, upper=10) ==
range(Float64, :any, lower=1, upper=10)
# ambiguous union should fail
@test_throws(MLJBase.ERROR_AMBIGUOUS_UNION,
range(Union{Float32, Float64}, :any, lower=1, upper=10))
# untyped parameters should warn if inferred
@test_logs((:warn, MLJBase.WARN_INFERRING_TYPE),
range(any1, :any, lower=1, upper=10))
end
@testset "coverage" begin
io = IOBuffer()
r1 = range(Int, :junk, lower=1, upper=10)
r2 = range(Char, :junk, values=['c', 'd'])
r3 = range(Float64, :junk, lower=3.14159, upper=6.283185)
show(io, r1)
@test String(take!(io)) == "NumericRange(1 ≤ junk ≤ 10; origin=5.5, unit=4.5)"
show(io, r2)
@test String(take!(io)) == "NominalRange(junk = c, d)"
show(io, r3)
@test String(take!(io)) == "NumericRange(3.142 ≤ junk ≤ 6.283; origin=4.712, unit=1.571)"
close(io)
end
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 6175 | import DataFrames
rng = StableRNGs.StableRNG(123)
@testset "categorical" begin
x = 1:5
@test MLJModelInterface.categorical(x) == categorical(x)
end
@testset "classes" begin
v = categorical(collect("asqfasqffqsaaaa"), ordered=true)
@test classes(v[1]) == levels(v)
@test classes(v) == levels(v)
levels!(v, reverse(levels(v)))
@test classes(v[1]) == levels(v)
@test classes(v) == levels(v)
end
@testset "MLJModelInterface.scitype overload" begin
ST = MLJBase.ScientificTypes
x = rand(Int, 3)
y = rand(Int, 2, 3)
z = rand(3)
a = rand(4, 3)
b = categorical(["a", "b", "c"])
c = categorical(["a", "b", "c"]; ordered=true)
X = (x1=x, x2=z, x3=b, x4=c)
@test MLJModelInterface.scitype(x) == ST.scitype(x)
@test MLJModelInterface.scitype(y) == ST.scitype(y)
@test MLJModelInterface.scitype(z) == ST.scitype(z)
@test MLJModelInterface.scitype(a) == ST.scitype(a)
@test MLJModelInterface.scitype(b) == ST.scitype(b)
@test MLJModelInterface.scitype(c) == ST.scitype(c)
@test MLJModelInterface.scitype(X) == ST.scitype(X)
end
@testset "MLJModelInterface.schema overload" begin
ST = MLJBase.ScientificTypes
x = rand(Int, 3)
z = rand(3)
b = categorical(["a", "b", "c"])
c = categorical(["a", "b", "c"]; ordered=true)
X = (x1=x, x2=z, x3=b, x4=c)
@test_throws ArgumentError MLJModelInterface.schema(x)
@test MLJModelInterface.schema(X) == ST.schema(X)
end
@testset "int, classes, decoder" begin
N = 10
mix = shuffle(rng, 0:N - 1)
Xraw = broadcast(x->mod(x, N), rand(rng, Int, 2N, 3N))
Yraw = string.(Xraw)
# to turn a categ matrix into a ordinary array with categorical
# elements. Needed because broacasting the identity gives a
# categorical array in CategoricalArrays >0.5.2
function matrix_(X)
ret = Array{Any}(undef, size(X))
for i in eachindex(X)
ret[i] = X[i]
end
return ret
end
X = categorical(Xraw)
x = X[1]
Y = categorical(Yraw)
y = Y[1]
V = matrix_(X)
W = matrix_(Y)
# raw(x::MLJBase.CategoricalValue) = x.pool.index[x.level]
# @test raw.(classes(xo)) == xo.pool.levels
# @test raw.(classes(yo)) == yo.pool.levels
# # getting all possible elements from one:
# @test raw.(X) == Xraw
# @test raw.(Y) == Yraw
# @test raw.(classes(xo)) == levels(Xo)
# @test raw.(classes(yo)) == levels(Yo)
# broadcasted encoding:
@test int(X) == int(V)
@test int(Y) == int(W)
@test int(X; type=Int8) isa AbstractArray{Int8}
# encoding is right-inverse to decoding:
d = decoder(x)
@test d(int(V)) == V # ie have the same elements
e = decoder(y)
@test e(int(W)) == W
@test int(classes(y)) == 1:length(classes(x))
# int is based on ordering not index
v = categorical(['a', 'b', 'c'], ordered=true)
@test int(v) == 1:3
levels!(v, ['c', 'a', 'b'])
@test int(v) == [2, 3, 1]
# Errors
@test_throws DomainError int("g")
end
@testset "matrix, table" begin
B = rand(UInt8, (4, 5))
names = Tuple(Symbol("x$i") for i in 1:size(B,2))
tup = NamedTuple{names}(Tuple(B[:,i] for i in 1:size(B,2)))
@test matrix(Tables.rowtable(tup)) == B
@test matrix(table(B)) == B
@test matrix(table(B), transpose=true) == B'
X = (x1=rand(rng, 5), x2=rand(rng, 5))
@test table(X, prototype=Tables.rowtable((x1=[], x2=[]))) ==
Tables.rowtable(X)
T = table((x1=(1,2,3), x2=(:x, :y, :z)))
@test selectcols(T, :x1) == [1, 2, 3]
v = categorical(11:20)
A = hcat(v, v)
tab = table(A)
@test selectcols(tab, 1) == v
@test matrix(B) == B
@test matrix(B, transpose=true) == permutedims(B)
end
@testset "select etc" begin
N = 10
A = broadcast(x->Char(65 + mod(x, 5)), rand(rng, Int, N, 5))
X = CategoricalArrays.categorical(A)
names = Tuple(Symbol("x$i") for i in 1:size(A,2))
tup = NamedTuple{names}(Tuple(A[:,i] for i in 1:size(A, 2)))
nt = (tup..., z = 1:N)
tt = TypedTables.Table(nt)
rt = Tables.rowtable(tt)
ct = Tables.columntable(tt)
@test selectcols(nothing, 4:6) === nothing
@test selectrows(tt, 1) == selectrows(tt[1:1], :)
@test MLJBase.select(nothing, 2, :x) === nothing
s = schema(tt)
@test nrows(tt) == N
@test selectcols(tt, 4:6) ==
selectcols(TypedTables.Table(x4=tt.x4, x5=tt.x5, z=tt.z), :)
@test selectcols(tt, [:x1, :z]) ==
selectcols(TypedTables.Table(x1=tt.x1, z=tt.z), :)
@test selectcols(tt, :x2) == tt.x2
@test selectcols(tt, 2) == tt.x2
@test selectrows(tt, 4:6) == selectrows(tt[4:6], :)
@test nrows(tt) == N
@test MLJBase.select(tt, 2, :x2) == tt.x2[2]
@test selectrows(rt, 4:6) == rt[4:6]
@test selectrows(rt, :) == rt
@test selectrows(rt, 5) == rt[5,:]
@test nrows(rt) == N
@test Tables.rowtable(selectrows(ct, 4:6)) == rt[4:6]
@test selectrows(ct, :) == ct
@test Tables.rowtable(selectrows(ct, 5))[1] == rt[5,1]
# vector accessors
v = rand(rng, Int, 4)
@test selectrows(v, 2:3) == v[2:3]
@test selectrows(v, 2) == [v[2]]
@test nrows(v) == 4
v = categorical(collect("asdfasdf"))
@test selectrows(v, 2:3) == v[2:3]
@test selectrows(v, 2) == [v[2]]
@test nrows(v) == 8
# matrix accessors
A = rand(rng, 5, 10)
@test selectrows(A, 2:4) == A[2:4,:]
@test selectrows(A, 2:4) == A[2:4,:]
@test selectrows(A, 2) == A[2:2,:]
A = rand(rng, 5, 10) |> categorical
@test selectrows(A, 2:4) == A[2:4,:]
@test selectrows(A, 2:4) == A[2:4,:]
@test selectrows(A, 2) == A[2:2,:]
@test nrows(A) == 5
# TypedTables
v = categorical(collect("asdfasdf"))
tt = TypedTables.Table(v=v, w=v)
@test selectcols(tt, :w) == v
end
# https://github.com/JuliaAI/MLJBase.jl/issues/784
@testset "typename and dataframes" begin
df = DataFrames.DataFrame(x=[1,2,3], y=[2,3,4], z=[4,5,6])
@test MLJBase.typename(df) == "AbstractDataFrame"
@test MLJBase.isdataframe(df)
@test selectrows(df, 2:3) == df[2:3, :]
@test selectcols(df, [:x, :z]) == df[!, [:x, :z]]
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 257 | module Interface
using Test, Random, MLJBase,
Tables, CategoricalArrays,
OrderedCollections,
TypedTables, MLJModelInterface,
StableRNGs
using ..Models
rng = StableRNG(1551234)
include("data_utils.jl")
include("model_api.jl")
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | code | 2471 | module TestModelAPI
using Test
using MLJBase
using StatisticalMeasures
import MLJModelInterface
using ..Models
using Distributions
using StableRNGs
rng = StableRNG(661)
@testset "predict_*" begin
X = rand(rng, 5)
yfinite = categorical(collect("abaaa"))
ycont = float.(1:5)
clf = ConstantClassifier()
fitresult, _, _ = MLJBase.fit(clf, 1, X, yfinite)
@test predict_mode(clf, fitresult, X)[1] == 'a'
@test_throws(MLJBase.err_wrong_target_scitype(MLJBase.Finite),
predict_mean(clf, fitresult, X))
@test_throws(MLJBase.err_wrong_target_scitype(MLJBase.Finite),
predict_median(clf, fitresult, X))
rgs = ConstantRegressor()
fitresult, _, _ = MLJBase.fit(rgs, 1, X, ycont)
@test predict_mean(rgs, fitresult, X)[1] == 3
@test predict_median(rgs, fitresult, X)[1] == 3
@test predict_mode(rgs, fitresult, X)[1] == 3
end
mutable struct UnivariateFiniteFitter <: MLJModelInterface.Probabilistic
alpha::Float64
end
UnivariateFiniteFitter(;alpha=1.0) = UnivariateFiniteFitter(alpha)
@testset "models that fit a distribution" begin
function MLJModelInterface.fit(model::UnivariateFiniteFitter,
verbosity, X, y)
α = model.alpha
N = length(y)
_classes = classes(y)
d = length(_classes)
frequency_given_class = Distributions.countmap(y)
prob_given_class =
Dict(c => (frequency_given_class[c] + α)/(N + α*d) for c in _classes)
fitresult = MLJBase.UnivariateFinite(prob_given_class)
report = (params=Distributions.params(fitresult),)
cache = nothing
verbosity > 0 && @info "Fitted a $fitresult"
return fitresult, cache, report
end
MLJModelInterface.predict(model::UnivariateFiniteFitter,
fitresult,
X) = fitresult
MLJModelInterface.input_scitype(::Type{<:UnivariateFiniteFitter}) =
Nothing
MLJModelInterface.target_scitype(::Type{<:UnivariateFiniteFitter}) =
AbstractVector{<:Finite}
y = coerce(collect("aabbccaa"), Multiclass)
X = nothing
model = UnivariateFiniteFitter(alpha=0)
mach = machine(model, X, y)
fit!(mach, verbosity=0)
ytest = y[1:3]
yhat = predict(mach, nothing) # single UnivariateFinite distribution
@test cross_entropy(fill(yhat, 3), ytest) ≈
mean([-log(1/2), -log(1/2), -log(1/4)])
end
end
true
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 2782 | ## MLJBase
Repository for developers that provides core functionality for the
[MLJ](https://github.com/JuliaAI/MLJ.jl) machine
learning framework.
| Branch | Julia | Build | Coverage |
| -------- | ----- | ------ | -------- |
| `master` | v1 | [![Continuous Integration (CPU)][gha-img-master]][gha-url] | [![Code Coverage][codecov-img-master]][codecov-url-master] |
| `dev` | v1 | [![Continuous Integration (CPU)][gha-img-dev]][gha-url] | [![Code Coverage][codecov-img-dev]][codecov-url-dev] |
[gha-img-master]: https://github.com/JuliaAI/MLJBase.jl/workflows/CI/badge.svg?branch=master "Continuous Integration (CPU)"
[gha-img-dev]: https://github.com/JuliaAI/MLJBase.jl/workflows/CI/badge.svg?branch=dev "Continuous Integration (CPU)"
[gha-url]: https://github.com/JuliaAI/MLJBase.jl/actions/workflows/ci.yml
[codecov-img-master]: https://codecov.io/gh/JuliaAI/MLJBase.jl/branch/master/graphs/badge.svg?branch=master "Code Coverage"
[codecov-img-dev]: https://codecov.io/gh/JuliaAI/MLJBase.jl/branch/dev/graphs/badge.svg?branch=dev "Code Coverage"
[codecov-url-master]: https://codecov.io/github/JuliaAI/MLJBase.jl?branch=master
[codecov-url-dev]: https://codecov.io/github/JuliaAI/MLJBase.jl?branch=dev
[](https://juliaai.github.io/MLJBase.jl/stable/)
[MLJ](https://github.com/JuliaAI/MLJ.jl) is a Julia
framework for combining and tuning machine learning models. This
repository provides core functionality for MLJ, including:
- completing the functionality for methods defined "minimally" in
MLJ's light-weight model interface
[MLJModelInterface](https://github.com/JuliaAI/MLJModelInterface.jl) (/src/interface)
- definition of **machines** and their associated methods, such as
`fit!` and `predict`/`transform` (src/machines); see the usage sketch
at the end of this README.
- MLJ's **model composition** interface, including **learning
networks**, **pipelines**, **stacks**, **target transforms** (/src/composition)
- basic utilities for **manipulating datasets** and for **synthesizing datasets** (src/data)
- a [small
interface](https://JuliaAI.github.io/MLJ.jl/dev/evaluating_model_performance/#Custom-resampling-strategies-1)
for **resampling strategies** and implementations, including `CV()`, `StratifiedCV` and
`Holdout` (src/resampling.jl). Actual performance evaluation measures (aka metrics), which previously
were provided by MLJBase.jl, now live in [StatisticalMeasures.jl](https://juliaai.github.io/StatisticalMeasures.jl/dev/).
- methods for **performance evaluation**, based on those resampling strategies (src/resampling.jl)
- **one-dimensional hyperparameter range types**, constructors and
associated methods, for use with
[MLJTuning](https://github.com/JuliaAI/MLJTuning.jl) (src/hyperparam)
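As a rough sketch of how some of these pieces fit together (here `SomeRegressor` stands in for a hypothetical model type supplied by another package, and `rms` is a measure from StatisticalMeasures.jl):
```julia
using MLJBase
import StatisticalMeasures: rms
X, y = make_regression(100, 3)      # synthesize a small regression dataset
model = SomeRegressor()             # hypothetical model from another package
mach = machine(model, X, y)         # bind the model to data
fit!(mach, rows=1:70)               # train on a subset of the rows
yhat = predict(mach, rows=71:100)   # predict on the holdout rows
# or estimate performance using a resampling strategy:
evaluate(model, X, y, resampling=CV(nfolds=3), measure=rms)
```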
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 318 | # Composition
## Composites
```@autodocs
Modules = [MLJBase]
Pages = ["composition/composites.jl"]
```
## Networks
```@autodocs
Modules = [MLJBase]
Pages = ["composition/networks.jl"]
```
## Pipelines
```@autodocs
Modules = [MLJBase]
Pages = ["composition/pipeline_static.jl", "composition/pipelines.jl"]
```
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 996 | # Datasets
```@index
Pages = ["data/datasets_synthetic.jl"]
```
## Standard datasets
To add a new dataset, assuming it has a header and is at path
`data/newdataset.csv`:
Start by loading it with CSV:
```julia
fpath = joinpath("datadir", "newdataset.csv")
data = CSV.read(fpath, copycols=true,
categorical=true)
```
Load it with DelimitedFiles and Tables:
```julia
data_raw, data_header = readdlm(fpath, ',', header=true)
data_table = Tables.table(data_raw; header=Symbol.(vec(data_header)))
```
Retrieve the conversions:
```julia
for (n, st) in zip(names(data), scitype_union.(eachcol(data)))
println(":$n=>$st,")
end
```
Copy and paste the result into a `coerce` call:
```julia
data_table = coerce(data_table, ...)
```
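For example, if the printed pairs listed columns named `:age` and `:species` (hypothetical names, for illustration only), the pasted call would look like:
```julia
data_table = coerce(data_table,
                    :age=>Continuous,     # hypothetical column
                    :species=>Multiclass) # hypothetical column
```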
```@autodocs
Modules = [MLJBase]
Pages = ["data/datasets.jl"]
```
## Synthetic datasets
```@autodocs
Modules = [MLJBase]
Pages = ["data/datasets_synthetic.jl"]
```
## Utility functions
```@autodocs
Modules = [MLJBase]
Pages = ["data/data.jl"]
```
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 378 | # Distributions
## Univariate Finite Distribution
```@autodocs
Modules = [MLJBase]
Pages = ["interface/univariate_finite.jl"]
```
## hyperparameters
```@autodocs
Modules = [MLJBase]
Pages = ["hyperparam/one_dimensional_range_methods.jl", "hyperparam/one_dimensional_ranges.jl"]
```
## Utility functions
```@autodocs
Modules = [MLJBase]
Pages = ["distributions.jl"]
```
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 259 | # MLJBase.jl
These docs are bare-bones and auto-generated. Complete MLJ
documentation is
[here](https://JuliaAI.github.io/MLJ.jl/dev/).
For MLJBase-specific developer information, see also the [README.md
file](https://github.com/JuliaAI/MLJBase.jl#readme).
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 79 | # Resampling
```@autodocs
Modules = [MLJBase]
Pages = ["resampling.jl"]
```
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT"
] | 1.7.0 | 6f45e12073bc2f2e73ed0473391db38c31e879c9 | docs | 342 | # Utilities
## Machines
```@autodocs
Modules = [MLJBase]
Pages = ["machines.jl"]
```
## Parameter Inspection
```@autodocs
Modules = [MLJBase]
Pages = ["parameter_inspection.jl"]
```
## Show
```@autodocs
Modules = [MLJBase]
Pages = ["show.jl"]
```
## Utility functions
```@autodocs
Modules = [MLJBase]
Pages = ["utilities.jl"]
```
| MLJBase | https://github.com/JuliaAI/MLJBase.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 825 |
module JSONWebTokens
#=
Algorithms
HS256 HMAC using SHA-256 hash algorithm
HS384 HMAC using SHA-384 hash algorithm
HS512 HMAC using SHA-512 hash algorithm
RS256 RSASSA using SHA-256 hash algorithm
RS384 RSASSA using SHA-384 hash algorithm
RS512 RSASSA using SHA-512 hash algorithm
ES256 ECDSA using P-256 curve and SHA-256 hash algorithm
ES384 ECDSA using P-384 curve and SHA-384 hash algorithm
ES512 ECDSA using P-521 curve and SHA-512 hash algorithm
none No digital signature or MAC value included
=#
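# Of the algorithms listed above, this package implements "none" plus the
# HS (HMAC) and RS (RSASSA) families; see none.jl, hs.jl and rs.jl below.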
import JSON, SHA, MbedTLS
import Base64
using Random
abstract type Encoding end
function alg end
Base.show(io::IO, encoding::Encoding) = print(io, alg(encoding))
include("errors.jl")
include("base64url/Base64URL.jl")
include("jws.jl")
include("none.jl")
include("hs.jl")
include("rs.jl")
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 448 |
struct InvalidSignatureError <: Exception
end
struct MalformedJWTError <: Exception
msg::String
end
struct NotSupportedJWTError <: Exception
msg::String
end
Base.showerror(io::IO, e::InvalidSignatureError) = print(io, "Signature verification failed.")
Base.showerror(io::IO, e::MalformedJWTError) = print(io, "Malformed JWT: $(e.msg).")
Base.showerror(io::IO, e::NotSupportedJWTError) = print(io, "JWT format not supported: $(e.msg).")
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 905 |
struct HS{bits} <: Encoding
key::Vector{UInt8}
end
"HMAC SHA-256"
const HS256 = HS{256}
"HMAC using SHA-384"
const HS384 = HS{384}
"HMAC using SHA-512"
const HS512 = HS{512}
to_byte_array(str::AbstractString) = Vector{UInt8}(Base.CodeUnits(str))
HS{bits}(key::AbstractString) where {bits} = HS{bits}(to_byte_array(key))
alg(::HS{bits}) where {bits} = "HS$(bits)"
function has_valid_signature(encoding::HS,
header_and_claims_encoded::AbstractString,
signature_encoded::AbstractString) :: Bool
return signature_encoded == sign(encoding, header_and_claims_encoded)
end
sign(encoding::HS256, data::AbstractString) = base64url_encode(SHA.hmac_sha2_256(encoding.key, data))
sign(encoding::HS384, data::AbstractString) = base64url_encode(SHA.hmac_sha2_384(encoding.key, data))
sign(encoding::HS512, data::AbstractString) = base64url_encode(SHA.hmac_sha2_512(encoding.key, data))
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 4344 |
function base64url_encode(s) :: String
encoded_str = Base64URL.base64urlencode(s)
# removes trailing padding
for i in lastindex(encoded_str):-1:1 # encoded_str is made of ASCII chars only
if encoded_str[i] == '='
continue
else
return encoded_str[1:i]
end
end
end
function base64url_decode(s::AbstractString)
@assert isascii(s)
# adds padding back
r = rem(lastindex(s), 4)
if r != 0
for i in 1:(4 - r)
s *= "="
end
end
return Base64URL.base64urldecode(s)
end
find_dots(str::AbstractString) = findall(x -> x == '.', str)
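# Splits a JWS compact serialization (`header.claims[.signature]`, each part
# base64url-encoded) into its three parts; the signature part may be empty.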
function jws_split(str::AbstractString)
dot_indexes = find_dots(str)
if isempty(dot_indexes)
throw(MalformedJWTError("JWT must contain at least one '.' character."))
elseif length(dot_indexes) > 2
throw(NotSupportedJWTError("JWE format is not supported. Only JWS is supported for now."))
end
header = SubString(str, 1, dot_indexes[1] - 1)
if length(dot_indexes) == 1
claims = SubString(str, dot_indexes[1] + 1, lastindex(str))
signature = ""
else
claims = SubString(str, dot_indexes[1] + 1, dot_indexes[2] - 1)
signature = SubString(str, dot_indexes[2] + 1, lastindex(str))
end
return header, claims, signature
end
function jws_header_dict(header_encoded::AbstractString)
try
header_dict = JSON.parse(String(base64url_decode(header_encoded)))
@assert haskey(header_dict, "alg") "\"alg\" attribute is missing."
@assert haskey(header_dict, "typ") "\"typ\" attribute is missing."
@assert header_dict["typ"] == "JWT" "Expected \"typ\" == \"JWT\". Found $(header_dict["typ"])."
return header_dict
catch e
throw(MalformedJWTError("Couldn't parse header ($e)."))
end
end
function jws_claims_dict(claims_encoded::AbstractString)
try
claims_dict = JSON.parse(String(base64url_decode(claims_encoded)))
return claims_dict
catch err
throw(MalformedJWTError("Couldn't parse claims ($err)."))
end
end
function decode_as_json(encoding::Encoding, str::AbstractString)
header_encoded, claims_encoded, signature_encoded = jws_split(str)
header_dict = jws_header_dict(header_encoded)
if isempty(signature_encoded)
throw(MalformedJWTError("Expected alg $(alg(encoding)), but found empty signature field."))
end
if header_dict["alg"] != alg(encoding)
throw(MalformedJWTError("Expected alg $(alg(encoding)). Found $(header_dict["alg"])."))
end
verify(encoding, str)
try
return String(base64url_decode(claims_encoded))
catch err
throw(MalformedJWTError("Couldn't parse claims: ($err)."))
end
end
function decode(encoding::Encoding, str::AbstractString)
json_str = decode_as_json(encoding, str)
try
claims_dict = JSON.parse(json_str)
return claims_dict
catch err
throw(MalformedJWTError("Couldn't parse claims ($err)."))
end
end
function has_valid_signature(encoding::Encoding, str::AbstractString) :: Bool
last_dot_index = findlast( x -> x == '.', str)
    if last_dot_index === nothing
throw(MalformedJWTError("JWT must contain at least one '.' character."))
end
header_and_claims_encoded = SubString(str, 1, last_dot_index - 1)
if lastindex(str) <= last_dot_index + 1
throw(MalformedJWTError("JWT has no signature."))
end
signature_encoded = SubString(str, last_dot_index + 1, lastindex(str))
return has_valid_signature(encoding, header_and_claims_encoded, signature_encoded)
end
function verify(encoding::Encoding, str::AbstractString)
if !has_valid_signature(encoding, str)
throw(InvalidSignatureError())
end
nothing
end
function encode(encoding::Encoding, claims_dict::Dict{S, A}) where {S<:AbstractString, A}
return encode(encoding, JSON.json(claims_dict))
end
function encode(encoding::Encoding, claims_json::AbstractString)
header_encoded = base64url_encode("""{"alg":"$(alg(encoding))","typ":"JWT"}""")
claims_encoded = base64url_encode(claims_json)
header_and_claims_encoded = header_encoded * "." * claims_encoded
signature = sign(encoding, header_and_claims_encoded)
return header_and_claims_encoded * "." * signature
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 523 |
struct None <: Encoding
end
alg(::None) = "none"
function decode(::None, str::AbstractString)
header_encoded, claims_encoded, signature_encoded = jws_split(str)
return jws_claims_dict(claims_encoded)
end
function encode(encoding::None, claims_json::AbstractString)
header_encoded = base64url_encode("""{"alg":"$(alg(encoding))","typ":"JWT"}""")
claims_encoded = base64url_encode(claims_json)
header_and_claims_encoded = header_encoded * "." * claims_encoded
return header_and_claims_encoded
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 3727 |
struct RS{bits} <: Encoding
key::MbedTLS.PKContext
is_private_key::Bool
end
alg(::RS{bits}) where {bits} = "RS$(bits)"
function Base.show(io::IO, encoding::RS)
print(io, alg(encoding))
if encoding.is_private_key
print(io, " Private Key")
else
print(io, " Public Key")
end
end
"RSASSA using SHA-256 hash algorithm"
const RS256 = RS{256}
"RSASSA using SHA-384 hash algorithm"
const RS384 = RS{384}
md_hash_alg(::RS256) = MbedTLS.MD_SHA256
md_hash_alg(::RS384) = MbedTLS.MD_SHA384
function _try_base64decode(str::AbstractString) :: Union{Nothing, String}
try
return String(Base64.base64decode(str))
catch
return nothing
end
end
function _try_isfile(str::AbstractString) :: Bool
try
return isfile(str)
catch
return false
end
end
const PUBLIC_KEY_PREFIX = "-----BEGIN PUBLIC KEY-----"
const PRIVATE_KEY_PREFIX_1 = "-----BEGIN PRIVATE KEY-----"
const PRIVATE_KEY_PREFIX_2 = "-----BEGIN RSA PRIVATE KEY-----"
@inline function has_public_key_prefix(str::AbstractString) :: Bool
return startswith(str, PUBLIC_KEY_PREFIX)
end
@inline function has_private_key_prefix(str::AbstractString) :: Bool
return startswith(str, PRIVATE_KEY_PREFIX_1) || startswith(str, PRIVATE_KEY_PREFIX_2)
end
@inline function has_key_prefix(str::AbstractString) :: Bool
return has_public_key_prefix(str) || has_private_key_prefix(str)
end
@inline convert_string_to_bytes(str::AbstractString) :: Vector{UInt8} = convert(Vector{UInt8}, codeunits(str))
"""
RS{bits}(key_or_filepath::AbstractString) where {bits}
`key_or_filepath` can be either the key content (as plain text or a base64-encoded string) or the filepath to a key file.
"""
function RS{bits}(key_or_filepath::AbstractString) where {bits}
local key_as_bytes::Vector{UInt8}
local key_as_string::String
error_msg = "$key_or_filepath is not a valid RSA public or private key."
if has_key_prefix(key_or_filepath)
# plain key string
key_as_string = String(key_or_filepath)
key_as_bytes = convert_string_to_bytes(key_as_string)
else
# base64 encoded key string
decoded_key = _try_base64decode(key_or_filepath)
if (decoded_key != nothing) && (has_key_prefix(decoded_key))
key_as_string = String(decoded_key)
key_as_bytes = convert_string_to_bytes(decoded_key)
elseif _try_isfile(key_or_filepath)
# filepath
key_as_bytes = read(open(key_or_filepath, "r"))
key_as_string = String(copy(key_as_bytes))
else
throw(ArgumentError(error_msg))
end
end
context = MbedTLS.PKContext()
if has_public_key_prefix(key_as_string)
# public key
MbedTLS.parse_public_key!(context, key_as_bytes)
return RS{bits}(context, false)
elseif has_private_key_prefix(key_as_string)
# private key
MbedTLS.parse_key!(context, key_as_bytes)
return RS{bits}(context, true)
else
throw(ArgumentError(error_msg))
end
end
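# A usage sketch (assuming "private.pem"/"public.pem" exist, e.g. generated with
# openssl as described in the README):
#
#     rsa_private = RS256("private.pem")                       # filepath to a PEM file
#     rsa_public  = RS256(read("public.pem", String))          # PEM contents as a string
#     rsa_public2 = RS256(Base64.base64encode(read("public.pem", String)))  # base64-encoded PEM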
function sign(encoding::RS, data)
@assert encoding.is_private_key "Must sign using a private key."
md = md_hash_alg(encoding)
signature = MbedTLS.sign(encoding.key, md, MbedTLS.digest(md, data), MersenneTwister(0))
return base64url_encode(signature)
end
function has_valid_signature(encoding::RS, header_and_claims_encoded::AbstractString, signature_encoded::AbstractString) :: Bool
try
md = md_hash_alg(encoding)
_hash = MbedTLS.digest(md, header_and_claims_encoded)
return MbedTLS.verify(encoding.key, md, _hash, base64url_decode(signature_encoded)) == 0
catch e
return false
end
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 747 | # This file is a part of Julia. License is MIT: https://julialang.org/license
module Base64URL
# Base64EncodePipe is a pipe-like IO object which converts data written to it into
# base64 and sends it to a stream. (You must close the pipe to complete the encoding;
# this is separate from closing the target stream.) We also have a function
# base64encode(f, args...) which works like sprint except that it produces
# base64-encoded data, along with base64encode(args...) which is equivalent to
# base64encode(write, args...), to return base64 strings. A Base64DecodePipe object
# can be used to decode base64-encoded data read from a stream, while the function
# base64decode is useful for decoding strings.
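#
# In this vendored copy only the URL-safe helpers are exposed; a minimal usage
# sketch (the functions are defined in the files included below):
#
#     Base64URL.base64urlencode("foo")           # == "Zm9v"
#     String(Base64URL.base64urldecode("Zm9v"))  # == "foo"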
include("buffer.jl")
include("encode.jl")
include("decode.jl")
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 1252 | # This file is a part of Julia. License is MIT: https://julialang.org/license
# Data buffer for pipes.
mutable struct Buffer
data::Vector{UInt8}
ptr::Ptr{UInt8}
size::Int
function Buffer(bufsize)
data = Vector{UInt8}(undef, bufsize)
return new(data, pointer(data), 0)
end
end
Base.empty!(buffer::Buffer) = buffer.size = 0
Base.getindex(buffer::Buffer, i::Integer) = unsafe_load(buffer.ptr, i)
Base.setindex!(buffer::Buffer, v::UInt8, i::Integer) = unsafe_store!(buffer.ptr, v, i)
Base.firstindex(buffer::Buffer) = 1
Base.lastindex(buffer::Buffer) = buffer.size
Base.pointer(buffer::Buffer) = buffer.ptr
capacity(buffer::Buffer) = Int(pointer(buffer.data, lastindex(buffer.data) + 1) - buffer.ptr)
function consumed!(buffer::Buffer, n::Integer)
@assert n ≤ buffer.size
buffer.ptr += n
buffer.size -= n
end
function read_to_buffer(io::IO, buffer::Buffer)
offset = buffer.ptr - pointer(buffer.data)
copyto!(buffer.data, 1, buffer.data, offset, buffer.size)
buffer.ptr = pointer(buffer.data) + buffer.size
if !eof(io)
n = min(bytesavailable(io), capacity(buffer) - buffer.size)
unsafe_read(io, buffer.ptr + buffer.size, n)
buffer.size += n
end
return
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 5325 | # This file is a part of Julia. License is MIT: https://julialang.org/license
# Generate decode table.
const BASE64_CODE_END = 0x40
const BASE64_CODE_PAD = 0x41
const BASE64_CODE_IGN = 0x42
const BASE64_DECODE = fill(BASE64_CODE_IGN, 256)
for (i, c) in enumerate(BASE64_ENCODE)
BASE64_DECODE[Int(c)+1] = UInt8(i - 1)
end
BASE64_DECODE[Int(encodepadding())+1] = BASE64_CODE_PAD
#decode_base64(x::UInt8) = @inbounds return BASE64_DECODE[x + 1]
const BASE64URL_DECODE = fill(BASE64_CODE_IGN, 256)
for (i, c) in enumerate(BASE64URL_ENCODE)
BASE64URL_DECODE[Int(c)+1] = UInt8(i - 1)
end
BASE64URL_DECODE[Int(encodepadding())+1] = BASE64_CODE_PAD
decode_base64url(x::UInt8) = @inbounds return BASE64URL_DECODE[x + 1]
struct Base64DecodePipe{F<:Function} <: IO
io::IO
buffer::Buffer
rest::Vector{UInt8}
decode::F
function Base64DecodePipe(io::IO; decoder::T=decode_base64) where {T<:Function}
buffer = Buffer(512)
return new{T}(io, buffer, UInt8[], decoder)
end
end
#=
function Base.unsafe_read(pipe::Base64DecodePipe, ptr::Ptr{UInt8}, n::UInt)
p = read_until_end(pipe, ptr, n)
if p < ptr + n
throw(EOFError())
end
return nothing
end
=#
# Read and decode as much data as possible.
function read_until_end(pipe::Base64DecodePipe, ptr::Ptr{UInt8}, n::UInt)
p = ptr
p_end = ptr + n
while !isempty(pipe.rest) && p < p_end
unsafe_store!(p, popfirst!(pipe.rest))
p += 1
end
buffer = pipe.buffer
i = 0
b1 = b2 = b3 = b4 = BASE64_CODE_IGN
while true
if b1 < 0x40 && b2 < 0x40 && b3 < 0x40 && b4 < 0x40 && p + 2 < p_end
# fast path to decode
unsafe_store!(p , b1 << 2 | b2 >> 4)
unsafe_store!(p + 1, b2 << 4 | b3 >> 2)
unsafe_store!(p + 2, b3 << 6 | b4 )
p += 3
else
i, p, ended = decode_slow(b1, b2, b3, b4, buffer, i, pipe.io, p, p_end - p, pipe.rest, pipe.decode)
if ended
break
end
end
if p < p_end
if i + 4 ≤ lastindex(buffer)
b1 = pipe.decode(buffer[i+1])
b2 = pipe.decode(buffer[i+2])
b3 = pipe.decode(buffer[i+3])
b4 = pipe.decode(buffer[i+4])
i += 4
else
consumed!(buffer, i)
read_to_buffer(pipe.io, buffer)
i = 0
b1 = b2 = b3 = b4 = BASE64_CODE_IGN
end
else
break
end
end
consumed!(buffer, i)
return p
end
#=
function Base.read(pipe::Base64DecodePipe, ::Type{UInt8})
if isempty(pipe.rest)
unsafe_read(pipe, convert(Ptr{UInt8}, C_NULL), 0)
if isempty(pipe.rest)
throw(EOFError())
end
end
return popfirst!(pipe.rest)
end
=#
function Base.readbytes!(pipe::Base64DecodePipe, data::AbstractVector{UInt8}, nb::Integer=length(data))
filled::Int = 0
while filled < nb && !eof(pipe)
if length(data) == filled
resize!(data, min(length(data) * 2, nb))
end
p = pointer(data, filled + 1)
p_end = read_until_end(pipe, p, UInt(min(length(data), nb) - filled))
filled += p_end - p
end
resize!(data, filled)
return filled
end
Base.eof(pipe::Base64DecodePipe) = isempty(pipe.rest) && eof(pipe.io)
Base.close(pipe::Base64DecodePipe) = nothing
# Decode data from (b1, b2, b3, b4, buffer, input) into (ptr, rest).
function decode_slow(b1, b2, b3, b4, buffer, i, input, ptr, n, rest, decoder)
# Skip ignore code.
while true
if b1 == BASE64_CODE_IGN
b1, b2, b3 = b2, b3, b4
elseif b2 == BASE64_CODE_IGN
b2, b3 = b3, b4
elseif b3 == BASE64_CODE_IGN
b3 = b4
elseif b4 == BASE64_CODE_IGN
# pass
else
break
end
if i + 1 ≤ lastindex(buffer)
b4 = decoder(buffer[i+=1])
elseif !eof(input)
b4 = decoder(read(input, UInt8))
else
b4 = BASE64_CODE_END
break
end
end
# Check the decoded quadruplet.
k = 0
if b1 < 0x40 && b2 < 0x40 && b3 < 0x40 && b4 < 0x40
k = 3
elseif b1 < 0x40 && b2 < 0x40 && b3 < 0x40 && b4 == BASE64_CODE_PAD
b4 = 0x00
k = 2
elseif b1 < 0x40 && b2 < 0x40 && b3 == b4 == BASE64_CODE_PAD
b3 = b4 = 0x00
k = 1
elseif b1 == b2 == b3 == BASE64_CODE_IGN && b4 == BASE64_CODE_END
b1 = b2 = b3 = b4 = 0x00
else
throw(ArgumentError("malformed base64 sequence"))
end
# Write output.
p::Ptr{UInt8} = ptr
p_end = ptr + n
function output(b)
if p < p_end
unsafe_store!(p, b)
p += 1
else
push!(rest, b)
end
end
k ≥ 1 && output(b1 << 2 | b2 >> 4)
k ≥ 2 && output(b2 << 4 | b3 >> 2)
k ≥ 3 && output(b3 << 6 | b4 )
return i, p, k == 0
end
#=
function base64decode(s)
b = IOBuffer(s)
try
return read(Base64DecodePipe(b))
finally
close(b)
end
end
=#
function base64urldecode(s)
b = IOBuffer(s)
try
return read(Base64DecodePipe(b, decoder=decode_base64url))
finally
close(b)
end
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 5064 | # This file is a part of Julia. License is MIT: https://julialang.org/license
# Generate encode table.
const BASE64_ENCODE = [UInt8(x) for x in ['A':'Z'; 'a':'z'; '0':'9'; '+'; '/']]
# '-' and '_' instead of '+' and '/'
const BASE64URL_ENCODE = [UInt8(x) for x in ['A':'Z'; 'a':'z'; '0':'9'; '-'; '_']]
#encode_base64(x::UInt8) = @inbounds return BASE64_ENCODE[(x & 0x3f) + 1]
encode_base64url(x::UInt8) = @inbounds return BASE64URL_ENCODE[(x & 0x3f) + 1]
encodepadding() = UInt8('=')
struct Base64EncodePipe{F<:Function} <: IO
io::IO
buffer::Buffer
encode::F
function Base64EncodePipe(io::IO; encoder::T=encode_base64) where {T<:Function}
# The buffer size must be at least 3.
buffer = Buffer(512)
pipe = new{T}(io, buffer, encoder)
finalizer(_ -> close(pipe), buffer)
return pipe
end
end
function Base.unsafe_write(pipe::Base64EncodePipe, ptr::Ptr{UInt8}, n::UInt)::Int
buffer = pipe.buffer
m = buffer.size
b1, b2, b3, k = loadtriplet!(buffer, ptr, n)
@assert k ≥ m
p = ptr + k - m
if k < 3
if k == 1
buffer[1] = b1
buffer.size = 1
elseif k == 2
buffer[1] = b1
buffer[2] = b2
buffer.size = 2
end
return p - ptr
end
@assert buffer.size == 0
i = 0
p_end = ptr + n
while true
buffer[i+1] = pipe.encode(b1 >> 2 )
buffer[i+2] = pipe.encode(b1 << 4 | b2 >> 4)
buffer[i+3] = pipe.encode(b2 << 2 | b3 >> 6)
buffer[i+4] = pipe.encode( b3 )
i += 4
if p + 2 < p_end
b1 = unsafe_load(p, 1)
b2 = unsafe_load(p, 2)
b3 = unsafe_load(p, 3)
p += 3
else
break
end
if i + 4 > capacity(buffer)
unsafe_write(pipe.io, pointer(buffer), i)
i = 0
end
end
if i > 0
unsafe_write(pipe.io, pointer(buffer), i)
end
while p < p_end
buffer[buffer.size+=1] = unsafe_load(p)
p += 1
end
return p - ptr
end
#=
function Base.write(pipe::Base64EncodePipe, x::UInt8)
buffer = pipe.buffer
buffer[buffer.size+=1] = x
if buffer.size == 3
unsafe_write(pipe, C_NULL, 0)
end
return 1
end
=#
function Base.close(pipe::Base64EncodePipe)
b1, b2, b3, k = loadtriplet!(pipe.buffer, Ptr{UInt8}(C_NULL), UInt(0))
if k == 0
# no leftover and padding
elseif k == 1
write(pipe.io,
pipe.encode(b1 >> 2),
pipe.encode(b1 << 4),
encodepadding(),
encodepadding())
elseif k == 2
write(pipe.io,
pipe.encode( b1 >> 2),
pipe.encode(b1 << 4 | b2 >> 4),
pipe.encode(b2 << 2 ),
encodepadding())
else
@assert k == 3
write(pipe.io,
pipe.encode(b1 >> 2 ),
pipe.encode(b1 << 4 | b2 >> 4),
pipe.encode(b2 << 2 | b3 >> 6),
pipe.encode( b3 ))
end
return nothing
end
# Load three bytes from buffer and ptr.
function loadtriplet!(buffer::Buffer, ptr::Ptr{UInt8}, n::UInt)
b1 = b2 = b3 = 0x00
if buffer.size == 0
if n == 0
k = 0
elseif n == 1
b1 = unsafe_load(ptr, 1)
k = 1
elseif n == 2
b1 = unsafe_load(ptr, 1)
b2 = unsafe_load(ptr, 2)
k = 2
else
b1 = unsafe_load(ptr, 1)
b2 = unsafe_load(ptr, 2)
b3 = unsafe_load(ptr, 3)
k = 3
end
elseif buffer.size == 1
b1 = buffer[1]
if n == 0
k = 1
elseif n == 1
b2 = unsafe_load(ptr, 1)
k = 2
else
b2 = unsafe_load(ptr, 1)
b3 = unsafe_load(ptr, 2)
k = 3
end
elseif buffer.size == 2
b1 = buffer[1]
b2 = buffer[2]
if n == 0
k = 2
else
b3 = unsafe_load(ptr, 1)
k = 3
end
else
@assert buffer.size == 3
b1 = buffer[1]
b2 = buffer[2]
b3 = buffer[3]
k = 3
end
empty!(buffer)
return b1, b2, b3, k
end
#=
function base64encode(f::Function, args...; context=nothing)
s = IOBuffer()
b = Base64EncodePipe(s)
if context === nothing
f(b, args...)
else
f(IOContext(b, context), args...)
end
close(b)
return String(take!(s))
end
base64encode(args...; context=nothing) = base64encode(write, args...; context=context)
=#
function base64urlencode(f::Function, args...; context=nothing)
s = IOBuffer()
b = Base64EncodePipe(s, encoder=encode_base64url)
if context === nothing
f(b, args...)
else
f(IOContext(b, context), args...)
end
close(b)
return String(take!(s))
end
base64urlencode(args...; context=nothing) = base64urlencode(write, args...; context=context)
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | code | 12066 |
import JSONWebTokens, SHA, MbedTLS, JSON
using Test
using Random
@testset "base64url_encode/decode" begin
header = """{"alg":"HS256","typ":"JWT"}"""
claims = """{"sub":"1234567890","name":"John Doe","iat":1516239022}"""
secret = "123"
header_and_claims_encoded = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ"
@test JSONWebTokens.base64url_encode(header) * "." * JSONWebTokens.base64url_encode(claims) == header_and_claims_encoded
@test JSONWebTokens.base64url_encode(SHA.hmac_sha2_256( JSONWebTokens.to_byte_array(secret), header_and_claims_encoded)) == "pF3q46_CLIyP_1QZPpeccbs-hC4n9YW2VMBjKrSO6Wg"
encoding = JSONWebTokens.None()
show(IOBuffer(), encoding)
claims_dict = JSON.parse(claims)
@test JSONWebTokens.decode(encoding, JSONWebTokens.encode(encoding, claims_dict)) == claims_dict
end
@testset "HS256 valid JSONWebTokens decode" begin
jwt_encoded = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.8TLPbKjmE0uGLQyLnfHx2z-zy6G8qu5zFFXRSuJID_Y"
encoding = JSONWebTokens.HS256("secretkey")
show(IOBuffer(), encoding)
json = JSONWebTokens.decode_as_json(encoding, jwt_encoded)
println("JSON: $json")
claims_dict = JSONWebTokens.decode(encoding, jwt_encoded)
@test claims_dict["sub"] == "1234567890"
@test claims_dict["name"] == "John Doe"
@test claims_dict["iat"] == 1516239022
end
@testset "HS256 invalid JSONWebTokens decode" begin
encoding = JSONWebTokens.HS256("secretkey")
jwt_encoded_invalid_1 = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.fyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.8TLPbKjmE0uGLQyLnfHx2z-zy6G8qu5zFFXRSuJID_Y"
jwt_encoded_invalid_2 = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.8TLPbKjmE0uGLQyLnfHx2z-zy6G8qu5zFFXRSuJJD_Y"
@test_throws JSONWebTokens.InvalidSignatureError JSONWebTokens.decode(encoding, jwt_encoded_invalid_1)
@test_throws JSONWebTokens.InvalidSignatureError JSONWebTokens.decode(encoding, jwt_encoded_invalid_2)
end
@testset "HS256 encode/decode" begin
encoding = JSONWebTokens.HS256("secretkey")
claims_json = """{"sub":"1234567890","name":"John Doe","iat":1516239022}"""
claims_dict = JSON.parse(claims_json)
@test JSONWebTokens.encode(encoding, claims_json) == "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.8TLPbKjmE0uGLQyLnfHx2z-zy6G8qu5zFFXRSuJID_Y"
@test JSONWebTokens.decode(encoding, JSONWebTokens.encode(encoding, claims_dict)) == claims_dict
end
# how to generate public/private key using openssl
# https://www.devco.net/archives/2006/02/13/public_-_private_key_encryption_using_openssl.php
# private.pem / public.pem generated using
# $ openssl genrsa -out private.pem 2048
# $ openssl rsa -in private.pem -out public.pem -outform PEM -pubout
# $ openssl genrsa -out private2.pem 2048
# $ openssl rsa -in private2.pem -out public2.pem -outform PEM -pubout
@testset "MbedTLS" begin
header = """{"alg":"RS256","typ":"JWT"}"""
claims = """{"sub":"1234567890","name":"John Doe","admin":true,"iat":1516239022}"""
header_and_claims_encoded = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0"
signature_encoded = "o9uMYrmOqgdBqhbOBzuiN_0nFp2Ed1J4urFx-TyY61AgM6tUTutTGfIsIZERVjqRXAKd6bGYPuVlGf5m-XADAmqnKTpxcaP_t5ipNfsB6g9rudi7U3uWYldbSfW0-cnayISt5Eyga23Qs5ZqY7e7uQHN_z_mI2Cmoari91ZGnt1jte11gFNd7icMDGz9laBZESeFGFECAxP2hCvrg_G0dCySh_AVnYerD0iF0MznMvV1dxxuprjeQDunQtG3h2uQrJMTBEvCVPxrf7Kql3_k9S4pQDQaoPGQPO9yogpdYdgS5OV3LdSvjlDwRQL6FlDTgB3l1sv0NkEpRviR3x9VLA"
@test JSONWebTokens.base64url_encode(header) * "." * JSONWebTokens.base64url_encode(claims) == header_and_claims_encoded
private_key_file = joinpath(@__DIR__, "private.pem")
@assert isfile(private_key_file) "Couldn't find test private key file $private_key_file."
key = MbedTLS.parse_keyfile(private_key_file)
_hash = MbedTLS.digest(MbedTLS.MD_SHA256, header_and_claims_encoded)
output = MbedTLS.sign(key, MbedTLS.MD_SHA256, _hash, MersenneTwister(0))
@test JSONWebTokens.base64url_encode(output) == signature_encoded
public_key_file = joinpath(@__DIR__, "public.pem")
@assert isfile(public_key_file) "Couldn't find test public key file $public_key_file."
pubkey_string = read(open(public_key_file, "r"))
pubkey = MbedTLS.PKContext()
MbedTLS.parse_public_key!(pubkey, pubkey_string)
@test MbedTLS.verify(pubkey, MbedTLS.MD_SHA256, _hash, JSONWebTokens.base64url_decode(signature_encoded)) == 0
end
@testset "RSA - keys in files" begin
fp_public = joinpath(@__DIR__, "public.pem")
fp_private = joinpath(@__DIR__, "private.pem")
@assert isfile(fp_public)
@assert isfile(fp_private)
rsa_public = JSONWebTokens.RS256(fp_public)
rsa_private = JSONWebTokens.RS256(fp_private)
show(IOBuffer(), rsa_public)
show(IOBuffer(), rsa_private)
claims_dict = JSON.parse("""{"sub":"1234567890","name":"John Doe","admin":true,"iat":1516239022}""")
jwt = JSONWebTokens.encode(rsa_private, claims_dict)
@test startswith(jwt, "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.")
@test JSONWebTokens.decode(rsa_public, jwt) == claims_dict
fp_public2 = joinpath(@__DIR__, "public2.pem")
fp_private2 = joinpath(@__DIR__, "private2.pem")
@assert isfile(fp_public2)
@assert isfile(fp_private2)
rsa_public2 = JSONWebTokens.RS256(fp_public2)
rsa_private2 = JSONWebTokens.RS256(fp_private2)
@test_throws JSONWebTokens.InvalidSignatureError JSONWebTokens.decode(rsa_public2, jwt)
jwt2 = JSONWebTokens.encode(rsa_private2, claims_dict)
@test jwt != jwt2
@test JSONWebTokens.decode(rsa_public2, jwt2) == claims_dict
@test_throws JSONWebTokens.InvalidSignatureError JSONWebTokens.decode(rsa_public, jwt2)
@test_throws AssertionError JSONWebTokens.encode(rsa_public, claims_dict)
end
@testset "RSA - keys inline" begin
public_key = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwh4KT/453FE+H2myUOtY
MJlyDMtkElgdM2G8CkupqbTy7ucCgMb5rrNGKW22ZdyAoPDXCkpqc0jkCEco1nKi
wYNE4nfcit1MDUwOqXWMVgYUsFZNQEqBYUKxJYApXbiaybkKw7Yn26VFu6+culTN
+05RXSg2I6gYcWoiQMjnPqcrvTlhYRbCLW+0+bISKSoUxm5hRV6FwfEmR30LWtaF
jHIUNHAX9dg+PVGrKPgK85T4uXKI4SNg6h+Rvgty2pQ9XMbkdli5j/450oWFOa6F
NJfYQZOX5DMLOIWKOOM0IPCmRwBxzTpOCVgvc7g1KBnw1efzdwhZo1yp5PmqbiLC
gQIDAQAB
-----END PUBLIC KEY-----"""
private_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAwh4KT/453FE+H2myUOtYMJlyDMtkElgdM2G8CkupqbTy7ucC
gMb5rrNGKW22ZdyAoPDXCkpqc0jkCEco1nKiwYNE4nfcit1MDUwOqXWMVgYUsFZN
QEqBYUKxJYApXbiaybkKw7Yn26VFu6+culTN+05RXSg2I6gYcWoiQMjnPqcrvTlh
YRbCLW+0+bISKSoUxm5hRV6FwfEmR30LWtaFjHIUNHAX9dg+PVGrKPgK85T4uXKI
4SNg6h+Rvgty2pQ9XMbkdli5j/450oWFOa6FNJfYQZOX5DMLOIWKOOM0IPCmRwBx
zTpOCVgvc7g1KBnw1efzdwhZo1yp5PmqbiLCgQIDAQABAoIBAGDgYx8m7jNw7EL7
Gf3eZiXi/pM6ElhBV1lkRlcRCbxjTPZDnfEs3ED+wV49ndDaKeuoJnnBR7z/PKaQ
9OWJUoam/4LSdONsq97a/Vo/CumHoV2bxHP4evdSNFxVyM84KS/RRHkF+IBazCFt
9Bbd6eqoXFzUi6hh5Mj9Qdj5KscOAmore/4HYw5DkzuNvtutgUaPx7SA+LwBobXP
NfVG+EKjIWan4XVUbm8QwgCdYqbpY2s5NFZMq/zu6mFGz2my73fZyCIRy0reOs5x
dg9A8xrjXMWMU8HsCAqS6o+3FXgQBmupMd53S6PAsjM7CHVS2we8T0nY1Yqfiylx
nhUZ9H0CgYEA5CbJd2RJ+9asqT+ykTKFV3qlt9Nq1Z2s7QHTfbLOPc6Jm4f+bXgD
C2Ae2v3YvSCHVs40WkJHwJA715AZm9rtlSE4QdzVCxr4D0vhbdhp7CPArnCSeyAF
Yoqt7ZHCbm3JhM31OBOwJJuRZ2jZobzTsvOCX8vyTxT6svyU9/vXtfMCgYEA2c/D
b3dib34ShJtY8MMTcCTiAv4PGrsbYF9p+9OPKvL4+na5gH9mKmwukgLBSnzeesle
ywpk2yy2Y/J3HSIOW1FUgu64bt9l0MKEFx+3Vwex3rllqhMFp7AozRLw3H+5olMT
5syy9ql8kMSsqEB9OWERQ3CJ5P2Qx6XlcsCAPrsCgYACscqTVGXjSYfEf/IV8OjO
Pa6TWzXZzADs06axx1jUNgo+Af8pP8+ZZMs4fuL+aNHwXoMTxdCfH5T1WMhUpONF
bZ0Ceh8yAGGJnLXO3E1z8oAmD0JLnfcyULz5H02SjE1i+iO5Q9JCvGudMwnO9THy
3RlfFEOKV48WahFAVIMZrQKBgCgZ6l+BWWwxh/NGLq/VGqURBVOLtvgy7q1lo7ur
jbZYmaJzbV/NFOBGnqRfQXsXVlbA8GTtevgnWUU5hNimRoJljOu2S9qN4s72oR8o
xbaOQh9Bfwg7DFV9R2XKUPInyeOq7AUYNvLW7Yoxy6AGj4ea6XTDKYAxdxBq6L2h
13q1AoGAZ6szDCRLW+69n+QKPlujfM8CSjDofnLLvr9RHSuIiFv3+moWXPPUQJf5
Gt/YOYUZ+k9mfMpC5OIrE/O+9NlUYciwl6wwJjdK9GBJuAQNqa1ZwtEioPYO3ZW6
hL1Hq+f0MJkBnql53kFDSth1fQSkSMMHIb1LGFYmoT3mSDwHDho=
-----END RSA PRIVATE KEY-----"""
rsa_public = JSONWebTokens.RS256(public_key)
rsa_private = JSONWebTokens.RS256(private_key)
show(IOBuffer(), rsa_public)
show(IOBuffer(), rsa_private)
claims_dict = JSON.parse("""{"sub":"1234567890","name":"John Doe","admin":true,"iat":1516239022}""")
jwt = JSONWebTokens.encode(rsa_private, claims_dict)
@test startswith(jwt, "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.")
@test JSONWebTokens.decode(rsa_public, jwt) == claims_dict
@testset "base64 encoded key" begin
pub_key_encoded = JSONWebTokens.Base64.base64encode(public_key)
rsa_public = JSONWebTokens.RS256(pub_key_encoded)
@test JSONWebTokens.decode(rsa_public, jwt) == claims_dict
end
end
@testset "private key - different prefix" begin
private_key = """-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDpfdYaKQcs1fg4
+nDHQLLV+UX1uNb/5nAnGkSgv+K2bWBnpnqGsGX1Gp/AsYjWA/R5SfWe5UZoFvdu
rAWCM8+y8gJwU1jRuEYUeCDKft4TYgFikdcmpsnVmo+R8dIKviji+drZcZuW3cqR
/+d7nSpbT2OOWbaENOJE/NYmQa4Ha4wKqch1YuD/qjTiB5xPGcNqX14/Z02iitA/
+c/TF9itfNLMkFp5ILD6z4yFtEgMZpk+2ohEKkZiU4KxZwxjTr8xJWGzkGxQKQKJ
m8uc8PRgYxl4dq/KLK6UUtkBwb1ktWiYRu7h++tiUAqkUblOCFO3duHGWlziizY6
+somCdcDAgMBAAECggEAIprczXnBL3ry0/cCGfXTy3SrUrS3YKvVeWYiP7TQs/rX
6+S3ihjAs6fjf1qQji993y//8DHI0op813E1S8vD/6Bwjhc8NbDa6hO3wGs3HKZM
0EAPJBJaHYdPufCeEauHQcSIE+wLhDuQ1zyXITH04h2NAr73FvcDhff3ASqA0WZt
nSXMLt/z9IEh3yR7DjTsO1efATMLIbydM7Hy1GmIEo4oqlsKWz0KHe6IrtjUoUu0
8RQGQkMBcGhp8gABoAqoj/Y2kbkzvZqeUTpo8kgzcNBhdQ/kGmrYjGbkMSTjzEyd
L321GWO6TmcocpX6ElfUU39DgzpYeFYuSmOMjde8gQKBgQD2MaOw/2APEM327pTP
rna886Rr0Kw4IfhnS255lUPvP1nllw9Q/hd0BwfAZl8rIGqQcoXFRdGp4QA9zymt
lDk6FIDOH9lu6/DF/Mu0mvt7kyZpfkdECFzw8u8Xcu+P4MfATLE/bgp5agT/NqCy
cKozQ3DIvQfcPGMgTmZ6PBS6QQKBgQDyyqzQTqhDiybSHpxpMWML8F56wLKxpL1Y
oem0loi0RFr7HJQgbgFtXqo8T8nVQV7WnwZ5Zhq7/X6JeodInZRHSiqVvWqs+648
gFp7nkZkt7wj1BdM6XPxNBP1oyPAoceBwmb+GRIb3CiZyUuXBqT7SDQx52fA95Sv
F7qiy2cYQwKBgQCkAf+lawsIHk7HgLrb+8KYL8tKE9KW9nJwBb1L+9cs68wsecy3
TP48ym4si0Y9CbRUFCbve4qoG/84Lej6/LJ+8ae5KViFX2Kf0r+1ykVcVnQRdRFu
zg90aLJEscnL1JfdkHnH4rvRlIJNrouxNAL+caAR6nPxEL2MiQ4Vwi6gwQKBgQCA
xCkfWXg4SmbObdm6mKzVOiiBpg2f1aVuioDufCAIq3AtWhzdjjpHjTtwPUjoR4FL
BpBidlqbdXhU5Z02UAzCOn2XaRxYnirsf459nZXr+N5ZijTJX89UD7C+SEW/9o1f
LoF9JkLKb9tApNfoQ2fgtYypIveOCkDbFBSyHnqRDQKBgQC5nLloja1VudR0uBNg
JQmkJqrhO4s+fwx9+PIDRycI/UOxfI4XCIFUS9jFgjKWCh9TWBX8jJNM22FT3WnS
hdYV60DqxNmfKdNolXAj8kzhpmmukJ1VAsVJYkrNqsE298FEDCOGqWVGAL9FerjT
IHkGhtMpH1DU8l9tK/CWT1rVYg==
-----END PRIVATE KEY-----"""
public_key = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6X3WGikHLNX4OPpwx0Cy
1flF9bjW/+ZwJxpEoL/itm1gZ6Z6hrBl9RqfwLGI1gP0eUn1nuVGaBb3bqwFgjPP
svICcFNY0bhGFHggyn7eE2IBYpHXJqbJ1ZqPkfHSCr4o4vna2XGblt3Kkf/ne50q
W09jjlm2hDTiRPzWJkGuB2uMCqnIdWLg/6o04gecTxnDal9eP2dNoorQP/nP0xfY
rXzSzJBaeSCw+s+MhbRIDGaZPtqIRCpGYlOCsWcMY06/MSVhs5BsUCkCiZvLnPD0
YGMZeHavyiyulFLZAcG9ZLVomEbu4fvrYlAKpFG5TghTt3bhxlpc4os2OvrKJgnX
AwIDAQAB
-----END PUBLIC KEY-----"""
@test JSONWebTokens.has_private_key_prefix(private_key)
rsa_public = JSONWebTokens.RS256(public_key)
rsa_private = JSONWebTokens.RS256(private_key)
show(IOBuffer(), rsa_public)
show(IOBuffer(), rsa_private)
claims_dict = JSON.parse("""{"sub":"1234567890","name":"John Doe","admin":true,"iat":1516239022}""")
jwt = JSONWebTokens.encode(rsa_private, claims_dict)
@test startswith(jwt, "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.")
@test JSONWebTokens.decode(rsa_public, jwt) == claims_dict
@testset "base64 encoded key" begin
pub_key_encoded = JSONWebTokens.Base64.base64encode(public_key)
rsa_public = JSONWebTokens.RS256(pub_key_encoded)
@test JSONWebTokens.decode(rsa_public, jwt) == claims_dict
end
end
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT",
"Apache-2.0"
] | 1.1.1 | 0fc1200c36d1050a3acfecb05ee1a718859ff836 | docs | 2823 |
# JSONWebTokens.jl
[![License][license-img]](LICENSE)
[![CI][ci-img]][ci-url]
[![codecov][codecov-img]][codecov-url]
[license-img]: http://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square
[ci-img]: https://github.com/felipenoris/JSONWebTokens.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/felipenoris/JSONWebTokens.jl/actions?query=workflow%3ACI
[codecov-img]: https://img.shields.io/codecov/c/github/felipenoris/JSONWebTokens.jl/master.svg?label=codecov&style=flat-square
[codecov-url]: http://codecov.io/github/felipenoris/JSONWebTokens.jl?branch=master
Secure your Julia APIs with [JWT](https://jwt.io/).
# Requirements
Julia v1.3 or later.
# Installation
```julia
julia> import Pkg; Pkg.add("JSONWebTokens")
```
# Usage
## For HMAC Algorithms
Encode:
```julia
import JSONWebTokens
claims_dict = Dict( "sub" => "1234567890", "name" => "John Doe", "iat" => 1516239022)
encoding = JSONWebTokens.HS256("secretkey") # select HS256 encoding
jwt = JSONWebTokens.encode(encoding, claims_dict)
```
```
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiSm9obiBEb2UiLCJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyfQ.TjUTSL0RQayQG-y_h2Tl3FmAgxhC0fYtmeiU7jnMdXY"
```
Decode:
```julia
JSONWebTokens.decode(encoding, jwt)
```
```
Dict{String,Any} with 3 entries:
"name" => "John Doe"
"sub" => "1234567890"
"iat" => 1516239022
```
## For RSASSA (RSA) Algorithms
First, generate public and private keys. You can use `openssl`.
```shell
$ openssl genrsa -out private.pem 2048
$ openssl rsa -in private.pem -out public.pem -outform PEM -pubout
```
Use the private key to encode.
```julia
import JSONWebTokens
claims_dict = Dict( "sub" => "1234567890", "name" => "John Doe", "iat" => 1516239022)
rsa_private = JSONWebTokens.RS256("private.pem") # accepts a filepath, string or base64 encoded string
jwt = JSONWebTokens.encode(rsa_private, claims_dict)
```
```
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiSm9obiBEb2UiLCJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyfQ.HUXm8CAiY9EKX3dU1Ym7bZvL7yXMu3TC9iL1do0jvM0oD2rSqY5K06KmQy1qJETYZAIZIgA6ZrX2Q3ug01DVu-Yf1Kx3-OpC39eYuBGH-7P1QgwEcizbh6dw07LGC-xshru1v_tKi9IaogiitnEMLLeGdOuCTtYw2gDRjACq2L2UiJTAgurZ_yxE3cMApo492leubNo9fADtRPpofy37Q2VivfS4XwlTkS9Bxg6jrkBhTr-ieuiBx_kAmk2Zps5f9ih-aNPXi_3p5tNH-8LUMJ5L2CTb6Ui1ghyElI7k8wfXzQIm0fGRiQu9OBnqgm2Bh9AivquXXeX6JQGxyntDqA"
```
Use the public key to decode.
```julia
rsa_public = JSONWebTokens.RS256("public.pem") # accepts a filepath, string or base64 encoded string
JSONWebTokens.decode(rsa_public, jwt)
```
```
Dict{String,Any} with 3 entries:
"name" => "John Doe"
"sub" => "1234567890"
"iat" => 1516239022
```
# Supported Algorithms
* HS256
* HS384
* HS512
* RS256
* RS384
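Each of these is used through the same `encode`/`decode` interface shown above. For example, a quick sketch with HS512 (the secret is just a placeholder):
```julia
import JSONWebTokens
encoding = JSONWebTokens.HS512("secretkey")
jwt = JSONWebTokens.encode(encoding, Dict("sub" => "1234567890"))
claims = JSONWebTokens.decode(encoding, jwt)
```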
# References
* [RFC7519](https://tools.ietf.org/html/rfc7519)
* [jwt.io](https://jwt.io)
| JSONWebTokens | https://github.com/felipenoris/JSONWebTokens.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 410 | using JuliaFormatter
# we assume the format_all.jl script is located in QEDprocesses.jl/.formatting
project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..")
is_formatted = format(project_path; verbose=true)
if is_formatted
    @info "Formatting verified."
else
    @warn "Formatting verification failed: Some files are not properly formatted!"
end
exit(is_formatted ? 0 : 1)
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 1077 | using Pkg
# targeting the correct source code
# this assumes the make.jl script is located in QEDprocesses.jl/docs
project_path = Base.Filesystem.joinpath(Base.Filesystem.dirname(Base.source_path()), "..")
Pkg.develop(; path=project_path)
# temporarily necessary because QEDprocesses used to have a compat entry that is gone after the `develop` above
Pkg.update()
using Documenter
using QEDprocesses
DocMeta.setdocmeta!(QEDprocesses, :DocTestSetup, :(using QEDprocesses); recursive=true)
makedocs(;
modules=[QEDprocesses],
authors="Uwe Hernandez Acosta <[email protected]>, Simeon Ehrig, Klaus Steiniger, Tom Jungnickel, Anton Reinhard",
repo=Documenter.Remotes.GitHub("QEDjl-project", "QEDprocesses.jl"),
sitename="QEDprocesses.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://qedjl-project.gitlab.io/QEDprocesses.jl",
edit_link="dev",
assets=String[],
),
pages=["Home" => "index.md"],
)
deploydocs(; repo="github.com/QEDjl-project/QEDprocesses.jl.git", push_preview=false)
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 735 | module QEDprocesses
# constants
export ALPHA,
ALPHA_SQUARE, ELEMENTARY_CHARGE, ELEMENTARY_CHARGE_SQUARE, ELECTRONMASS, ONE_OVER_FOURPI
# propagator
export propagator
# specific compute models
export PerturbativeQED
# specific scattering processes
export Compton, omega_prime
using QEDbase
using QEDcore
using StaticArrays
using QuadGK
include("constants.jl")
include("utils.jl")
include("models/models.jl")
# one photon compton
include("processes/one_photon_compton/process.jl")
include("processes/one_photon_compton/perturbative/kinematics.jl")
include("processes/one_photon_compton/perturbative/cross_section.jl")
include("processes/one_photon_compton/perturbative/total_probability.jl")
include("patch_QEDbase.jl")
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 235 |
const ALPHA = inv(137.035999074)
const ALPHA_SQUARE = ALPHA^2
const ELEMENTARY_CHARGE = sqrt(4 * pi * ALPHA)
const ELEMENTARY_CHARGE_SQUARE = 4 * pi * ALPHA
const ELECTRONMASS = 0.510998928e6 # eV
const ONE_OVER_FOURPI = 1 / (4 * pi)
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 97 | #############
# Patches for `QEDbase.jl`
# remove if this went into `QEDbase.jl`
#
#############
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 630 | ###########
# utility functions
#
# This file contains small helper and utility functions used throughout the package.
###############
"""
_base_component_type(array_of_lv::AbstractArray{LV}) where {LV<:AbstractLorentzVector}
Return the type of the components of the given Lorentz vectors, which are themselves elements of an
`AbstractArray`.
# Examples
```julia
julia> using QEDbase
julia> using QEDprocesses
julia> v = Vector{SFourMomentum}(undef,10)
julia> QEDprocesses._base_component_type(v)
Float64
```
"""
function _base_component_type(::AbstractArray{LV}) where {LV<:AbstractLorentzVector}
return eltype(LV)
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 31 | include("perturbative_qed.jl")
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 887 |
struct PerturbativeQED <: AbstractModelDefinition end
QEDbase.fundamental_interaction_type(::PerturbativeQED) = :electromagnetic
"""
in_phase_space_dimension(proc::AbstractProcessDefinition, ::PerturbativeQED)
Return the number of degrees of freedom to determine the incoming phase space for processes in PerturbativeQED.
!!! note "Convention"
The current implementation only supports the case where two of the incoming particles collide head-on.
"""
function QEDbase.in_phase_space_dimension(
proc::AbstractProcessDefinition, ::PerturbativeQED
)
return 3 * number_incoming_particles(proc) - 4 - 1
end
function QEDbase.out_phase_space_dimension(
proc::AbstractProcessDefinition, ::PerturbativeQED
)
return 3 * number_outgoing_particles(proc) - 4
end
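# For example, for one-photon Compton (two incoming, two outgoing particles) the
# convention above yields
#   in_phase_space_dimension  = 3 * 2 - 4 - 1 = 1   (e.g. the photon energy omega)
#   out_phase_space_dimension = 3 * 2 - 4     = 2   (e.g. cos(theta) and phi)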
function Base.show(io::IO, ::PerturbativeQED)
print(io, "perturbative QED")
return nothing
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 2079 | """
Compton(
in_spin [= AllSpin()]
in_pol [= AllPol()]
out_spin [= AllSpin()]
out_pol [= AllPol()]
    )
One-photon Compton scattering process. The positional arguments set the spin of the incoming electron, the polarization of the incoming photon, the spin of the outgoing electron, and the polarization of the outgoing photon; omitted arguments default to `AllSpin()` and `AllPol()`, respectively.
"""
struct Compton{InElectronSpin,InPhotonPol,OutElectronSpin,OutPhotonPol} <:
AbstractProcessDefinition where {
InElectronSpin<:AbstractSpin,
InPhotonPol<:AbstractPolarization,
OutElectronSpin<:AbstractSpin,
OutPhotonPol<:AbstractPolarization,
}
in_spin::InElectronSpin
in_pol::InPhotonPol
out_spin::OutElectronSpin
out_pol::OutPhotonPol
end
function Compton()
return Compton(AllSpin(), AllPol(), AllSpin(), AllPol())
end
function Compton(in_pol::AbstractPolarization)
return Compton(AllSpin(), in_pol, AllSpin(), AllPol())
end
function Compton(in_pol::AbstractPolarization, out_pol::AbstractPolarization)
return Compton(AllSpin(), in_pol, AllSpin(), out_pol)
end
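# Example (a sketch): a Compton process with an x-polarized incoming photon, leaving
# all other spins and polarizations at their defaults (`AllSpin()`/`AllPol()`):
#
#   proc = Compton(PolX())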
_polarizations(proc::Compton) = (proc.in_pol, proc.out_pol)
_spins(proc::Compton) = (proc.in_spin, proc.out_spin)
_in_spin_and_pol(proc::Compton) = (proc.in_spin, proc.in_pol)
_out_spin_and_pol(proc::Compton) = (proc.out_spin, proc.out_pol)
function QEDprocesses.incoming_particles(::Compton)
return (Electron(), Photon())
end
function QEDprocesses.outgoing_particles(::Compton)
return (Electron(), Photon())
end
function _spin_or_pol(process::Compton, ::Electron, ::Incoming)
return process.in_spin
end
function _spin_or_pol(process::Compton, ::Electron, ::Outgoing)
return process.out_spin
end
function _spin_or_pol(process::Compton, ::Photon, ::Incoming)
return process.in_pol
end
function _spin_or_pol(process::Compton, ::Photon, ::Outgoing)
return process.out_pol
end
function Base.show(io::IO, ::Compton)
print(io, "one-photon Compton scattering")
return nothing
end
function Base.show(io::IO, ::MIME"text/plain", proc::Compton)
println(io, "one-photon Compton scattering")
println(io, " incoming: electron ($(proc.in_spin)), photon ($(proc.in_pol))")
println(io, " outgoing: electron ($(proc.out_spin)), photon ($(proc.out_pol))")
return nothing
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 5310 | #####
# Perturbative one-photon Compton scattering
# Implementation of the cross section interface
#####
function QEDbase._incident_flux(in_psp::InPhaseSpacePoint{<:Compton,<:PerturbativeQED})
return momentum(in_psp, Incoming(), 1) * momentum(in_psp, Incoming(), 2)
end
function QEDbase._matrix_element(psp::PhaseSpacePoint{<:Compton,PerturbativeQED})
in_ps = momenta(psp, Incoming())
out_ps = momenta(psp, Outgoing())
return _pert_compton_matrix_element(psp.proc, in_ps, out_ps)
end
"""
_averaging_norm(proc::Compton)
!!! note "Convention"
We average over the initial spins and pols, and sum over final.
"""
function QEDbase._averaging_norm(proc::Compton)
normalizations = multiplicity.(_in_spin_and_pol(proc))
return inv(prod(normalizations))
end
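# For example (a sketch): for the default `Compton()` the incoming electron and photon
# carry `AllSpin()` and `AllPol()`, each with multiplicity 2, so the averaging norm is
# 1/(2 * 2) = 1/4.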
@inline function _all_onshell(psp::PhaseSpacePoint{<:Compton})
return @inbounds isapprox(
getMass2(momentum(psp, Incoming(), 1)), mass(incoming_particles(psp.proc)[1])^2
) &&
isapprox(
getMass2(momentum(psp, Incoming(), 2)), mass(incoming_particles(psp.proc)[2])^2
) &&
isapprox(
getMass2(momentum(psp, Outgoing(), 1)), mass(outgoing_particles(psp.proc)[1])^2
) &&
isapprox(
getMass2(momentum(psp, Outgoing(), 2)), mass(outgoing_particles(psp.proc)[2])^2
)
end
@inline function QEDbase._is_in_phasespace(
psp::PhaseSpacePoint{<:Compton,<:PerturbativeQED}
)
@inbounds if (
!isapprox(
momentum(psp, Incoming(), 1) + momentum(psp, Incoming(), 2),
momentum(psp, Outgoing(), 1) + momentum(psp, Outgoing(), 2),
)
)
return false
end
return _all_onshell(psp)
end
@inline function QEDbase._phase_space_factor(
psp::PhaseSpacePoint{<:Compton,PerturbativeQED}
)
in_ps = momenta(psp, Incoming())
out_ps = momenta(psp, Outgoing())
return _pert_compton_ps_fac(psp.ps_def, in_ps[2], out_ps[2])
end
#######
# Matrix elements
#######
@inline function _pert_compton_matrix_element(
proc::Compton, in_ps::NTuple{N,T}, out_ps::NTuple{M,T}
) where {N,M,T<:AbstractFourMomentum}
in_electron_mom = in_ps[1]
in_photon_mom = in_ps[2]
out_electron_mom = out_ps[1]
out_photon_mom = out_ps[2]
in_electron_state = base_state(Electron(), Incoming(), in_electron_mom, proc.in_spin)
in_photon_state = base_state(Photon(), Incoming(), in_photon_mom, proc.in_pol)
out_electron_state = base_state(Electron(), Outgoing(), out_electron_mom, proc.out_spin)
out_photon_state = base_state(Photon(), Outgoing(), out_photon_mom, proc.out_pol)
return _pert_compton_matrix_element(
in_electron_mom,
in_electron_state,
in_photon_mom,
in_photon_state,
out_electron_mom,
out_electron_state,
out_photon_mom,
out_photon_state,
)
end
function _pert_compton_matrix_element(
in_electron_mom::T,
in_electron_state,
in_photon_mom::T,
in_photon_state,
out_electron_mom::T,
out_electron_state,
out_photon_mom::T,
out_photon_state,
) where {T<:AbstractFourMomentum}
base_states_comb = Iterators.product(
QEDbase._as_svec(in_electron_state),
QEDbase._as_svec(in_photon_state),
QEDbase._as_svec(out_electron_state),
QEDbase._as_svec(out_photon_state),
)
matrix_elements = Vector{ComplexF64}()
sizehint!(matrix_elements, length(base_states_comb))
for (in_el, in_ph, out_el, out_ph) in base_states_comb
push!(
matrix_elements,
_pert_compton_matrix_element_single(
in_electron_mom,
in_el,
in_photon_mom,
in_ph,
out_electron_mom,
out_el,
out_photon_mom,
out_ph,
),
)
end
return matrix_elements
end
function _pert_compton_matrix_element_single(
in_electron_mom::T,
in_electron_state::BiSpinor,
in_photon_mom::T,
in_photon_state::SLorentzVector,
out_electron_mom::T,
out_electron_state::AdjointBiSpinor,
out_photon_mom::T,
out_photon_state::SLorentzVector,
) where {T<:AbstractFourMomentum}
in_ph_slashed = slashed(in_photon_state)
out_ph_slashed = slashed(out_photon_state)
prop1 = QEDcore._fermion_propagator(in_photon_mom + in_electron_mom, mass(Electron()))
prop2 = QEDcore._fermion_propagator(in_electron_mom - out_photon_mom, mass(Electron()))
# TODO: fermion propagator is not yet in QEDbase
diagram_1 =
out_electron_state *
(out_ph_slashed * (prop1 * (in_ph_slashed * in_electron_state)))
diagram_2 =
out_electron_state *
(in_ph_slashed * (prop2 * (out_ph_slashed * in_electron_state)))
result = diagram_1 + diagram_2
# TODO: find (preferably unitful) global provider for physical constants
# elementary charge
return ELEMENTARY_CHARGE_SQUARE * result
end
#######
# Phase space factors
#######
function _pert_compton_ps_fac(
in_ps_def::PhasespaceDefinition{inCS,ElectronRestFrame}, in_photon_mom, out_photon_mom
) where {inCS}
# TODO
omega = getE(in_photon_mom)
omega_prime = getE(out_photon_mom)
return omega_prime^2 / (16 * pi^2 * omega * mass(Electron()))
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 1645 | @inline function _pert_omega_prime(omega, cth; mass=1.0)
return omega / (1 + omega / mass * (1 - cth))
end
function generate_momenta(
proc::Compton,
model::PerturbativeQED,
in_ps_def::PhasespaceDefinition{SphericalCoordinateSystem,ElectronRestFrame},
in_ps::NTuple{N,T},
out_ps::NTuple{M,T},
) where {N,M,T<:Real}
return QEDbase._generate_momenta(proc, model, in_ps_def, in_ps, out_ps)
end
function QEDbase._generate_incoming_momenta(
proc::Compton,
model::PerturbativeQED,
in_ps_def::PhasespaceDefinition{SphericalCoordinateSystem,ElectronRestFrame},
in_ps::NTuple{N,T},
) where {N,T<:Real}
om = in_ps[1]
P = SFourMomentum(one(om), zero(om), zero(om), zero(om))
K = SFourMomentum(om, zero(om), zero(om), om)
return P, K
end
function QEDbase._generate_momenta(
proc::Compton,
model::PerturbativeQED,
in_ps_def::PhasespaceDefinition{SphericalCoordinateSystem,ElectronRestFrame},
in_ps::NTuple{N,T},
out_ps::NTuple{M,T},
) where {N,M,T<:Real}
omega = in_ps[1]
cth = out_ps[1]
phi = out_ps[2]
P, K, Pp, Kp = _generate_momenta_elab_sph(omega, cth, phi) # TODO: do this coord and frame dependent
in_moms = (P, K)
out_moms = (Pp, Kp)
return in_moms, out_moms
end
function _generate_momenta_elab_sph(om, cth, phi, m=1.0)
P = SFourMomentum(m, zero(m), zero(m), zero(m))
K = SFourMomentum(om, zero(om), zero(om), om)
omp = _pert_omega_prime(om, cth)
sth = sqrt(1 - cth^2)
sphi, cphi = sincos(phi)
Kp = SFourMomentum(omp, omp * sth * cphi, omp * sth * sphi, omp * cth)
Pp = P + K - Kp
return P, K, Pp, Kp
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 459 | function QEDbase._total_probability(in_psp::InPhaseSpacePoint{<:Compton,<:PerturbativeQED})
omega = getE(momentum(in_psp[Incoming(), 2]))
function func(x)
return unsafe_differential_probability(
PhaseSpacePoint(in_psp.proc, in_psp.model, in_psp.ps_def, (omega,), (x, 0.0))
)
end
tot_prob, _ = quadgk(func, -1, 1; rtol=sqrt(eps(omega)))
tot_prob *= 2 * pi # phi integration is trivial
return tot_prob
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 113 | using Test
using SafeTestsets
begin
# scattering processes
include("processes/run_process_test.jl")
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 218 |
@time @safetestset "general one photon compton" begin
include("one_photon_compton/process.jl")
end
@time @safetestset "perturbative one photon compton" begin
include("one_photon_compton/perturbative.jl")
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 993 |
"""
Klein-Nishina: differential cross section
source: Peskin, Schroeder. "Quantum field theory." (1995). eq: 5.91
note: we compute d sigma/ d Omega, and *not* d sigma/ d cos theta (the difference is a factor 2 pi)
"""
function _groundtruth_pert_compton_diffCS_spinsum_polsum(om, cth, mass)
prefac = ALPHA_SQUARE / 2
omp = QEDprocesses._pert_omega_prime(om, cth)
sth_sq = 1 - cth^2
return prefac * (omp / om)^2 * (omp / om + om / omp - sth_sq)
end
function _groundtruth_pert_compton_diffCS_spinsum_xpol(omega, ctheta, phi, mass)
om_prime = QEDprocesses._pert_omega_prime(omega, ctheta)
om_prime_over_om = om_prime / omega
return 0.5 * ALPHA_SQUARE / mass^2 *
om_prime_over_om^2 *
(om_prime_over_om + 1.0 / om_prime_over_om - 2 * (1 - ctheta^2) * cos(phi)^2)
end
function _groundtruth_pert_compton_diffCS_spinsum_ypol(omega, ctheta, phi, mass)
return _groundtruth_pert_compton_diffCS_spinsum_xpol(omega, ctheta, phi + pi / 2, mass)
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 6503 |
using QEDbase
using QEDcore
using QEDprocesses
using Random
using StaticArrays
using QuadGK
const RNG = MersenneTwister(77697185)
const ATOL = eps()
const RTOL = sqrt(eps())
include("groundtruths.jl")
const MODEL = PerturbativeQED()
const PS_DEF = PhasespaceDefinition(SphericalCoordinateSystem(), ElectronRestFrame())
const OMEGAS = (1e-6 * rand(RNG), 1e-3 * rand(RNG), rand(RNG), 1e3 * rand(RNG))
const COS_THETAS = [-1.0, 2 * rand(RNG) - 1, 0.0, 1.0]
const PHIS = [0, 2 * pi, rand(RNG) * 2 * pi]
@testset "pretty-printing" begin
buf = IOBuffer()
print(buf, MODEL)
@test String(take!(buf)) == "perturbative QED"
show(buf, MIME"text/plain"(), MODEL)
@test String(take!(buf)) == "perturbative QED"
end
@testset "perturbative kinematics" begin
PROC = Compton()
@testset "momentum generation" begin
@testset "$om, $cth, $phi" for (om, cth, phi) in
Iterators.product(OMEGAS, COS_THETAS, PHIS)
IN_COORDS = (om,)
OUT_COORDS = (cth, phi)
IN_PS, OUT_PS = QEDbase._generate_momenta(
PROC, MODEL, PS_DEF, IN_COORDS, OUT_COORDS
)
in_mom_square = getMass2.(IN_PS)
out_mom_square = getMass2.(OUT_PS)
in_masses = mass.(incoming_particles(PROC)) .^ 2
out_masses = mass.(outgoing_particles(PROC)) .^ 2
# we need a larger ATOL than eps() here because the error is accumulated over several additions
@test all(isapprox.(in_mom_square, in_masses, atol=4 * ATOL, rtol=RTOL))
@test all(isapprox.(out_mom_square, out_masses, atol=4 * ATOL, rtol=RTOL))
end
end
end
@testset "perturbative" begin
@testset "$omega" for omega in OMEGAS
@testset "differential cross section" begin
@testset "spin and pol summed" begin
PROC = Compton()
@testset "$cos_theta $phi" for (cos_theta, phi) in
Iterators.product(COS_THETAS, PHIS)
IN_COORDS = (omega,)
OUT_COORDS = (cos_theta, phi)
IN_PS, OUT_PS = QEDbase._generate_momenta(
PROC, MODEL, PS_DEF, IN_COORDS, OUT_COORDS
)
groundtruth = _groundtruth_pert_compton_diffCS_spinsum_polsum(
omega, cos_theta, 1.0
)
PSP = PhaseSpacePoint(PROC, MODEL, PS_DEF, IN_PS, OUT_PS)
test_val = unsafe_differential_cross_section(PSP)
@test isapprox(test_val, groundtruth, atol=ATOL, rtol=RTOL)
end
end
@testset "x-pol and spin summed" begin
PROC = Compton(PolX())
@testset "$cos_theta $phi" for (cos_theta, phi) in
Iterators.product(COS_THETAS, PHIS)
IN_COORDS = (omega,)
OUT_COORDS = (cos_theta, phi)
IN_PS, OUT_PS = QEDbase._generate_momenta(
PROC, MODEL, PS_DEF, IN_COORDS, OUT_COORDS
)
groundtruth = _groundtruth_pert_compton_diffCS_spinsum_xpol(
omega, cos_theta, phi, 1.0
)
PSP = PhaseSpacePoint(PROC, MODEL, PS_DEF, IN_PS, OUT_PS)
test_val = unsafe_differential_cross_section(PSP)
@test isapprox(test_val, groundtruth, atol=ATOL, rtol=RTOL)
end
end
@testset "y-pol and spin summed" begin
PROC = Compton(PolY())
@testset "$cos_theta $phi" for (cos_theta, phi) in
Iterators.product(COS_THETAS, PHIS)
IN_COORDS = (omega,)
OUT_COORDS = (cos_theta, phi)
IN_PS, OUT_PS = QEDbase._generate_momenta(
PROC, MODEL, PS_DEF, IN_COORDS, OUT_COORDS
)
groundtruth = _groundtruth_pert_compton_diffCS_spinsum_ypol(
omega, cos_theta, phi, 1.0
)
PSP = PhaseSpacePoint(PROC, MODEL, PS_DEF, IN_PS, OUT_PS)
test_val = unsafe_differential_cross_section(PSP)
@test isapprox(test_val, groundtruth, atol=ATOL, rtol=RTOL)
end
end
end
@testset "total cross section" begin
@testset "spin and pol summed" begin
PROC = Compton()
# Klein-Nishina: total cross section
function klein_nishina_total_cross_section(in_ps)
function func(x)
return unsafe_differential_cross_section(
PhaseSpacePoint(
Compton(), PerturbativeQED(), PS_DEF, in_ps, (x, 0.0)
),
)
end
res, err = quadgk(func, -1, 1)
# note: mul by 2pi instead of the phi-integration
return 2 * pi * res
end
IN_COORDS = (omega,)
groundtruth = klein_nishina_total_cross_section(IN_COORDS)
test_val = @inferred QEDprocesses.total_cross_section(
InPhaseSpacePoint(PROC, MODEL, PS_DEF, IN_COORDS)
)
@test isapprox(test_val, groundtruth, atol=ATOL, rtol=RTOL)
@testset "$cos_theta $phi" for (cos_theta, phi) in
Iterators.product(COS_THETAS, PHIS)
OUT_COORDS = (cos_theta, phi)
test_val = @inferred QEDprocesses.total_cross_section(
PhaseSpacePoint(PROC, MODEL, PS_DEF, IN_COORDS, OUT_COORDS)
)
@test isapprox(test_val, groundtruth, atol=ATOL, rtol=RTOL)
out_moms = momenta(
PhaseSpacePoint(PROC, MODEL, PS_DEF, IN_COORDS, OUT_COORDS),
Outgoing(),
)
@test_throws MethodError QEDprocesses.total_cross_section(
OutPhaseSpacePoint(PROC, MODEL, PS_DEF, out_moms)
)
end
end
end
end
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | code | 3972 | using QEDprocesses
using Random
using QEDbase
using QEDcore
POLS = [PolX(), PolY(), AllPol()]
SPINS = [SpinUp(), SpinDown(), AllSpin()]
POL_AND_SPIN_COMBINATIONS = Iterators.product(SPINS, POLS, SPINS, POLS)
POL_COMBINATIONS = Iterators.product(POLS, POLS)
BUF = IOBuffer()
@testset "constructor" begin
@testset "default" begin
proc = Compton()
@test QEDprocesses._spin_or_pol(proc, Photon(), Incoming()) == AllPol()
@test QEDprocesses._spin_or_pol(proc, Electron(), Incoming()) == AllSpin()
@test QEDprocesses._spin_or_pol(proc, Photon(), Outgoing()) == AllPol()
@test QEDprocesses._spin_or_pol(proc, Electron(), Outgoing()) == AllSpin()
print(BUF, proc)
@test String(take!(BUF)) == "one-photon Compton scattering"
show(BUF, MIME"text/plain"(), proc)
@test String(take!(BUF)) ==
"one-photon Compton scattering\n incoming: electron ($(AllSpin())), photon ($(AllPol()))\n outgoing: electron ($(AllSpin())), photon ($(AllPol()))\n"
end
@testset "in_pol" begin
@testset "$pol" for pol in POLS
proc = Compton(pol)
@test QEDprocesses._spin_or_pol(proc, Electron(), Incoming()) == AllSpin()
@test QEDprocesses._spin_or_pol(proc, Photon(), Incoming()) == pol
@test QEDprocesses._spin_or_pol(proc, Electron(), Outgoing()) == AllSpin()
@test QEDprocesses._spin_or_pol(proc, Photon(), Outgoing()) == AllPol()
print(BUF, proc)
@test String(take!(BUF)) == "one-photon Compton scattering"
show(BUF, MIME"text/plain"(), proc)
@test String(take!(BUF)) ==
"one-photon Compton scattering\n incoming: electron ($(AllSpin())), photon ($(pol))\n outgoing: electron ($(AllSpin())), photon ($(AllPol()))\n"
end
end
@testset "in_pol+out_pol" begin
@testset "$in_pol, $out_pol" for (in_pol, out_pol) in POL_COMBINATIONS
proc = Compton(in_pol, out_pol)
@test QEDprocesses._spin_or_pol(proc, Electron(), Incoming()) == AllSpin()
@test QEDprocesses._spin_or_pol(proc, Photon(), Incoming()) == in_pol
@test QEDprocesses._spin_or_pol(proc, Electron(), Outgoing()) == AllSpin()
@test QEDprocesses._spin_or_pol(proc, Photon(), Outgoing()) == out_pol
print(BUF, proc)
@test String(take!(BUF)) == "one-photon Compton scattering"
show(BUF, MIME"text/plain"(), proc)
@test String(take!(BUF)) ==
"one-photon Compton scattering\n incoming: electron ($(AllSpin())), photon ($(in_pol))\n outgoing: electron ($(AllSpin())), photon ($(out_pol))\n"
end
end
@testset "all spins+pols" begin
@testset "$in_spin, $in_pol, $out_spin, $out_pol" for (
in_spin, in_pol, out_spin, out_pol
) in POL_AND_SPIN_COMBINATIONS
proc = Compton(in_spin, in_pol, out_spin, out_pol)
@test QEDprocesses._spin_or_pol(proc, Electron(), Incoming()) == in_spin
@test QEDprocesses._spin_or_pol(proc, Photon(), Incoming()) == in_pol
@test QEDprocesses._spin_or_pol(proc, Electron(), Outgoing()) == out_spin
@test QEDprocesses._spin_or_pol(proc, Photon(), Outgoing()) == out_pol
print(BUF, proc)
@test String(take!(BUF)) == "one-photon Compton scattering"
show(BUF, MIME"text/plain"(), proc)
@test String(take!(BUF)) ==
"one-photon Compton scattering\n incoming: electron ($(in_spin)), photon ($(in_pol))\n outgoing: electron ($(out_spin)), photon ($(out_pol))\n"
end
end
end
@testset "particle content" begin
proc = Compton()
@test incoming_particles(proc) == (Electron(), Photon())
@test outgoing_particles(proc) == (Electron(), Photon())
@test number_incoming_particles(proc) == 2
@test number_outgoing_particles(proc) == 2
end
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | docs | 5313 | # Changelog
## Version 0.2.0
[diff since 0.1.0](https://github.com/QEDjl-project/QEDprocesses.jl/compare/v0.1.0...release-0.2.0)
This release is part of the restructuring processes of QED.jl (see https://github.com/QEDjl-project/QED.jl/issues/35 for details).
It is a breaking release, indicated by the bumped minor version, since the package does not yet have a major version.
### Breaking changes
This release moves the general purpose interfaces to [`QEDbase.jl`](https://github.com/QEDjl-project/QEDbase.jl) and the core functionality to the new package [`QEDcore.jl`](https://github.com/QEDjl-project/QEDcore.jl).
The purpose of this package is to provide implementations for specific models and processes, currently the 1-photon-Compton in perturbative QED.
### New features
- [#28](https://github.com/QEDjl-project/QEDprocesses.jl/pull/28): Add particle propagators.
- [#39](https://github.com/QEDjl-project/QEDprocesses.jl/pull/39): Coordinate-based implementation of diff cs and probabilities. (Superseded by later `PhaseSpacePoint` implementations, which can be constructed using coordinates)
- [#40](https://github.com/QEDjl-project/QEDprocesses.jl/pull/40): Add perturbative one-photon Compton implementation.
- [#51](https://github.com/QEDjl-project/QEDprocesses.jl/pull/51): Add initial `PhaseSpacePoint` definition.
- [#52](https://github.com/QEDjl-project/QEDprocesses.jl/pull/52): Add overloads in the process interface using `PhaseSpacePoint`s.
- [#54](https://github.com/QEDjl-project/QEDprocesses.jl/pull/54): Add phase space generation interface.
- [#59](https://github.com/QEDjl-project/QEDprocesses.jl/pull/59): Update process interface to use `PhaseSpacePoint`s instead of raw momenta. (Supersedes [#52](https://github.com/QEDjl-project/QEDprocesses.jl/pull/52))
- [#60](https://github.com/QEDjl-project/QEDprocesses.jl/pull/60): Add pretty-printing for some of the types. (`ParticleStateful`, `PhaseSpacePoint`, models, processes, coordinate systems, frames of reference, and phase space definitions)
- [#63](https://github.com/QEDjl-project/QEDprocesses.jl/pull/63): Improvement for the `PhaseSpacePoint` type, making it type stable and allowing in-place construction.
- [#68](https://github.com/QEDjl-project/QEDprocesses.jl/pull/68): Add an `InPhaseSpacePoint` definition allowing a phase space point to not contain particle states for the outgoing particles. This is useful in some interfaces that don't need the outgoing part of a phase space point.
### Maintenance
Besides the new features, this release contains some maintenance and minor changes and fixes. Many of these were temporarily necessary to maintain a working state during the restructuring.
- [#36](https://github.com/QEDjl-project/QEDprocesses.jl/pull/36): Remove custom registry from unit tests.
- [#37](https://github.com/QEDjl-project/QEDprocesses.jl/pull/37): Remove custom registry build step.
- [#38](https://github.com/QEDjl-project/QEDprocesses.jl/pull/38): Refactor differential cross section functionality.
- [#53](https://github.com/QEDjl-project/QEDprocesses.jl/pull/53): CompatHelper: StaticArrays compat version 1.
- [#61](https://github.com/QEDjl-project/QEDprocesses.jl/pull/61): CompatHelper: QuadGK compat version 2.
- [#62](https://github.com/QEDjl-project/QEDprocesses.jl/pull/62): Move test dependencies to the main `Project.toml`.
- [#71](https://github.com/QEDjl-project/QEDprocesses.jl/pull/71): Use Julia 1.10 in CI.
- [#74](https://github.com/QEDjl-project/QEDprocesses.jl/pull/74): (Re-)move interfaces that move to QEDbase.
- [#76](https://github.com/QEDjl-project/QEDprocesses.jl/pull/76): Refactor QED.jl package dependencies and namespaces.
- [#78](https://github.com/QEDjl-project/QEDprocesses.jl/pull/78): Use function implementations from QEDcore.
- [#80](https://github.com/QEDjl-project/QEDprocesses.jl/pull/80): Fix ambiguous function calls.
- [#81](https://github.com/QEDjl-project/QEDprocesses.jl/pull/81): Update dev version.
- [#82](https://github.com/QEDjl-project/QEDprocesses.jl/pull/82): Remove QEDbase compat entry.
- [#87](https://github.com/QEDjl-project/QEDprocesses.jl/pull/87): (Re-)move abstract cross section and probability implementations (move to QEDcore).
- [#89](https://github.com/QEDjl-project/QEDprocesses.jl/pull/89): Removes temporary dependencies to other dev versions that were necessary during restructuring.
## Version 0.1.0
This is the initial version of QEDprocesses.
[Diff since initial commit](https://github.com/QEDjl-project/QEDprocesses.jl/compare/302274695d82225f4a810c252d6919839bc59fd7...release-v0.1.0)
[Full list of PRs](https://github.com/QEDjl-project/QEDprocesses.jl/milestone/2?closed=1)
### Highlights
- interface for scattering processes described by physical models (general properties, differential/total cross sections) https://github.com/QEDjl-project/QEDprocesses.jl/pull/11
- interface for abstract setups (general computation setup and process dedicated
setups) https://github.com/QEDjl-project/QEDprocesses.jl/pull/14
- setup ci for unit and integration testing https://github.com/QEDjl-project/QEDprocesses.jl/pull/5 https://github.com/QEDjl-project/QEDprocesses.jl/pull/7 https://github.com/QEDjl-project/QEDprocesses.jl/pull/21
- setup ci/cd for docs https://github.com/QEDjl-project/QEDprocesses.jl/pull/19
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | docs | 1428 | # QEDprocesses
[Documentation (stable)](https://qedjl-project.github.io/QEDprocesses.jl/stable)
[Documentation (dev)](https://qedjl-project.github.io/QEDprocesses.jl/dev)
[Code style: Blue](https://github.com/invenia/BlueStyle)
## Installation
To install the current stable version of `QEDprocesses.jl`, you may use the standard Julia package manager within the Julia REPL
```julia
julia> using Pkg
julia> Pkg.add("QEDprocesses")
```
or use the Pkg prompt by hitting `]` within the Julia REPL and then typing
```julia
(@v1.9) pkg> add QEDprocesses
```
To install the locally downloaded package on Windows, change to the parent directory and type within the Pkg prompt
```julia
(@v1.9) pkg> add ./QEDprocesses.jl
```
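As a quick sanity check after installation, the sketch below constructs the perturbative one-photon Compton process and queries its particle content, mirroring this package's test suite. Depending on the release, the particle types `Electron` and `Photon` (and the particle-content accessors) may be provided by `QEDbase`/`QEDcore` rather than by `QEDprocesses` itself.
```julia
using QEDprocesses

# One-photon Compton scattering; with no arguments, all spin and
# polarization combinations are used (`AllSpin`/`AllPol`).
proc = Compton()

incoming_particles(proc)          # (Electron(), Photon())
outgoing_particles(proc)          # (Electron(), Photon())
number_incoming_particles(proc)   # 2
number_outgoing_particles(proc)   # 2
```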
## Building the documentation locally
To build the documentation of `QEDprocesses.jl` locally, first clone this
repository. Then instantiate the documentation subpackage by running
```julia
julia --project=docs -e 'using Pkg; Pkg.instantiate(); Pkg.develop(PackageSpec(path=pwd()))'
```
in the root directory of this repository. Afterwards, the documentation can be
built by running
```julia
julia --project=docs --color=yes docs/make.jl
```
To access the documentation site, just open the file `docs/_build/index.html` in
your favorite browser.
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.2.0 | 237c8c644bd45c2288836fce5c1d59b6a4bf22a9 | docs | 201 | ```@meta
CurrentModule = QEDprocesses
```
# QEDprocesses
Documentation for [QEDprocesses](https://github.com/QEDjl-project/QEDprocesses.jl).
```@index
```
```@autodocs
Modules = [QEDprocesses]
```
| QEDprocesses | https://github.com/QEDjl-project/QEDprocesses.jl.git |
|
[
"MIT"
] | 0.0.1 | 24b4e9d52d34a2ada50fee99f1bcf37d2a2b5999 | code | 2737 | module SIGPROCFiles
export loadhdr
export loadtim
export loadfil
using PackedIntegers
struct InvalidFile <: Exception end
Base.showerror(io::IO, _::InvalidFile) = print(io, "This is not a valid SIGPROC file.")
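# Read a SIGPROC header string: an Int32 byte count followed by that many bytes.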
str(s) = String(read(s, read(s, Int32)))
function loadhdr(f::AbstractString)
open(f) do s
loadhdr(s)
end
end
function loadhdr(s::IOStream)
meta = Dict()
magic = str(s)
magic != "HEADER_START" && throw(InvalidFile())
while true
key = str(s)
key == "HEADER_END" && break
typ = Dict(
:filename => String,
:telescope_id => Int32,
:telescope => String,
:machine_id => Int32,
:data_type => Int32,
:rawdatafile => String,
:source_name => String,
:barycentric => Int32,
:pulsarcentric => Int32,
:az_start => Float64,
:za_start => Float64,
:src_raj => Float64,
:src_dej => Float64,
:tstart => Float64,
:tsamp => Float64,
:nbits => Int32,
:nsamples => Int32,
:fch1 => Float64,
:foff => Float64,
:fchannel => Float64,
:nchans => Int32,
:nifs => Int32,
:refdm => Float64,
:flux => Float64,
:period => Float64,
:nbeams => Int32,
:ibeam => Int32,
:hdrlen => Int32,
:pb => Float64,
:ecc => Float64,
:asini => Float64,
:orig_hdrlen => Int32,
:new_hdrlen => Int32,
:sampsize => Int32,
:bandwidth => Float64,
:fbottom => Float64,
:ftop => Float64,
:obs_date => String,
:obs_time => String,
:signed => Int8,
:accel => Float64,
)[Symbol(key)]
meta[Symbol(key)] = (typ == String) ? str(s) : read(s, typ)
end
meta
end
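# Read all remaining samples from the stream, reinterpreted according to the
# sample width in bits; 1-, 2- and 4-bit samples are bit-packed and are
# unpacked via `PackedIntegers.unpack`.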
function get(s, nbits)
data = reinterpret(Dict(
1 => UInt8,
2 => UInt8,
4 => UInt8,
8 => UInt8,
16 => UInt16,
32 => Float32,
)[nbits], read(s))
nbits ∈ [1 2 4] ? unpack(data; n=nbits) : data
end
function loadtim(f::AbstractString)
open(f) do s
loadtim(s)
end
end
function loadtim(s::IOStream)
meta = loadhdr(s)
nbits = meta[:nbits]
data = get(s, nbits)
meta, data
end
function loadfil(f::AbstractString)
open(f) do s
loadfil(s)
end
end
function loadfil(s::IOStream)
meta = loadhdr(s)
nbits = meta[:nbits]
data = get(s, nbits)
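    # Filterbank samples are stored channel-fastest; reshape into a (nchans, nsamples) matrix.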
data = reshape(data, Int64(meta[:nchans]), :)
meta, data
end
end
| SIGPROCFiles | https://github.com/astrogewgaw/SIGPROCFiles.jl.git |
|
[
"MIT"
] | 0.0.1 | 24b4e9d52d34a2ada50fee99f1bcf37d2a2b5999 | code | 1926 | using Test
using SIGPROCFiles
@testset "SIGPROCFiles.jl" begin
@testset "Header data" begin
m = SIGPROCFiles.loadhdr("./data/test.fil")
@test m == Dict(
:az_start => -1.0,
:barycentric => 0,
:data_type => 0,
:fch1 => 1465.0,
:foff => -1.0,
:ibeam => 0,
:machine_id => 0,
:nbeams => 1,
:nbits => 8,
:nchans => 336,
:nifs => 1,
:pulsarcentric => 0,
:rawdatafile => "./small.fil",
:source_name => "src1",
:src_dej => 135752.112,
:src_raj => 122637.6361,
:telescope_id => 6,
:tsamp => 0.00126646875,
:tstart => 58682.620316710374,
:za_start => -1.0,
)
end
@testset "Filterbank data" begin
m, d = SIGPROCFiles.loadfil("./data/test_1bit.fil")
@test all(d[65, 101:110] .≈ [1, 1, 0, 0, 0, 0, 0, 0, 1, 1])
m, d = SIGPROCFiles.loadfil("./data/test_2bit.fil")
@test all(d[65, 101:110] .≈ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
m, d = SIGPROCFiles.loadfil("./data/test_4bit.fil")
@test all(d[65, 101:110] .≈ [7, 4, 3, 11, 6, 10, 10, 9, 6, 7])
m, d = SIGPROCFiles.loadfil("./data/test_8bit.fil")
@test all(d[65, 101:110] .≈ [121, 94, 94, 124, 151, 118, 132, 74, 112, 65])
m, d = SIGPROCFiles.loadfil("./data/test_32bit.fil")
@test all(d[65, 101:110] .≈ [1.166237, -0.84468514, 0.874816, 1.4028563, -0.98618776, -0.80890864, -1.6307002, 1.1306021, 0.50498164, -1.6316832])
end
@testset "Time series data" begin
for fname in ["test_i8.tim", "test_ui8.tim", "test_f32.tim"]
m, d = SIGPROCFiles.loadtim("./data/$fname")
@test all(d .≈ [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0])
end
end
end
| SIGPROCFiles | https://github.com/astrogewgaw/SIGPROCFiles.jl.git |
|
[
"MIT"
] | 0.0.1 | 24b4e9d52d34a2ada50fee99f1bcf37d2a2b5999 | docs | 1459 | <div align="center">
<h1><code>SIGPROCFiles</code></h1>
<h4><i>I/O for SIGPROC filterbank and time series files, in Julia.</i></h4>
<br/>
![License][license]
![GitHub Stars][stars]
[![Gitmoji Badge][gitmoji_badge]][gitmoji]
</div>
<div align="justify">
This package adds support to load and save [**`SIGPROC`**][sigproc] filterbank (`*.fil`) and time series (`*.tim`) files. These formats are used by the `SIGPROC` library, and are used and supported by a vast number of radio telescopes and libraries. Currently, the following features are already present, or planned for future releases:
- [x] Loading SIGPROC filterbank files.
- [x] Loading SIGPROC time series data.
- [ ] Saving SIGPROC filterbank files.
- [ ] Saving SIGPROC time series data.
- [x] Support for 8/32-bit filterbank and time series data.
- [x] Support for 1/2/4-bit filterbank data (via [**`PackedIntegers.jl`**][PI]).
- [ ] Memory mapping, for files too large to be read directly into memory.
Install it by typing and running:
```julia
] add SIGPROCFiles
```
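Once installed, reading a file is a one-liner. The sketch below uses the exported `loadhdr`, `loadfil`, and `loadtim` functions; the file names are placeholders for your own data.
```julia
using SIGPROCFiles

# Header only: returns a Dict of SIGPROC header keywords.
meta = loadhdr("observation.fil")

# Filterbank: returns (header, data) with data shaped (nchans, nsamples).
meta, data = loadfil("observation.fil")

# Time series: returns (header, data) with data as a flat vector.
meta, series = loadtim("observation.tim")
```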
</div>
[gitmoji]: https://gitmoji.dev
[sigproc]: https://sigproc.sourceforge.net
[PI]: https://github.com/astrogewgaw/PackedIntegers.jl
[gitmoji_badge]: https://img.shields.io/badge/gitmoji-%20😜%20😍-FFDD67.svg?style=for-the-badge
[stars]: https://img.shields.io/github/stars/astrogewgaw/SIGPROCFiles.jl?style=for-the-badge
[license]: https://img.shields.io/github/license/astrogewgaw/SIGPROCFiles.jl?style=for-the-badge
| SIGPROCFiles | https://github.com/astrogewgaw/SIGPROCFiles.jl.git |
|
[
"MIT"
] | 0.2.1 | b66ec2223581d1c9bf99d7dd60ee0069a9a42d24 | code | 3247 | module PlayingCards52
using Base: Symbol
using LinearAlgebra, Random
import LinearAlgebra: rank
import Base: string, show
export Card, Suit, suit, rank, index, color
const suit_list = [:clubs, :diamonds, :hearts, :spades]
const suit_set = Set(suit_list)
const suit_strings = ["♣", "♢", "♡", "♠"]
const suit_number =
Dict{Symbol,Int}(:clubs => 1, :diamonds => 2, :hearts => 3, :spades => 4)
const rank_list = "A23456789TJQK"
"""
Suit
This is a `DataType` representing the suit of a card.
* `Suit(j::Integer)` where `j ∈ {1,2,3,4}` creates a `♣`, `♢`, `♡`, or `♠`, respectively.
* `Suit(sym::Symbol)` where `sym` is one of `:clubs`, `:diamonds`, `:hearts`, or `:spades`.
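For example (as rendered by this module's `show` method for `Suit`):
```julia
julia> Suit(:hearts)
♡

julia> Suit(2)
♢
```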
"""
struct Suit
s::Int8
function Suit(s::Integer)
@assert 1 <= s <= 4 "Suit index must be 1, 2, 3, or 4"
new(s)
end
function Suit(s::Symbol)
chk = [s == c for c in suit_list]
if !any(chk)
error("No such suit $s.\nUse one of these: $suit_list.")
end
s = findfirst(chk)
new(s)
end
end
"""
Card(suit,rank)
Create a new playing card.
* `suit` is one of `:clubs`, `:diamonds`, `:hearts`, `:spades`, or one of `♣`, `♢`, `♡`, `♠`.
* `rank` is an integer from `1` (for Ace) to `13` (for King).
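For example:
```julia
julia> Card(:hearts, 12)
Q♡
```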
"""
struct Card
suit::Suit
rnk::Int8
function Card(s::Symbol, r::Integer)
@assert 1 <= r <= 13 "Card rank must be between 1 (ace) and 13 (king)"
new(Suit(s), r)
end
function Card(s::Suit, r::Integer)
@assert 1 <= r <= 13 "Card rank must be between 1 (ace) and 13 (king)"
new(s, r)
end
end
"""
Card(idx::Integer)
Where `idx` is from 1 to 52, returns a `Card` from the
Ace of Clubs to the King of Spades.
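For example, the first and last indices give the Ace of Clubs and the King of Spades:
```julia
julia> Card(1), Card(52)
(A♣, K♠)
```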
"""
function Card(idx::Integer)::Card
@assert 1 <= idx <= 52 "Card index must be from 1 to 52"
si = Int.(ceil(idx / 13)) # suit index
ri = idx - 13 * (si - 1)
Card(suit_list[si], ri)
end
"""
Card()
Return a random card.
"""
Card()::Card = Card(mod(rand(Int), 52) + 1)
"""
index(C::Card)
Return an index `k` (with `1 <= k <= 52`)
such that `C == Card(k)`.
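For example:
```julia
julia> index(Card(:hearts, 12))
38
```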
"""
function index(C::Card)::Int
s = suit(C).s
return 13 * (s - 1) + rank(C)
end
"""
rank(C::Card)
Returns a number from `1` (for Ace) to `13` (for King)
"""
rank(c::Card)::Int8 = c.rnk
"""
suit(C::Card)
Return the suit of the card `C`. It is one of the following:
`♣`, `♢`, `♡`, or `♠`.
"""
suit(c::Card)::Suit = c.suit
string(s::Suit)::String = @inbounds suit_strings[s.s]
"""
color(C::Card)::Symbol
Return the color of the card as one of `:black` or `:red`.
"""
function color(C::Card)::Symbol
return color(suit(C))
end
"""
color(s::Suit)::Symbol
Return `:black` for `♣` or `♠` and `:red` for `♢` or `♡`.
"""
function color(s::Suit)::Symbol
s = s.s
if s == 1 || s == 4
return :black
end
return :red
end
function string(C::Card)::String
@inbounds return rank_list[rank(C)] * string(suit(C))
end
function show(io::IO, C::Card)
print(io, string(C))
end
function show(io::IO, S::Suit)
print(io, string(S))
end
include("ordering.jl")
include("deck.jl")
include("shuffle.jl")
include("input.jl")
include("unicode.jl")
include("card_names.jl")
end # module
| PlayingCards52 | https://github.com/scheinerman/PlayingCards52.jl.git |
|
[
"MIT"
] | 0.2.1 | b66ec2223581d1c9bf99d7dd60ee0069a9a42d24 | code | 514 | export name
const rank_names = [
"ace"
"two"
"three"
"four"
"five"
"six"
"seven"
"eight"
"nine"
"ten"
"jack"
"queen"
"king"
]
const suit_names = [
"clubs"
"diamonds"
"hearts"
"spades"
]
"""
name(C::Card)::String
Return the long-form name of the card.
```julia
julia> c = Card(:diamonds, 10)
T♢
julia> name(c)
"ten of diamonds"
```
"""
function name(c::Card)::String
@inbounds rank_names[rank(c)] * " of " * suit_names[suit(c).s]
end
| PlayingCards52 | https://github.com/scheinerman/PlayingCards52.jl.git |
|
[
"MIT"
] | 0.2.1 | b66ec2223581d1c9bf99d7dd60ee0069a9a42d24 | code | 816 | export deck, print_deck
"""
deck()
Returns a 52-long list of all cards in random order.
Use `deck(false)` to get a list in new-box order (from Ace of Clubs
to King of Spades).
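For example:
```julia
julia> deck(false)[1:3]
3-element Vector{Card}:
 A♣
 2♣
 3♣
```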
"""
function deck(shuffled::Bool = true)::Vector{Card}
idx = collect(1:52)
if shuffled
idx = randperm(52)
end
result = Card.(idx)
end
"""
print_deck(d)
Here `d` is a 52-long list of `Card`s
(that is, a full deck). Print `d` in four lines
with 13 cards per line.
"""
function print_deck(d::Vector{Card})
@assert length(d) == 52 "You're not playing with a full deck"
for j = 1:4
a = 13 * (j - 1) + 1
for k = a:a+12
print(d[k])
if k < a + 12
print(" ")
else
println()
end
end
end
end
| PlayingCards52 | https://github.com/scheinerman/PlayingCards52.jl.git |
|
[
"MIT"
] | 0.2.1 | b66ec2223581d1c9bf99d7dd60ee0069a9a42d24 | code | 1079 | # handy ways to specify cards
export ♣, ♢, ♡, ♠, ♦, ♥
import Base: (*)
const ♣ = Suit(:clubs)
const ♢ = Suit(:diamonds)
const ♡ = Suit(:hearts)
const ♠ = Suit(:spades)
const ♦ = ♢
const ♥ = ♡
(*)(r::Int, s::Suit) = Card(s, r)
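# With this method a juxtaposed literal such as `7♠` parses as `7 * ♠` and so
# constructs `Card(♠, 7)`; the constants below name the lettered ranks
# (A, T, J, Q, K) for each suit.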
export A♣, T♣, J♣, Q♣, K♣
const A♣ = Card(♣, 1)
const T♣ = Card(♣, 10)
const J♣ = Card(♣, 11)
const Q♣ = Card(♣, 12)
const K♣ = Card(♣, 13)
export A♢, T♢, J♢, Q♢, K♢
export A♦, T♦, J♦, Q♦, K♦
const A♦ = Card(♢, 1)
const T♦ = Card(♢, 10)
const J♦ = Card(♢, 11)
const Q♦ = Card(♢, 12)
const K♦ = Card(♢, 13)
const A♢ = Card(♢, 1)
const T♢ = Card(♢, 10)
const J♢ = Card(♢, 11)
const Q♢ = Card(♢, 12)
const K♢ = Card(♢, 13)
export A♡, T♡, J♡, Q♡, K♡
export A♥, T♥, J♥, Q♥, K♥
const A♡ = Card(♡, 1)
const T♡ = Card(♡, 10)
const J♡ = Card(♡, 11)
const Q♡ = Card(♡, 12)
const K♡ = Card(♡, 13)
const A♥ = Card(♡, 1)
const T♥ = Card(♡, 10)
const J♥ = Card(♡, 11)
const Q♥ = Card(♡, 12)
const K♥ = Card(♡, 13)
export A♠, T♠, J♠, Q♠, K♠
const A♠ = Card(♠, 1)
const T♠ = Card(♠, 10)
const J♠ = Card(♠, 11)
const Q♠ = Card(♠, 12)
const K♠ = Card(♠, 13)
| PlayingCards52 | https://github.com/scheinerman/PlayingCards52.jl.git |