licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 11019 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin
"""
The location of a step definition (the Julia code of a test) is a filename and a line number.
"""
struct StepDefinitionLocation
filename::String
lineno::Int
end
"Thrown if there is no step definition matching a scenario step."
struct NoMatchingStepDefinition <: Exception end
"Thrown when more than one step definition matches a scenario step."
struct NonUniqueStepDefinition <: Exception
locations::Vector{StepDefinitionLocation}
end
"A step definition matcher takes a scenario step and finds a matching step definition to execute."
abstract type StepDefinitionMatcher end
# A StepDefinitionMatcher should define a method
# findstepdefinition(::StepDefinitionMatcher, ::Gherkin.ScenarioStep)
#
# Matching utilities
#
"""
symbolwithdefault(s::AbstractString) :: Symbol
If the string `s` is empty, then it returns the `:String` symbol. Otherwise
it returns `Symbol(s)`.
This function simply encapsulates the fact that an empty parameter `{}` in a
step defaults to the `String` type.
"""
function symbolwithdefault(s::AbstractString) :: Symbol
if s == ""
:String
else
Symbol(s)
end
end
function makedescriptionregex(s::String) :: Tuple{Regex, AbstractVector{Symbol}}
variablematches = eachmatch(r"{([^{}]*)}", s)
# The variables here are the variable types if the step has parameters.
# So if the step is
# Given some value {Int} and some text {String}
# the `variables` will hold [:Int, :String]
variables = [symbolwithdefault(m[1]) for m in variablematches]
# Escaping the string here so that characters that have meaning in a regular expressios
# is treated as their actual character, instead of as regular expression characters.
# The escaped characters are the PCRE metacharacters.
escaped_s = escape_string(s, "\$^()|.[]?*+")
# Replace variables like {foo} with a regex expression for that
# variable like (?<foo>.*)
regex_s = replace(escaped_s, r"{([^{}]*)}" => s"(.*)")
Regex("^$(regex_s)\$"), variables
end
"A step definition has a description, which is used to find it, a function to execute, and a location."
struct StepDefinition
description::String
descriptionregex::Regex
definition::Function
location::StepDefinitionLocation
variabletypes::Vector{Symbol}
function StepDefinition(description::String, definition::Function, location::StepDefinitionLocation)
descriptionregex, variables = makedescriptionregex(description)
new(description, descriptionregex, definition, location, variables)
end
end
struct StepDefinitionMatch
stepdefinition::StepDefinition
variables::Vector{Any}
StepDefinitionMatch(s::StepDefinition) = new(s, Any[])
function StepDefinitionMatch(s::StepDefinition, variables::AbstractArray{<:Any})
new(s, variables)
end
end
"""
The context in which a step definition executes. This context is used to share data between
different step definitions. It is created newly for each scenario. Thus, two scenarios cannot share
data.
"""
mutable struct StepDefinitionContext
storage::Dict{Symbol, Any}
datatable::Gherkin.DataTable
StepDefinitionContext() = new(Dict{Symbol, Any}(), Gherkin.DataTable())
end
"Find a variable value given a symbol name."
Base.getindex(context::StepDefinitionContext, sym::Symbol) = context.storage[sym]
"Set a variable value given a symbol name and a value."
Base.setindex!(context::StepDefinitionContext, value::Any, sym::Symbol) = context.storage[sym] = value
"Check for a mapping for a given key"
Base.haskey(context::StepDefinitionContext, sym::Symbol) = haskey(context.storage, sym)
#
# Global state
#
currentdefinitions = Vector{StepDefinition}()
currentfilename = ""
#
# Step definition macros
#
function step_definition_(definition::Expr, description::String)
# The step definition function takes a context and executes a bit of code supplied by the
# test writer. The bit of code is in $definition.
definitionfunction = :((context, vars) -> $definition(context, vars...))
descriptionregex = makedescriptionregex(description)
quote
# Push a given step definition to the global state so it can be found by the
# `StepDefinitionMatcher`.
push!(currentdefinitions,
StepDefinition(
$description,
(context, vars) -> begin
try
# Escape the step definition code so it gets the proper scope.
$(esc(definitionfunction))(context, vars)
# Any step definition that does not throw an exception is successful.
SuccessfulStepExecution()
catch ex
# StepAssertFailures are turned into a failed result here, but all other exceptions
# are propagated.
if ex isa StepAssertFailure
StepFailed(ex.assertion, ex.evaluated)
else
rethrow()
end
end
end,
StepDefinitionLocation(currentfilename, 0)))
end
end
"Provide a more user friendly @given macro for a step definition."
macro given(description, definition)
step_definition_(description, definition)
end
"Provide a more user friendly @when macro for a step definition."
macro when(description, definition)
step_definition_(description, definition)
end
"Provide a more user friendly @then macro for a step definition."
macro then(description, definition)
step_definition_(description, definition)
end
#
# Step Definition Matcher implementation
#
"""
Finds step definitions defined in a Julia file with the @given, @when, @then macros defined above.
Takes a source text as input and reads the code defined in it.
"""
struct FromMacroStepDefinitionMatcher <: StepDefinitionMatcher
stepdefinitions::Vector{StepDefinition}
filename::String
function FromMacroStepDefinitionMatcher(source::AbstractString; filename::String="<no filename>")
global currentdefinitions
global currentfilename
currentfilename = filename
# Read all step definitions as Julia code.
include_string(Main, source, filename)
# Save the step definitions found in the global variable `currentdefinitions` into a local
# variable, so that we can clear the global state and read another file.
mydefinitions = currentdefinitions
this = new(mydefinitions, filename)
currentdefinitions = Vector{StepDefinition}()
this
end
end
"""
converttypes(typesymbol::Symbol, value) :: Any
Convert `value` to the type named by `typesymbol`.
This is necessary because `Number` types are converted from strings to its primitive
type using the `parse` method, while other types are converted from strings using the
`convert` method.
# Example
```julia-repl
julia> converttypes(:Int, "123")
123
```
"""
function converttypes(typesymbol::Symbol, value) :: Any
t = eval(typesymbol)
if t <: Number
parse(t, value)
else
convert(t, value)
end
end
function matchdefinition(stepdefinition::StepDefinition, description::String) :: Union{StepDefinitionMatch,Nothing}
m = match(stepdefinition.descriptionregex, description)
if m !== nothing
variablestrings = String[String(x) for x in m.captures]
# Combine the parameter values captured in m with their types, that
# we have from the stepdefinition.
varswithtypes = zip(stepdefinition.variabletypes, variablestrings)
# Convert each parameter value to its expected type.
# Example: if `varswithtypes = [(:Int, "17"), (:Bool, "true")]`
# then `variables` will be `[17, true]`.
variables = [converttypes(typesymbol, value)
for (typesymbol, value) in varswithtypes]
StepDefinitionMatch(stepdefinition, variables)
else
nothing
end
end
"""
findstepdefinition(matcher::FromMacroStepDefinitionMatcher, step::Gherkin.ScenarioStep)
Find a step definition that has a description that matches the provided scenario step.
If no such step definition is found, throw a `NoMatchingStepDefinition`.
If more than one such step definition is found, throw a `NonUniqueStepDefinition`.
"""
function findstepdefinition(matcher::FromMacroStepDefinitionMatcher, step::Gherkin.ScenarioStep) :: StepDefinitionMatch
allsteps = map(x -> matchdefinition(x, step.text), matcher.stepdefinitions)
matches = filter(x -> x !== nothing, allsteps)
if isempty(matches)
throw(NoMatchingStepDefinition())
end
if length(matches) > 1
locations = map(m -> StepDefinitionLocation(matcher.filename, 0),
matches)
throw(NonUniqueStepDefinition(locations))
end
matches[1]
end
#
# Composite matcher
#
"""
Find step definitions from multiple other step definition matchers.
"""
mutable struct CompositeStepDefinitionMatcher <: StepDefinitionMatcher
matchers::Vector{StepDefinitionMatcher}
CompositeStepDefinitionMatcher(matchers...) = new([matchers...])
end
function findstepdefinition(composite::CompositeStepDefinitionMatcher, step::Gherkin.ScenarioStep)
matching = StepDefinitionMatch[]
nonuniquesfound = StepDefinitionLocation[]
# Recursively call `findstepdefinition(...)` on all sub-matchers.
# When they throw a `NonUniqueStepDefinition`, record the location so it can be shown to the
# user where the step definitions are.
# Ignore `NonUniqueStepDefinition` exceptions, as normally all but one of the matchers will
# throw it.
for matcher in composite.matchers
try
stepdefinitionmatch = findstepdefinition(matcher, step)
push!(matching, stepdefinitionmatch)
catch ex
if ex isa NonUniqueStepDefinition
append!(nonuniquesfound, ex.locations)
end
end
end
if length(matching) > 1 || !isempty(nonuniquesfound)
locations = vcat(nonuniquesfound, [d.stepdefinition.location for d in matching])
throw(NonUniqueStepDefinition(locations))
end
if isempty(matching)
throw(NoMatchingStepDefinition())
end
matching[1]
end
function addmatcher!(composite::CompositeStepDefinitionMatcher, matcher::StepDefinitionMatcher)
push!(composite.matchers, matcher)
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 2239 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TerseRealTimePresenter will present scenario steps only when a Scenario fails.
Otherwise it will only present scenario descriptions.
TerseRealTimePresenter(inner::RealTimePresenter)
The `inner` presenter is used to actually print the scenarios.
"""
mutable struct TerseRealTimePresenter <: RealTimePresenter
inner::RealTimePresenter
currentsteps::AbstractArray{Gherkin.ScenarioStep}
TerseRealTimePresenter(inner::RealTimePresenter) = new(inner, Gherkin.ScenarioStep[])
end
present(terse::TerseRealTimePresenter, feature::Gherkin.Feature) = present(terse.inner, feature)
function present(terse::TerseRealTimePresenter, scenario::Scenario)
terse.currentsteps = Gherkin.ScenarioStep[]
present(terse.inner, scenario)
end
present(terse::TerseRealTimePresenter, scenario::Scenario, ::ScenarioResult) = nothing
# Since the terse presenter shall not print any scenario steps unless they fail,
# we wait until we have a result to determine if it should be presenter or not.
# Therefore, we do nothing until we have a result.
present(::TerseRealTimePresenter, ::Gherkin.ScenarioStep) = nothing
present(terse::TerseRealTimePresenter, step::Gherkin.ScenarioStep, ::SuccessfulStepExecution) = push!(terse.currentsteps, step)
present(terse::TerseRealTimePresenter, step::Gherkin.ScenarioStep, result::SkippedStep) = present(terse.inner, step, result)
# This is a catch-all for failed steps.
function present(terse::TerseRealTimePresenter, step::Gherkin.ScenarioStep, result::StepExecutionResult)
for step in terse.currentsteps
present(terse.inner, step, SuccessfulStepExecution())
end
present(terse.inner, step, result)
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 6369 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior
using Behavior.Gherkin
@testset "Asserts " begin
@testset "Assert failure; Assert is 1 == 2; Failure has human readable string 1 == 2" begin
isfailure = false
try
@expect 1 == 2
catch ex
@test "1 == 2" == ex.assertion
@test "1 == 2" == ex.evaluated
isfailure = true
end
@test isfailure
end
@testset "Assert is x == y; Assertion string is 1 == 2" begin
isfailure = false
try
x = 1
y = 2
@expect x == y
catch ex
@test "x == y" == ex.assertion
@test "1 == 2" == ex.evaluated
isfailure = true
end
@test isfailure
end
@testset "Assert is x === y; Assertion string is 1 === 2" begin
isfailure = false
try
x = 1
y = 2
@expect x === y
catch ex
@test "x === y" == ex.assertion
@test "1 === 2" == ex.evaluated
isfailure = true
end
@test isfailure
end
@testset "Assert is x != y; Assertion string is 1 != 1" begin
isfailure = false
try
x = 1
y = 1
@expect x != y
catch ex
@test "x != y" == ex.assertion
@test "1 != 1" == ex.evaluated
isfailure = true
end
@test isfailure
end
@testset "Assert is x !== y; Assertion string is 1 !== 1" begin
isfailure = false
try
x = 1
y = 1
@expect x !== y
catch ex
@test "x !== y" == ex.assertion
@test "1 !== 1" == ex.evaluated
isfailure = true
end
@test isfailure
end
@testset "Assert failure in included file; Assert is 1 == 2; Failure has human readable string 1 == 2" begin
matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context
@expect 1 == 2
end
""")
given = Gherkin.Given("some precondition")
context = Behavior.StepDefinitionContext()
m = Behavior.findstepdefinition(matcher, given)
args = Dict{Symbol, Any}()
stepfailed = m.stepdefinition.definition(context, args)
@test stepfailed.assertion == "1 == 2"
@test stepfailed.evaluated == "1 == 2"
end
@testset "Assert only one variable which contains true" begin
matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context
y = true
@expect y
end
""")
given = Gherkin.Given("some precondition")
context = Behavior.StepDefinitionContext()
m = Behavior.findstepdefinition(matcher, given)
args = Dict{Symbol, Any}()
stepAccomplished = m.stepdefinition.definition(context, args)
@test typeof(stepAccomplished) == Behavior.SuccessfulStepExecution
end
@testset "Assert only a function which result is true" begin
matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context
@expect all([true, true])
end
""")
given = Gherkin.Given("some precondition")
context = Behavior.StepDefinitionContext()
m = Behavior.findstepdefinition(matcher, given)
args = Dict{Symbol, Any}()
stepAccomplished = m.stepdefinition.definition(context, args)
@test typeof(stepAccomplished) == Behavior.SuccessfulStepExecution
end
@testset "Assert failure x == y in included file; Assert is 1 == 2; Failure has human readable string 1 == 2" begin
matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context
x = 1
y = 2
@expect x == y
end
""")
given = Gherkin.Given("some precondition")
context = Behavior.StepDefinitionContext()
m = Behavior.findstepdefinition(matcher, given)
args = Dict{Symbol, Any}()
stepfailed = m.stepdefinition.definition(context, args)
@test stepfailed.assertion == "x == y"
@test stepfailed.evaluated == "1 == 2"
end
@testset "Assert failure in included file; Assert is isempty([1]); Failure has human readable string isempty([1])" begin
matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context
@expect isempty([1])
end
""")
given = Gherkin.Given("some precondition")
context = Behavior.StepDefinitionContext()
m = Behavior.findstepdefinition(matcher, given)
args = Dict{Symbol, Any}()
stepfailed = m.stepdefinition.definition(context, args)
@test stepfailed.assertion == "isempty([1])"
@test stepfailed.evaluated == ""
end
@testset "Fail assertion used in step, Step is StepFailed with assertion 'Some reason'" begin
matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context
@fail "Some reason"
end
""")
given = Gherkin.Given("some precondition")
context = Behavior.StepDefinitionContext()
m = Behavior.findstepdefinition(matcher, given)
args = Dict{Symbol, Any}()
stepfailed = m.stepdefinition.definition(context, args)
@test stepfailed.assertion == "Some reason"
end
end
| Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 15081 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior:
Engine, ExecutorEngine, QuietRealTimePresenter, FromMacroStepDefinitionMatcher,
finish, issuccess, findstepdefinition, NoMatchingStepDefinition, runfeatures!,
Driver, readstepdefinitions!, OSAbstraction, StepDefinitionMatcher
using Behavior.Gherkin: Feature, FeatureHeader, Scenario, Given, ParseOptions
import Behavior: addmatcher!, findfileswithextension, readfile, runfeature!,
issuccess, finish, executionenvironment
@testset "Engine " begin
# Beware: This test actually exercises far too much of the code. It should be isolated to
# `Engine`` only.
@testset "Run a successful feature; Result is successful" begin
# Arrange
engine = ExecutorEngine(QuietRealTimePresenter())
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("successful step") do context end
""")
addmatcher!(engine, matcher)
successfulscenario = Scenario("", String[], ScenarioStep[Given("successful step")])
feature = Feature(FeatureHeader("", [], []), [successfulscenario])
# Act
runfeature!(engine, feature)
# Assert
result = finish(engine)
@test issuccess(result)
end
@testset "Run a failing feature; Result is not successful" begin
# Arrange
engine = ExecutorEngine(QuietRealTimePresenter())
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("failing step") do context
@expect 1 == 2
end
""")
addmatcher!(engine, matcher)
failingscenario = Scenario("", String[], ScenarioStep[Given("failing step")])
feature = Feature(FeatureHeader("", [], []), [failingscenario])
# Act
runfeature!(engine, feature)
# Assert
result = finish(engine)
@test !issuccess(result)
end
@testset "Run a failing and a successful feature; Result is not successful" begin
# Arrange
engine = ExecutorEngine(QuietRealTimePresenter())
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("failing step") do context
@expect 1 == 2
end
@given("successful step") do context end
""")
addmatcher!(engine, matcher)
failingscenario = Scenario("", String[], ScenarioStep[Given("failing step")])
successfulscenario = Scenario("", String[], ScenarioStep[Given("successful step")])
feature1 = Feature(FeatureHeader("fails", [], []), [failingscenario])
feature2 = Feature(FeatureHeader("succeeds", [], []), [successfulscenario])
# Act
runfeature!(engine, feature1)
runfeature!(engine, feature2)
# Assert
result = finish(engine)
@test !issuccess(result)
end
end
struct FakeResultAccumulator
success::Bool
end
issuccess(r::FakeResultAccumulator) = r.success
mutable struct FakeEngine <: Engine
matchers::Vector{StepDefinitionMatcher}
features::Vector{Feature}
accumulator::FakeResultAccumulator
errors::Vector{Gherkin.BadParseResult{Feature}}
FakeEngine(; finishresult=FakeResultAccumulator(true)) = new([], [], finishresult, [])
end
addmatcher!(engine::FakeEngine, m::StepDefinitionMatcher) = push!(engine.matchers, m)
runfeature!(engine::FakeEngine, feature::Feature, _keepgoing::Bool) = push!(engine.features, feature)
runfeature!(engine::FakeEngine, result::Gherkin.OKParseResult{Feature}, featurefile::String, keepgoing::Bool) = runfeature!(engine, result.value, keepgoing)
runfeature!(engine::FakeEngine, result::Gherkin.BadParseResult{Feature}, featurefile::String, _keepgoing::Bool) = push!(engine.errors, result)
finish(engine::FakeEngine) = engine.accumulator
executionenvironment(::FakeEngine) = Behavior.NoExecutionEnvironment()
struct FakeOSAbstraction <: OSAbstraction
fileswithext::Dict{String, Vector{String}}
filecontents::Dict{String, String}
findfileswithextension_args::Vector{Pair{String, String}}
FakeOSAbstraction(; fileswithext::Vector{Pair{String, Vector{String}}} = [],
filecontents::Dict{String, String} = Dict()) = new(Dict(fileswithext), filecontents, [])
end
function findfileswithextension(os::FakeOSAbstraction, path::String, extension::String)
push!(os.findfileswithextension_args, Pair(path, extension))
os.fileswithext[extension]
end
readfile(os::FakeOSAbstraction, path::String) = os.filecontents[path]
@testset "Driver " begin
@testset "Finding step definitions; One definition found; The engine has one matcher" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["somepath/file.jl"]],
filecontents = Dict("somepath/file.jl" => ""))
driver = Driver(osal, engine)
# Act
readstepdefinitions!(driver, "somepath")
# Assert
@test length(engine.matchers) == 1
end
@testset "Finding step definitions; Two definitions found; The engine has two matchers" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["somepath/file.jl", "somepath/file2.jl"]],
filecontents = Dict("somepath/file.jl" => "",
"somepath/file2.jl" => ""))
driver = Driver(osal, engine)
# Act
readstepdefinitions!(driver, "somepath")
# Assert
@test length(engine.matchers) == 2
end
@testset "Finding step definitions; Driver searches for .jl files" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["somepath/file.jl", "somepath/file2.jl"]],
filecontents = Dict("somepath/file.jl" => "",
"somepath/file2.jl" => ""))
driver = Driver(osal, engine)
# Act
readstepdefinitions!(driver, "somepath")
# Assert
@test osal.findfileswithextension_args[1][2] == ".jl"
end
@testset "Finding step definitions; Step path is features/steps; Driver searches features/steps" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["somepath/file.jl", "somepath/file2.jl"]],
filecontents = Dict("somepath/file.jl" => "",
"somepath/file2.jl" => ""))
driver = Driver(osal, engine)
# Act
readstepdefinitions!(driver, "features/steps")
# Assert
@test osal.findfileswithextension_args[1][1] == "features/steps"
end
@testset "Finding step definitions; Step path is features/othersteps; Driver search features/othersteps" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["somepath/file.jl", "somepath/file2.jl"]],
filecontents = Dict("somepath/file.jl" => "",
"somepath/file2.jl" => ""))
driver = Driver(osal, engine)
# Act
readstepdefinitions!(driver, "features/othersteps")
# Assert
@test osal.findfileswithextension_args[1][1] == "features/othersteps"
end
@testset "Reading step definitions; Step definition has successful scenario step; The matcher can find that step" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["features/steps/file.jl"]],
filecontents = Dict("features/steps/file.jl" => """
using Behavior
@given("successful step") do context end
"""))
driver = Driver(osal, engine)
successfulstep = Given("successful step")
# Act
readstepdefinitions!(driver, "features/othersteps")
# Assert
# This method throws if no such step definition was found.
findstepdefinition(engine.matchers[1], successfulstep)
end
@testset "Reading step definitions; Step definition has no scenario step; Matcher cannot find that senario step" begin
# Arrange
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".jl" => ["features/steps/file.jl"]],
filecontents = Dict("features/steps/file.jl" => """
using Behavior
"""))
driver = Driver(osal, engine)
successfulstep = Given("successful step")
# Act
readstepdefinitions!(driver, "features/othersteps")
# Assert
# This method throws if no such step definition was found.
@test_throws NoMatchingStepDefinition findstepdefinition(engine.matchers[1], successfulstep)
end
@testset "Running feature files; One successful feature found; One feature is executed" begin
# Arrange
filecontents = Dict("features/file.feature" => """
Feature: Some feature
Scenario: A scenario
Given successful step
""")
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".feature" => ["features/file.feature"]],
filecontents = filecontents)
driver = Driver(osal, engine)
# Act
result = runfeatures!(driver, "features")
# Assert
@test length(engine.features) == 1
end
@testset "Running feature files; Two successful features found; Two features are executed" begin
# Arrange
filecontents = Dict("features/file1.feature" => """
Feature: Some feature
Scenario: A scenario
Given successful step
""",
"features/file2.feature" => """
Feature: Some other feature
Scenario: Another scenario
Given successful step
""")
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".feature" => ["features/file1.feature",
"features/file2.feature"]],
filecontents = filecontents)
driver = Driver(osal, engine)
# Act
result = runfeatures!(driver, "features")
# Assert
@test length(engine.features) == 2
end
@testset "Running feature files; Two successful features found; They are also read from file" begin
# Arrange
filecontents = Dict("features/file1.feature" => """
Feature: Some feature
Scenario: A scenario
Given successful step
""",
"features/file2.feature" => """
Feature: Some other feature
Scenario: Another scenario
Given successful step
""")
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".feature" => ["features/file1.feature",
"features/file2.feature"]],
filecontents = filecontents)
driver = Driver(osal, engine)
# Act
result = runfeatures!(driver, "features")
# Assert
featuredescriptions = [f.header.description for f in engine.features]
@test "Some feature" in featuredescriptions
@test "Some other feature" in featuredescriptions
end
@testset "Running feature files; Two successful features found; The result is successful" begin
# Arrange
filecontents = Dict("features/file1.feature" => """
Feature: Some feature
Scenario: A scenario
Given successful step
""",
"features/file2.feature" => """
Feature: Some other feature
Scenario: Another scenario
Given successful step
""")
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".feature" => ["features/file1.feature",
"features/file2.feature"]],
filecontents = filecontents)
driver = Driver(osal, engine)
# Act
result = runfeatures!(driver, "features")
# Assert
@test issuccess(result)
end
@testset "Running feature files with parse options; One scenario with out-of-order steps; Feature is executed" begin
# Arrange
filecontents = Dict("features/file.feature" => """
Feature: Some feature
Scenario: A scenario with out-of-order steps
Then a postcondition
When an action
Given a precondition
""")
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".feature" => ["features/file.feature"]],
filecontents = filecontents)
driver = Driver(osal, engine)
# Act
result = runfeatures!(driver, "features", parseoptions = ParseOptions(allow_any_step_order = true))
# Assert
@test length(engine.features) == 1
end
@testset "Running feature files; One feature has a syntax error; No exception is thrown" begin
# I need to rewrite these tests properly, because the mock lots of things that I think I'd
# like to be part of the test. This test however, should throw an exception, which used to be the
# case.
# Arrange
filecontents = Dict("features/file1.feature" => """
Feature: Some feature
Scenario: A scenario
When an action
Given successful step
""")
engine = FakeEngine()
osal = FakeOSAbstraction(fileswithext=[".feature" => ["features/file1.feature"]],
filecontents = filecontents)
driver = Driver(osal, engine)
# Act
runfeatures!(driver, "features")
# Assert
# Not throwing an exception above is enough.
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 11666 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior
using Behavior:
StepDefinitionMatcher,
Executor, executescenario, @expect, SuccessfulStepExecution,
issuccess, beforescenario, afterscenario, FromSourceExecutionEnvironment,
FromMacroStepDefinitionMatcher, QuietRealTimePresenter
using Behavior.Gherkin.Experimental
mutable struct FakeExecutionEnvironment <: Behavior.ExecutionEnvironment
afterscenariowasexecuted::Bool
FakeExecutionEnvironment() = new(false)
end
function Behavior.beforescenario(::FakeExecutionEnvironment, context::StepDefinitionContext, scenario::Gherkin.Scenario)
context[:beforescenariowasexecuted] = true
end
function Behavior.afterscenario(f::FakeExecutionEnvironment, context::StepDefinitionContext, scenario::Gherkin.Scenario)
context[:afterscenariowasexecuted] = true
f.afterscenariowasexecuted = true
end
struct SingleStepDefinitionMatcher <: StepDefinitionMatcher
stepbody::Function
end
function Behavior.findstepdefinition(
s::SingleStepDefinitionMatcher,
step::Behavior.Gherkin.ScenarioStep)
stepdefinition = (context, args) -> begin
s.stepbody(context, args)
SuccessfulStepExecution()
end
StepDefinitionMatch(StepDefinition("some text", stepdefinition, StepDefinitionLocation("", 0)))
end
@testset "Execution Environment" begin
@testset "Scenario Execution Environment" begin
@testset "beforescenario is defined; beforescenario is executed before the scenario" begin
# Arrange
env = FakeExecutionEnvironment()
# This step definition tests that the symbol :beforescenariowasexecuted is present in
# the context at execution time. This symbol should be set by the
# FakeExecutionEnvironment.
stepdefmatcher = SingleStepDefinitionMatcher((context, args) -> @assert context[:beforescenariowasexecuted])
executor = Executor(stepdefmatcher; executionenv=env)
scenario = Scenario("Description", String[], ScenarioStep[Given("")])
# Act
result = executescenario(executor, Background(), scenario)
# Assert
@test issuccess(result.steps[1])
end
@testset "beforescenario is the default noop; beforescenario is not executed before the scenario" begin
# Arrange
stepdefmatcher = SingleStepDefinitionMatcher((context, args) -> @assert !haskey(context, :beforescenariowasexecuted))
executor = Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[Given("")])
# Act
result = executescenario(executor, Background(), scenario)
# Assert
@test issuccess(result.steps[1])
end
@testset "afterscenario is defined; afterscenario has not been called at scenario execution" begin
# Arrange
env = FakeExecutionEnvironment()
# This step definition tests that the symbol :beforescenariowasexecuted is present in
# the context at execution time. This symbol should be set by the
# FakeExecutionEnvironment.
stepdefmatcher = SingleStepDefinitionMatcher((context, args) -> @assert !haskey(context, :afterscenariowasexecuted))
executor = Executor(stepdefmatcher; executionenv=env)
scenario = Scenario("Description", String[], ScenarioStep[Given("")])
# Act
result = executescenario(executor, Background(), scenario)
# Assert
@test issuccess(result.steps[1])
end
@testset "afterscenario is defined; afterscenario is called" begin
# Arrange
env = FakeExecutionEnvironment()
# This step definition tests that the symbol :beforescenariowasexecuted is present in
# the context at execution time. This symbol should be set by the
# FakeExecutionEnvironment.
stepdefmatcher = SingleStepDefinitionMatcher((context, args) -> nothing)
executor = Executor(stepdefmatcher; executionenv=env)
scenario = Scenario("Description", String[], ScenarioStep[Given("")])
# Act
result = executescenario(executor, Background(), scenario)
# Assert
@test env.afterscenariowasexecuted
end
end
@testset "FromSourceExecutionEnvironment" begin
@testset "beginscenario is defined in source; beginscenario is executed" begin
# Arrange
env = FromSourceExecutionEnvironment("""
using Behavior
@beforescenario() do context, scenario
context[:beforescenariowasexecuted] = true
end
""")
context = StepDefinitionContext()
scenario = Gherkin.Scenario("", String[], ScenarioStep[])
# Act
beforescenario(env, context, scenario)
# Assert
@test context[:beforescenariowasexecuted]
end
@testset "No beginscenario is defined in source; beginscenario is a noop" begin
# Arrange
env = FromSourceExecutionEnvironment("")
context = StepDefinitionContext()
scenario = Gherkin.Scenario("", String[], ScenarioStep[])
# Act
beforescenario(env, context, scenario)
# Assert
@test !haskey(context, :beforescenariowasexecuted)
end
@testset "afterscenario is defined in source; afterscenario is executed" begin
# Arrange
env = FromSourceExecutionEnvironment("""
using Behavior
@afterscenario() do context, scenario
context[:afterscenariowasexecuted] = true
end
""")
context = StepDefinitionContext()
scenario = Gherkin.Scenario("", String[], ScenarioStep[])
# Act
afterscenario(env, context, scenario)
# Assert
@test context[:afterscenariowasexecuted]
end
@testset "No afterscenario is defined in source; afterscenario is a noop" begin
# Arrange
env = FromSourceExecutionEnvironment("")
context = StepDefinitionContext()
scenario = Gherkin.Scenario("", String[], ScenarioStep[])
# Act
afterscenario(env, context, scenario)
# Assert
@test !haskey(context, :afterscenariowasexecuted)
end
end
@testset "Before and After feature hooks" begin
@testset "beforefeature saves the feature; Feature runs; Feature is found in list" begin
# Arrange
# Define @beforefeature
env = FromSourceExecutionEnvironment("""
using Behavior
using Behavior.Gherkin
features = Gherkin.Feature[]
@beforefeature() do feature
push!(features, feature)
end
""")
# Step definitions
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@then("the current feature has description \\"{String}\\"") do context, description
currentfeature = Main.features[end]
@expect currentfeature.header.description == description
end
""")
# Feature
input = Experimental.ParserInput("""
Feature: This is a feature description
Scenario: Check the feature description
Then the current feature has description "This is a feature description"
""")
parser = Experimental.FeatureFileParser()
parserresult = parser(input)
feature = parserresult.value
executor = Executor(matcher, QuietRealTimePresenter(), executionenv=env)
# Act
featureresult = executefeature(executor, feature)
# Assert
steps = featureresult.scenarioresults[1].steps
@test issuccess(steps[1])
end
@testset "afterfeature saves the feature; Feature runs; Feature is not found while running the steps" begin
# Arrange
# Define @afterfeature
env = FromSourceExecutionEnvironment("""
using Behavior
using Behavior.Gherkin
features = Gherkin.Feature[]
@afterfeature() do feature
push!(features, feature)
end
""")
# Step definitions
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@then("the feature has not been added to the list") do context
@expect Main.features == []
end
""")
# Feature
input = Experimental.ParserInput("""
Feature: This is a feature description
Scenario: Afterfeature adds the feature to a list
Then the feature has not been added to the list
""")
parser = Experimental.FeatureFileParser()
parserresult = parser(input)
feature = parserresult.value
executor = Executor(matcher, QuietRealTimePresenter(), executionenv=env)
# Act
featureresult = executefeature(executor, feature)
# Assert
steps = featureresult.scenarioresults[1].steps
@test issuccess(steps[1])
end
@testset "afterfeature saves the feature; Feature runs; Feature is found after execution" begin
# Arrange
# Define @afterfeature
env = FromSourceExecutionEnvironment("""
using Behavior
using Behavior.Gherkin
features = Gherkin.Feature[]
@afterfeature() do feature
push!(features, feature)
end
""")
# Step definitions
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@then("the feature has not been added to the list") do context
@expect Main.features == []
end
""")
# Feature
input = Experimental.ParserInput("""
Feature: This is a feature description
Scenario: Afterfeature adds the feature to a list
Then the feature has not been added to the list
""")
parser = Experimental.FeatureFileParser()
parserresult = parser(input)
feature = parserresult.value
executor = Executor(matcher, QuietRealTimePresenter(), executionenv=env)
# Act
featureresult = executefeature(executor, feature)
# Assert
@test Main.features == [feature]
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 5761 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using .Gherkin: DataTable, DataTableRow
@testset "Executor Datatables " begin
@testset "Scenario step has a data table; Context contains the data tables" begin
table = DataTable(
DataTableRow[
["header1", "header2"],
["value 11", "value 12"],
["value 21", "value 22"],
]
)
given = Given("Some precondition", datatable=table)
function check_datatable_step_definition(context::StepDefinitionContext, _args)
expectedtable = DataTable(
DataTableRow[
["header1", "header2"],
["value 11", "value 12"],
["value 21", "value 22"],
]
)
if context.datatable == expectedtable
Behavior.SuccessfulStepExecution()
else
Behavior.StepFailed("")
end
end
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => check_datatable_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
end
@testset "Scenario step has no data table; Context table is empty" begin
given = Given("Some precondition")
function check_datatable_step_definition(context::StepDefinitionContext, _args)
if context.datatable == []
Behavior.SuccessfulStepExecution()
else
Behavior.StepFailed("")
end
end
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => check_datatable_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
end
@testset "First step has table but not second; Second context table is empty" begin
given1 = Given("Some precondition")
given2 = Given("Some other precondition")
function check_datatable_step_definition(context::StepDefinitionContext, _args)
if context.datatable == []
Behavior.SuccessfulStepExecution()
else
Behavior.StepFailed("")
end
end
stepdefmatcher = FakeStepDefinitionMatcher(Dict(
given1 => successful_step_definition,
given2 => check_datatable_step_definition
))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given1, given2])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[2], Behavior.SuccessfulStepExecution)
end
@testset "Scenario Outline step has a data table; Context contains the data tables" begin
table = DataTable(
DataTableRow[
["header1", "header2"],
]
)
given = Given("Some precondition <foo>", datatable=table)
when = When("Some precondition", datatable=table)
then = Then("Some precondition", datatable=table)
function check_datatable_step_definition(context::StepDefinitionContext, _args)
expectedtable = DataTable(
DataTableRow[
["header1", "header2"],
]
)
if context.datatable == expectedtable
Behavior.SuccessfulStepExecution()
else
Behavior.StepFailed("")
end
end
stepdefmatcher = FakeStepDefinitionMatcher(Dict(
Given("Some precondition 1") => check_datatable_step_definition,
Given("Some precondition 2") => check_datatable_step_definition,
Given("Some precondition 3") => check_datatable_step_definition,
when => check_datatable_step_definition,
then => check_datatable_step_definition,
))
executor = Behavior.Executor(stepdefmatcher)
scenario = ScenarioOutline(
"Description",
String[], # Tags
ScenarioStep[given, when, then], # Steps
["foo"], # Placeholders
[ "1" "2"] # Examples
)
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult[1].steps[1], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult[1].steps[2], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult[1].steps[3], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult[2].steps[1], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult[2].steps[2], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult[2].steps[3], Behavior.SuccessfulStepExecution)
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 4796 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Gherkin
using Behavior.Gherkin: ScenarioStep
using Behavior
using Behavior: StepDefinitionContext, StepDefinition, StepDefinitionLocation
using Behavior: Executor, StepExecutionResult, QuietRealTimePresenter, executefeature
import Behavior: present
@testset "Feature Executor " begin
@testset "Execute a feature; Feature has one scenario; Feature result has one scenario result" begin
presenter = QuietRealTimePresenter()
given = Given("some precondition")
matcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
scenario = Scenario("some scenario", String[], ScenarioStep[given])
featureheader = FeatureHeader("Some feature", [], [])
feature = Feature(featureheader, [scenario])
executor = Executor(matcher, presenter)
featureresult = executefeature(executor, feature)
@test length(featureresult.scenarioresults) == 1
end
@testset "Execute a feature; Feature has two scenarios; Result has two scenario results" begin
presenter = QuietRealTimePresenter()
given = Given("some precondition")
matcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
scenario1 = Scenario("some scenario", String[], ScenarioStep[given])
scenario2 = Scenario("some other scenario", String[], ScenarioStep[given])
featureheader = FeatureHeader("Some feature", [], [])
feature = Feature(featureheader, [scenario1, scenario2])
executor = Executor(matcher, presenter)
featureresult = executefeature(executor, feature)
@test length(featureresult.scenarioresults) == 2
end
@testset "Execute a feature; Feature has three scenarios; Scenarios are executed in order" begin
presenter = QuietRealTimePresenter()
given = Given("some precondition")
matcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
scenario1 = Scenario("some scenario", String[], ScenarioStep[given])
scenario2 = Scenario("some other scenario", String[], ScenarioStep[given])
scenario3 = Scenario("some third scenario", String[], ScenarioStep[given])
featureheader = FeatureHeader("Some feature", [], [])
feature = Feature(featureheader, [scenario1, scenario2, scenario3])
executor = Executor(matcher, presenter)
featureresult = executefeature(executor, feature)
@test featureresult.scenarioresults[1].scenario == scenario1
@test featureresult.scenarioresults[2].scenario == scenario2
@test featureresult.scenarioresults[3].scenario == scenario3
end
@testset "Execute a feature; Feature has one failing scenario; Scenario result has a failing step" begin
presenter = QuietRealTimePresenter()
given = Given("some precondition")
matcher = FakeStepDefinitionMatcher(Dict(given => failed_step_definition))
scenario = Scenario("some scenario", String[], ScenarioStep[given])
featureheader = FeatureHeader("Some feature", [], [])
feature = Feature(featureheader, [scenario])
executor = Executor(matcher, presenter)
featureresult = executefeature(executor, feature)
@test featureresult.scenarioresults[1].steps[1] isa Behavior.StepFailed
end
@testset "Execute a feature; One Scenario and an Outline with two examples; Three results" begin
presenter = QuietRealTimePresenter()
step1 = Given("step 1")
step2 = Given("step 2")
scenario = Scenario("some scenario", String[], ScenarioStep[step1])
outline = ScenarioOutline("", String[], ScenarioStep[Given("step <stepnumber>")], ["stepnumber"], ["1" "2"])
matcher = FakeStepDefinitionMatcher(Dict(step1 => successful_step_definition,
step2 => successful_step_definition))
featureheader = FeatureHeader("Some feature", [], [])
feature = Feature(featureheader, [scenario, outline])
executor = Executor(matcher, presenter)
featureresult = executefeature(executor, feature)
@test length(featureresult.scenarioresults) == 3
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 2343 | using Behavior.Gherkin.Experimental
using Behavior: ExecutorEngine, FromMacroStepDefinitionMatcher
using Behavior: addmatcher!, runfeature!, finish, issuccess
@testset "Executor options " begin
@testset "Don't keep going: Run a failing feature; Other step not executed" begin
# Arrange
engine = ExecutorEngine(QuietRealTimePresenter())
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("ok step") do context
end
@given("failing step") do context
@expect 1 == 2
end
""")
addmatcher!(engine, matcher)
source = ParserInput("""
Feature: Fails first scenario
Scenario: This fails
Given failing step
Scenario: This will not run with keepgoing=false
Given ok step
""")
parser = FeatureFileParser()
parseresult = parser(source)
feature = parseresult.value
# Act
runfeature!(engine, feature; keepgoing=false)
# Assert
result = finish(engine)
@test !issuccess(result)
@test result.features[1].n_failure == 1
@test result.features[1].n_success == 0
end
@testset "Keep going: Run a failing feature; Other step executed" begin
# Arrange
engine = ExecutorEngine(QuietRealTimePresenter())
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("ok step") do context
end
@given("failing step") do context
@expect 1 == 2
end
""")
addmatcher!(engine, matcher)
source = ParserInput("""
Feature: Fails first scenario
Scenario: This fails
Given failing step
Scenario: This will not run with keepgoing=true
Given ok step
""")
parser = FeatureFileParser()
parseresult = parser(source)
feature = parseresult.value
# Act
runfeature!(engine, feature; keepgoing=true)
# Assert
result = finish(engine)
@test !issuccess(result)
@test result.features[1].n_failure == 1
@test result.features[1].n_success == 1
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 5496 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Gherkin
using Behavior.Gherkin: ScenarioStep, Background
using Behavior
using Behavior: StepDefinitionContext, StepDefinition, StepDefinitionLocation
using Behavior: Executor, StepExecutionResult, QuietRealTimePresenter, executefeature, ScenarioResult
import Behavior: present
mutable struct FakeRealTimePresenter <: Behavior.RealTimePresenter
scenarios::Vector{Scenario}
scenarioresults::Vector{ScenarioResult}
steps::Vector{ScenarioStep}
results::Dict{ScenarioStep, StepExecutionResult}
features::Vector{Feature}
FakeRealTimePresenter() = new([], [], [], Dict(), [])
end
present(p::FakeRealTimePresenter, scenario::Scenario) = push!(p.scenarios, scenario)
present(p::FakeRealTimePresenter, _scenario::Scenario, result::ScenarioResult) = push!(p.scenarioresults, result)
present(p::FakeRealTimePresenter, step::ScenarioStep) = push!(p.steps, step)
present(p::FakeRealTimePresenter, step::ScenarioStep, result::StepExecutionResult) = p.results[step] = result
present(p::FakeRealTimePresenter, feature::Feature) = push!(p.features, feature)
stepresult(p::FakeRealTimePresenter, step::ScenarioStep) = p.results[step]
@testset "Executor Presentation" begin
@testset "Execution presentation; Scenario is executed; Scenario is presented" begin
presenter = FakeRealTimePresenter()
matcher = FakeStepDefinitionMatcher(Dict())
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[])
Behavior.executescenario(executor, Background(),scenario)
@test presenter.scenarios[1] == scenario
end
@testset "Execution presentation; Scenario is executed; ScenarioResult is presented" begin
presenter = FakeRealTimePresenter()
matcher = FakeStepDefinitionMatcher(Dict())
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[])
Behavior.executescenario(executor, Background(),scenario)
@test presenter.scenarioresults[1].scenario == scenario
end
@testset "Execution presentation; Scenario has on Given; Given is presented" begin
presenter = FakeRealTimePresenter()
given = Given("some precondition")
matcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[given])
Behavior.executescenario(executor, Background(),scenario)
@test presenter.steps[1] == given
end
@testset "Execution presentation; Scenario step is successful; Step is presented as successful" begin
presenter = FakeRealTimePresenter()
given = Given("some precondition")
matcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[given])
Behavior.executescenario(executor, Background(),scenario)
@test stepresult(presenter, given) == Behavior.SuccessfulStepExecution()
end
@testset "Execution presentation; Scenario step fails; Next is also presented" begin
presenter = FakeRealTimePresenter()
given = Given("some precondition")
when = When("some action")
matcher = FakeStepDefinitionMatcher(Dict(given => failed_step_definition,
when => successful_step_definition))
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[given, when])
Behavior.executescenario(executor, Background(),scenario)
@test presenter.steps[2] == when
end
@testset "Execution presentation; Scenario step fails; Next has result Skipped" begin
presenter = FakeRealTimePresenter()
given = Given("some precondition")
when = When("some action")
matcher = FakeStepDefinitionMatcher(Dict(given => failed_step_definition,
when => successful_step_definition))
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[given, when])
Behavior.executescenario(executor, Background(),scenario)
@test stepresult(presenter, when) == Behavior.SkippedStep()
end
@testset "Execution presentation; Feature is executed; Feature is presented" begin
presenter = FakeRealTimePresenter()
matcher = FakeStepDefinitionMatcher(Dict())
executor = Executor(matcher, presenter)
scenario = Scenario("Some scenario", String[], ScenarioStep[])
feature = Feature(FeatureHeader("", [], []), [scenario])
Behavior.executefeature(executor, feature)
@test presenter.features[1] == feature
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 15475 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Gherkin
using Behavior.Gherkin: ScenarioStep, Background
using Behavior
using Behavior: StepDefinitionContext, StepDefinition, StepDefinitionLocation, StepDefinitionMatch
using Behavior: Executor, StepExecutionResult, QuietRealTimePresenter, executefeature
import Behavior: present
successful_step_definition(::StepDefinitionContext, args) = Behavior.SuccessfulStepExecution()
failed_step_definition(::StepDefinitionContext, args) = Behavior.StepFailed("")
error_step_definition(::StepDefinitionContext, args) = error("Some error")
struct FakeStepDefinitionMatcher <: Behavior.StepDefinitionMatcher
steps::Dict{Behavior.Gherkin.ScenarioStep, Function}
end
function Behavior.findstepdefinition(s::FakeStepDefinitionMatcher, step::Behavior.Gherkin.ScenarioStep)
if step in keys(s.steps)
StepDefinitionMatch(StepDefinition("some text", s.steps[step], StepDefinitionLocation("", 0)))
else
throw(Behavior.NoMatchingStepDefinition())
end
end
struct ThrowingStepDefinitionMatcher <: Behavior.StepDefinitionMatcher
ex::Exception
end
Behavior.findstepdefinition(matcher::ThrowingStepDefinitionMatcher, ::Behavior.Gherkin.ScenarioStep) = throw(matcher.ex)
@testset "Executor " begin
@testset "Execute a one-step scenario; No matching step found; Result is NoStepDefinitionFound" begin
stepdefmatcher = ThrowingStepDefinitionMatcher(Behavior.NoMatchingStepDefinition())
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[Given("some precondition")])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.NoStepDefinitionFound)
end
@testset "Execute a one-step scenario; The matching step is successful; Result is Successful" begin
given = Given("Some precondition")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
end
@testset "Execute a one-step scenario; The matching step fails; Result is Failed" begin
given = Given("Some precondition")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => failed_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.StepFailed)
end
@testset "Execute a one-step scenario; The matching step throws an error; Result is Error" begin
given = Given("Some precondition")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => error_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.UnexpectedStepError)
end
@testset "Execute a two-step scenario; First step throws an error; Second step is Skipped" begin
given = Given("Some precondition")
when = When("some action")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => error_step_definition,
when => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given, when])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[2], Behavior.SkippedStep)
end
@testset "Execute a two-step scenario; First step fails; Second step is Skipped" begin
given = Given("Some precondition")
when = When("some action")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => failed_step_definition,
when => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given, when])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[2], Behavior.SkippedStep)
end
@testset "Execute a two-step scenario; Both steps succeed; All results are Success" begin
given = Given("Some precondition")
when = When("some action")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition,
when => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given, when])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult.steps[2], Behavior.SuccessfulStepExecution)
end
@testset "Execute a three-step scenario; All steps succeeed; All results are Success" begin
given = Given("Some precondition")
when = When("some action")
then = Then("some postcondition")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition,
when => successful_step_definition,
then => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given, when, then])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult.steps[2], Behavior.SuccessfulStepExecution)
@test isa(scenarioresult.steps[3], Behavior.SuccessfulStepExecution)
end
@testset "Execute a scenario; Scenario is provided; Scenario is returned with the result" begin
given = Given("Some precondition")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("This is a scenario", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test scenarioresult.scenario == scenario
end
@testset "Execute a scenario; No unique step definition found; Result is NonUniqueMatch" begin
stepdefmatcher = ThrowingStepDefinitionMatcher(Behavior.NonUniqueStepDefinition([]))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[Given("some precondition")])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.NonUniqueMatch)
end
@testset "Execute a ScenarioOutline; Outline has two examples; Two scenarios are returned" begin
outline = ScenarioOutline("", String[], ScenarioStep[Given("step <stepnumber>")], ["stepnumber"], ["1" "2"])
step1 = Given("step 1")
step2 = Given("step 2")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(step1 => successful_step_definition,
step2 => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
outlineresult = Behavior.executescenario(executor, Background(), outline)
@test length(outlineresult) == 2
end
@testset "Execute a ScenarioOutline; Outline has a successful and a failing example; First is success, second is fail" begin
outline = ScenarioOutline("", String[], ScenarioStep[Given("step <stepnumber>")], ["stepnumber"], ["1" "2"])
step1 = Given("step 1")
step2 = Given("step 2")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(step1 => successful_step_definition,
step2 => failed_step_definition))
executor = Behavior.Executor(stepdefmatcher)
outlineresult = Behavior.executescenario(executor, Background(), outline)
@test outlineresult[1].steps[1] isa Behavior.SuccessfulStepExecution
@test outlineresult[2].steps[1] isa Behavior.StepFailed
end
@testset "Execute a ScenarioOutline; Outline has three examples; Three scenarios are returned" begin
outline = ScenarioOutline("", String[], ScenarioStep[Given("step <stepnumber>")], ["stepnumber"], ["1" "2" "3"])
step1 = Given("step 1")
step2 = Given("step 2")
step3 = Given("step 3")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(step1 => successful_step_definition,
step2 => successful_step_definition,
step3 => successful_step_definition))
executor = Behavior.Executor(stepdefmatcher)
outlineresult = Behavior.executescenario(executor, Background(), outline)
@test length(outlineresult) == 3
end
@testset "Block text" begin
@testset "Scenario step has a block text; Context contains the block text" begin
given = Given("Some precondition", block_text="Some block text")
function check_block_text_step_definition(context::StepDefinitionContext, _args)
if context[:block_text] == "Some block text"
Behavior.SuccessfulStepExecution()
else
Behavior.StepFailed("")
end
end
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => check_block_text_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
end
@testset "First step has block text, but second doesn't; The block text is cleared in the second step" begin
given = Given("Some precondition", block_text="Some block text")
when = When("some action")
function check_block_text_step_definition(context::StepDefinitionContext, _args)
if context[:block_text] == ""
Behavior.SuccessfulStepExecution()
else
Behavior.StepFailed("")
end
end
stepdefmatcher = FakeStepDefinitionMatcher(Dict(
given => successful_step_definition,
when => check_block_text_step_definition))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], [given, when])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[2], Behavior.SuccessfulStepExecution)
end
end
@testset "Backgrounds" begin
@testset "Execute a one-step Background; No matching step found; Result is NoStepDefinitionFound" begin
given = Given("some precondition")
stepdefmatcher = FakeStepDefinitionMatcher(Dict(given => successful_step_definition))
background = Background("A background", ScenarioStep[Given("some background precondition")])
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[Given("some precondition")])
scenarioresult = Behavior.executescenario(executor, background, scenario)
@test isa(scenarioresult.backgroundresult[1], Behavior.NoStepDefinitionFound)
end
@testset "Execute a one-step Background; A successful match found; Background result is Success" begin
given = Given("some precondition")
bgiven = Given("some background precondition")
stepdefmatcher = FakeStepDefinitionMatcher(
Dict(
given => successful_step_definition,
bgiven => successful_step_definition,
))
background = Background("A background", ScenarioStep[Given("some background precondition")])
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[Given("some precondition")])
scenarioresult = Behavior.executescenario(executor, background, scenario)
@test isa(scenarioresult.backgroundresult[1], Behavior.SuccessfulStepExecution)
end
@testset "Execute a one-step background; The matching step fails; Result is Failed" begin
given = Given("Some precondition")
bgiven = Given("some background precondition")
stepdefmatcher = FakeStepDefinitionMatcher(
Dict(
given => successful_step_definition,
bgiven => failed_step_definition,
))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
background = Background("Background description", ScenarioStep[bgiven])
scenarioresult = Behavior.executescenario(executor, background, scenario)
@test isa(scenarioresult.backgroundresult[1], Behavior.StepFailed)
end
@testset "Execute a one-step background; The background step fails; The Scenario step is skipped" begin
given = Given("Some precondition")
bgiven = Given("some background precondition")
stepdefmatcher = FakeStepDefinitionMatcher(
Dict(
given => successful_step_definition,
bgiven => failed_step_definition,
))
executor = Behavior.Executor(stepdefmatcher)
scenario = Scenario("Description", String[], ScenarioStep[given])
background = Background("Background description", ScenarioStep[bgiven])
scenarioresult = Behavior.executescenario(executor, background, scenario)
@test isa(scenarioresult.steps[1], Behavior.SkippedStep)
end
# TODO Additional tests to ensure that Backgrounds work as any Scenario section does
end
end
| Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 5433 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior
using Behavior.Gherkin
using Behavior: transformoutline
@testset "Scenario Outline " begin
@testset "Transform; Outline description is \"Some description\"; Result description is same" begin
outline = ScenarioOutline("Some description", String[],
ScenarioStep[Given("placeholder <foo>")],
["foo"],
["bar"])
scenarios = transformoutline(outline)
@test scenarios[1].description == "Some description"
end
@testset "Transform; Outline tags are @foo @bar; Result tags are @foo @bar" begin
outline = ScenarioOutline("", ["@foo", "@bar"],
ScenarioStep[Given("some <foo>")],
["foo"],
["bar"])
scenarios = transformoutline(outline)
@test scenarios[1].tags == ["@foo", "@bar"]
end
@testset "Transform; Scenario Outline has one example; One Scenario" begin
outline = ScenarioOutline("", String[],
ScenarioStep[Given("placeholder <foo>")],
["foo"],
["bar"])
scenarios = transformoutline(outline)
@test length(scenarios) == 1
scenario = scenarios[1]
@test scenario.steps[1] == Given("placeholder bar")
end
@testset "Transform; Placeholder is quux; quux is replaced by example" begin
outline = ScenarioOutline("", String[],
ScenarioStep[Given("placeholder <quux>")],
["quux"],
["baz"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenario.steps[1] == Given("placeholder baz")
end
@testset "Transform; Two placeholders foo, quux; foo and quux are replaced" begin
outline = ScenarioOutline("", String[],
ScenarioStep[Given("placeholders <foo> <quux>")],
["foo", "quux"],
["bar"; "baz"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenario.steps[1] == Given("placeholders bar baz")
end
@testset "Transform; Steps Given and When; Both steps are transformed" begin
steps = ScenarioStep[Given("placeholder <quux>"), When("other <quux>")]
outline = ScenarioOutline("", String[],
steps,
["quux"],
["baz"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenario.steps[1] == Given("placeholder baz")
@test scenario.steps[2] == When("other baz")
end
@testset "Transform; Steps Given and When again; Both steps are transformed" begin
steps = ScenarioStep[Given("placeholder <quux>"), When("another step <quux>")]
outline = ScenarioOutline("", String[],
steps,
["quux"],
["baz"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenario.steps[1] == Given("placeholder baz")
@test scenario.steps[2] == When("another step baz")
end
@testset "Transform; Step Then; Step is transformed" begin
steps = ScenarioStep[Then("step <quux>")]
outline = ScenarioOutline("", String[],
steps,
["quux"],
["baz"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenario.steps[1] == Then("step baz")
end
@testset "Transform; Two examples; Two scenarios in the result" begin
outline = ScenarioOutline("", String[],
ScenarioStep[Given("step <quux>")],
["quux"],
["bar" "baz"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenarios[1].steps[1] == Given("step bar")
@test scenarios[2].steps[1] == Given("step baz")
end
@testset "Transform; Placeholders in the block_text; Placeholders are replaced with examples" begin
outline = ScenarioOutline("", String[],
[Given(""; block_text="given <quux>"),
When(""; block_text="when <quux>"),
Then(""; block_text="then <quux>")],
["quux"],
["bar"])
scenarios = transformoutline(outline)
scenario = scenarios[1]
@test scenarios[1].steps[1] == Given(""; block_text="given bar")
@test scenarios[1].steps[2] == When(""; block_text="when bar")
@test scenarios[1].steps[3] == Then(""; block_text="then bar")
end
@testset "Transform; Outline examples are AbstractStrings; Interpolation works" begin
outline = ScenarioOutline("Some description", String[],
ScenarioStep[Given("placeholder <foo>")],
["foo"],
AbstractString["bar"])
scenarios = transformoutline(outline)
@test scenarios[1].description == "Some description"
end
# TODO: Mismatching placeholders
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 9507 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin
using Behavior:
ResultAccumulator, accumulateresult!, issuccess, featureresults,
FeatureResult, ScenarioResult, Given, SuccessfulStepExecution, Scenario, StepFailed
@testset "Result Accumulator " begin
@testset "Accumulate results; One feature with a successful scenario; Total result is success" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [successfulscenario])
accumulateresult!(accumulator, featureresult)
@test issuccess(accumulator)
end
@testset "Accumulate results; One feature with a failing scenario; Total result is fail" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
scenario = ScenarioResult(
[StepFailed("")],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [scenario])
accumulateresult!(accumulator, featureresult)
@test issuccess(accumulator) == false
end
@testset "Accumulate results; One scenario with one successful and one failing step; Total result is fail" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
when = When("some action")
scenario = ScenarioResult(
[SuccessfulStepExecution(), StepFailed("")],
Scenario("Some scenario", String[], ScenarioStep[given, when]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [scenario])
accumulateresult!(accumulator, featureresult)
@test issuccess(accumulator) == false
end
@testset "Accumulate results; Two scenarios, one failing; Total result is fail" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
when = When("some action")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
failingscenario = ScenarioResult(
[StepFailed("")],
Scenario("Some other scenario", String[], ScenarioStep[when]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [successfulscenario, failingscenario])
accumulateresult!(accumulator, featureresult)
@test issuccess(accumulator) == false
end
@testset "Accumulate results; One feature, one passing Scenario; One success and zero failures" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [successfulscenario])
accumulateresult!(accumulator, featureresult)
@test featureresults(accumulator)[1].n_success == 1
@test featureresults(accumulator)[1].n_failure == 0
end
@testset "Accumulate results; Two scenarios, one failing; One success and one failure" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
when = When("some action")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
failingscenario = ScenarioResult(
[StepFailed("")],
Scenario("Some other scenario", String[], ScenarioStep[when]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [successfulscenario, failingscenario])
accumulateresult!(accumulator, featureresult)
@test featureresults(accumulator)[1].n_success == 1
@test featureresults(accumulator)[1].n_failure == 1
end
@testset "Accumulate results; Seven scenarios, two failing; 5 success and 2 failures" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
when = When("some action")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
failingscenario = ScenarioResult(
[StepFailed("")],
Scenario("Some other scenario", String[], ScenarioStep[when]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature,
[successfulscenario, successfulscenario, successfulscenario, successfulscenario, successfulscenario,
failingscenario, failingscenario])
accumulateresult!(accumulator, featureresult)
@test featureresults(accumulator)[1].n_success == 5
@test featureresults(accumulator)[1].n_failure == 2
end
@testset "Accumulate results; Two successful features; Both have no failures" begin
accumulator = ResultAccumulator()
feature1 = Gherkin.Feature(FeatureHeader("1", [], []), Scenario[])
feature2 = Gherkin.Feature(FeatureHeader("1", [], []), Scenario[])
given = Given("some precondition")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
featureresult1 = FeatureResult(feature1, [successfulscenario])
featureresult2 = FeatureResult(feature1, [successfulscenario])
accumulateresult!(accumulator, featureresult1)
accumulateresult!(accumulator, featureresult2)
@test featureresults(accumulator)[1].n_success > 0
@test featureresults(accumulator)[1].n_failure == 0
@test featureresults(accumulator)[2].n_success > 0
@test featureresults(accumulator)[2].n_failure == 0
end
@testset "Accumulate results; Failing Background with a successful Scenario; Total result is failed" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background("Failing background", ScenarioStep[Given("some failing background step")]),
StepExecutionResult[
StepFailed(""),
])
featureresult = FeatureResult(feature, [successfulscenario])
accumulateresult!(accumulator, featureresult)
@test issuccess(accumulator) == false
end
@testset "Accumulate results; One feature with a successful scenario; Results accumulator is not empty" begin
accumulator = ResultAccumulator()
feature = Gherkin.Feature(FeatureHeader("", [], []), Scenario[])
given = Given("some precondition")
successfulscenario = ScenarioResult(
[SuccessfulStepExecution()],
Scenario("Some scenario", String[], ScenarioStep[given]),
Background(),
ScenarioStep[])
featureresult = FeatureResult(feature, [successfulscenario])
accumulateresult!(accumulator, featureresult)
@test !isempty(accumulator)
end
@testset "Accumulate results; No features; Results accumulator is empty" begin
accumulator = ResultAccumulator()
@test isempty(accumulator)
end
@testset "Accumulate results; One feature with syntax error; Total result is failure" begin
accumulator = ResultAccumulator()
parseresult = Gherkin.BadParseResult{Feature}(:somereason, :someexpected, :someactual, 0, "Some line")
accumulateresult!(accumulator, parseresult, "features/some/path/to/my.feature")
@test issuccess(accumulator) == false
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 599 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include("engine_test.jl") | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 1330 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include("selection/tag_expressions_test.jl")
include("gherkin/experimental/runtests.jl")
include("gherkin/feature_test.jl")
include("gherkin/scenario_test.jl")
include("gherkin/scenario_outline_test.jl")
include("gherkin/scenario_descriptions_test.jl")
include("gherkin/tag_test.jl")
include("gherkin/data_tables_test.jl")
include("selection/tag_selection_test.jl")
include("executor_test.jl")
include("executor_options_test.jl")
include("executor_presentation_test.jl")
include("executor_feature_test.jl")
include("executor_datatables_test.jl")
include("step_def_test.jl")
include("result_accumulator_test.jl")
include("asserts_test.jl")
include("outlines_test.jl")
include("exec_env_test.jl")
include("variables_test.jl")
include("suggestion_test.jl") | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 657 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior
using Test
include("runsmalltests.jl")
include("runmediumtests.jl") | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 22559 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior
using Behavior: findstepdefinition, NonUniqueStepDefinition, StepDefinitionLocation, NoMatchingStepDefinition
using Behavior: FromMacroStepDefinitionMatcher, CompositeStepDefinitionMatcher, addmatcher!
using Behavior.Gherkin
using Behavior.Gherkin: Given, When, Then
@testset "Step definitions " begin
@testset "Find a step definition" begin
@testset "Find a step definition; A matching given step; A step is found" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
x = 1
end
""")
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
@test isa(stepdefinition.definition, Function)
end
@testset "Find a step definition; A non-matching given step; No step definition found" begin
given = Behavior.Gherkin.Given("some other definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
x = 1
end
""")
@test_throws Behavior.NoMatchingStepDefinition Behavior.findstepdefinition(stepdef_matcher, given)
end
@testset "Find a step definition; A matching given step with another description; A step is found" begin
given = Behavior.Gherkin.Given("some other definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some other definition") do context
x = 1
end
""")
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
@test isa(stepdefinition.definition, Function)
end
@testset "Find a step definition in another matcher; The other matcher has no matching step; No step is found" begin
# This test ensures that step definitions are local to a single matcher, so that they aren't
# kept globally.
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
x = 1
end
""")
# There is no step definitions here, so it should not find any matching definitions.
empty_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
""")
@test_throws Behavior.NoMatchingStepDefinition Behavior.findstepdefinition(empty_matcher, given)
end
end
@testset "Execute a step definition" begin
@testset "Execute a step definition; Store an int in context; Context stores the value" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
context[:x] = 1
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
stepdefinition.definition(context, args)
@test context[:x] == 1
end
@testset "Execute a step definition; Store a string in context; Context stores the value" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
context[:x] = "Some string"
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
stepdefinition.definition(context, args)
@test context[:x] == "Some string"
end
@testset "Execute a step definition; Retrieve a value from the context; Context value is present" begin
given = Behavior.Gherkin.Then(":x has value 1")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @then, @expect
@then(":x has value 1") do context
@expect context[:x] == 1
end
""")
context = Behavior.StepDefinitionContext()
context[:x] = 1
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test stepdefinition.definition(context, args) == Behavior.SuccessfulStepExecution()
end
@testset "Execute a step definition; An empty step definition; Success is returned" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test stepdefinition.definition(context, args) == Behavior.SuccessfulStepExecution()
end
@testset "Execute a step definition; An assert fails; StepFailed is returned" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given, @expect
@given("some definition") do context
@expect 1 == 2
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test stepdefinition.definition(context, args) isa Behavior.StepFailed
end
@testset "Execute a step definition; An empty When step; Success is returned" begin
when = When("some action")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @when
@when("some action") do context
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, when)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test stepdefinition.definition(context, args) == Behavior.SuccessfulStepExecution()
end
@testset "Execute a step definition; An empty Then step; Success is returned" begin
then = Then("some postcondition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @then
@then("some postcondition") do context
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, then)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test stepdefinition.definition(context, args) == Behavior.SuccessfulStepExecution()
end
@testset "Execute a step definition; Step throws an exception; The error is not caught" begin
given = Given("some precondition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
throw(ErrorException("Some error"))
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = findstepdefinition(stepdef_matcher, given)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test_throws ErrorException stepdefinition.definition(context, args)
end
@testset "Execute a step definition; Call a method defined in the steps file; Method is in scope" begin
when = Behavior.Gherkin.When("calling empty function foo")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @when
foo() = nothing
@when("calling empty function foo") do context
foo()
end
""")
context = Behavior.StepDefinitionContext()
stepmatch = Behavior.findstepdefinition(stepdef_matcher, when)
stepdefinition = stepmatch.stepdefinition
args = Dict{Symbol, Any}()
@test stepdefinition.definition(context, args) == Behavior.SuccessfulStepExecution()
end
end
@testset "Non-unique step definitions" begin
@testset "Find a step definition; Two step definitions have the same description; NonUniqueStepDefinition is thrown" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
end
@given("some definition") do context
end
""")
context = Behavior.StepDefinitionContext()
@test_throws NonUniqueStepDefinition Behavior.findstepdefinition(stepdef_matcher, given)
end
@testset "Find a step definition; Two step definitions have the same description; File is reported for both" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
end
@given("some definition") do context
end
"""; filename="steps.jl")
context = Behavior.StepDefinitionContext()
exception_thrown = false
try
Behavior.findstepdefinition(stepdef_matcher, given)
catch ex
if ex isa NonUniqueStepDefinition
exception_thrown = true
@test ex.locations[1].filename == "steps.jl"
@test ex.locations[2].filename == "steps.jl"
else
rethrow()
end
end
@assert exception_thrown "No NonUniqueStepDefinition exception was thrown!"
end
@testset "Find a step definition; Two step definitions have the same description; Another file is reported for both" begin
given = Behavior.Gherkin.Given("some definition")
stepdef_matcher = Behavior.FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some definition") do context
end
@given("some definition") do context
end
"""; filename="othersteps.jl")
context = Behavior.StepDefinitionContext()
exception_thrown = false
try
Behavior.findstepdefinition(stepdef_matcher, given)
catch ex
if ex isa NonUniqueStepDefinition
exception_thrown = true
@test ex.locations[1].filename == "othersteps.jl"
@test ex.locations[2].filename == "othersteps.jl"
else
rethrow()
end
end
@assert exception_thrown "No NonUniqueStepDefinition exception was thrown!"
end
end
@testset "Composite matcher" begin
@testset "Find a step definition from a composite; First matcher has the definition; Definition is found" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
""")
compositematcher = CompositeStepDefinitionMatcher(matcher1)
stepmatch = findstepdefinition(compositematcher, given)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.definition isa Function
@test stepdefinition.description == "some precondition"
end
@testset "Find a step definition from a composite; Second matcher has the definition; Definition is found" begin
given = Given("some other precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
""")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some other precondition") do context
end
""")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2)
stepmatch = findstepdefinition(compositematcher, given)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.definition isa Function
@test stepdefinition.description == "some other precondition"
end
@testset "Find two step definitions from a composite; Both exist in a matcher; Definitions are found" begin
given = Given("some other precondition")
when = When("some action")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
""")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some other precondition") do context
end
""")
matcher3 = FromMacroStepDefinitionMatcher("""
using Behavior: @when
@when("some action") do context
end
""")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2, matcher3)
stepmatch = findstepdefinition(compositematcher, given)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.definition isa Function
@test stepdefinition.description == "some other precondition"
stepmatch = findstepdefinition(compositematcher, when)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.description == "some action"
end
@testset "Find a step definition from a composite; Matching two definitions; Non unique step exception thrown" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
""")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
""")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2)
@test_throws NonUniqueStepDefinition findstepdefinition(compositematcher, given)
end
@testset "Find a step definition from a composite; Matching two definitions in one matcher; Non unique step exception thrown" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
@given("some precondition") do context
end
""")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
""")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2)
@test_throws NonUniqueStepDefinition findstepdefinition(compositematcher, given)
end
@testset "Find a step definition from a composite; Matches in both matchers; Non unique locations indicates both matchers" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
"""; filename="matcher1.jl")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
"""; filename="matcher2.jl")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2)
exception_is_thrown = false
try
findstepdefinition(compositematcher, given)
catch ex
if ex isa NonUniqueStepDefinition
exception_is_thrown = true
location_filenames = [location.filename for location in ex.locations]
@test "matcher1.jl" in location_filenames
@test "matcher2.jl" in location_filenames
else
rethrow()
end
end
@test exception_is_thrown
end
@testset "Find a step definition from a composite; Two matchings in one and one matching in second matcher; Non unique locations indicates both matchers" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
@given("some precondition") do context
end
"""; filename="matcher1.jl")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some precondition") do context
end
"""; filename="matcher2.jl")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2)
exception_is_thrown = false
try
findstepdefinition(compositematcher, given)
catch ex
if ex isa NonUniqueStepDefinition
exception_is_thrown = true
location_filenames = [location.filename for location in ex.locations]
@test "matcher1.jl" in location_filenames
@test "matcher2.jl" in location_filenames
else
rethrow()
end
end
@test exception_is_thrown
end
@testset "Find a step definition from a composite; No matches found; NoMatchingStepDefinition thrown" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
""")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
""")
compositematcher = CompositeStepDefinitionMatcher(matcher1, matcher2)
@test_throws NoMatchingStepDefinition findstepdefinition(compositematcher, given)
end
@testset "Add a matcher after construction; Definition is found" begin
given = Given("some precondition")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context end
""")
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher1)
stepmatch = findstepdefinition(compositematcher, given)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.definition isa Function
@test stepdefinition.description == "some precondition"
end
@testset "Add two step definitions to a composite; Both exist in a matcher; Definitions are found" begin
given = Given("some other precondition")
when = When("some action")
matcher1 = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some other precondition") do context
end
""")
matcher2 = FromMacroStepDefinitionMatcher("""
using Behavior: @when
@when("some action") do context
end
""")
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher1)
addmatcher!(compositematcher, matcher2)
stepmatch = findstepdefinition(compositematcher, given)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.definition isa Function
@test stepdefinition.description == "some other precondition"
stepmatch = findstepdefinition(compositematcher, when)
stepdefinition = stepmatch.stepdefinition
@test stepdefinition.description == "some action"
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 15221 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior: findmissingsteps, ExecutorEngine, suggestmissingsteps
@testset "Suggestions " begin
@testset "Find missing steps" begin
@testset "One step missing; Step is listed as missing in result" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
""")
executor = Executor(matcher, QuietRealTimePresenter())
missinggiven = Given("some step")
successfulscenario = Scenario("", String[], ScenarioStep[missinggiven])
feature = Feature(FeatureHeader("", [], []), [successfulscenario])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test missinggiven in result
end
@testset "No step missing; Step is not listed as missing in result" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some step") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
given = Given("some step")
successfulscenario = Scenario("", String[], ScenarioStep[given])
feature = Feature(FeatureHeader("", [], []), [successfulscenario])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test !(given in result)
end
@testset "One step missing, two found; Only missing step is listed" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context end
@when("some action") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
steps = [
Given("some missing step"),
Given("some precondition"),
When("some action"),
]
successfulscenario = Scenario("", String[], steps)
feature = Feature(FeatureHeader("", [], []), [successfulscenario])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test Given("some missing step") in result
@test !(Given("some precondition") in result)
@test !(When("some action") in result)
end
@testset "Two scenarios, two missing steps; Missing steps from both scenarios listed" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
scenario1 = Scenario("1", String[], ScenarioStep[Given("some missing step")])
scenario2 = Scenario("2", String[], ScenarioStep[Given("some other missing step")])
feature = Feature(FeatureHeader("", [], []), [scenario1, scenario2])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test Given("some missing step") in result
@test Given("some other missing step") in result
end
@testset "Two scenarios, one missing step; Step is only listed once" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
scenario1 = Scenario("1", String[], ScenarioStep[Given("some missing step")])
scenario2 = Scenario("2", String[], ScenarioStep[Given("some missing step")])
feature = Feature(FeatureHeader("", [], []), [scenario1, scenario2])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test result == [Given("some missing step")]
end
@testset "Background has a missing step; Missing step is listed" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
background = Background("", ScenarioStep[Given("some missing step")])
scenario1 = Scenario("1", String[], ScenarioStep[Given("some precondition")])
feature = Feature(FeatureHeader("", [], []), background, [scenario1])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test result == [Given("some missing step")]
end
@testset "Two missing steps with different block texts; Step is only listed once" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("some precondition") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
scenario1 = Scenario("1", String[], ScenarioStep[Given("some missing step"; block_text="1")])
scenario2 = Scenario("2", String[], ScenarioStep[Given("some missing step"; block_text="2")])
feature = Feature(FeatureHeader("", [], []), [scenario1, scenario2])
# Act
result = findmissingsteps(executor, feature)
# Assert
@test length(result) == 1
end
end
@testset "Suggestions" begin
@testset "One missing step; Add step according to suggestion; Step is found" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("successful step") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
scenario1 = Scenario("1", String[], ScenarioStep[Given("missing step")])
feature = Feature(FeatureHeader("", [], []), [scenario1])
# Act
missingstepscode = suggestmissingsteps(executor, feature)
# Assert
missingmatcher = FromMacroStepDefinitionMatcher(missingstepscode)
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher)
addmatcher!(compositematcher, missingmatcher)
assertexecutor = Executor(compositematcher, QuietRealTimePresenter())
result = findmissingsteps(assertexecutor, feature)
@test result == []
end
@testset "Many missing steps; Add steps according to suggestion; Steps are found" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("successful step") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
missingsteps = ScenarioStep[
Given("missing given"),
When("missing when"),
Then("missing then"),
]
scenario1 = Scenario("1", String[], missingsteps)
feature = Feature(FeatureHeader("", [], []), [scenario1])
# Act
missingstepscode = suggestmissingsteps(executor, feature)
# Assert
missingmatcher = FromMacroStepDefinitionMatcher(missingstepscode)
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher)
addmatcher!(compositematcher, missingmatcher)
assertexecutor = Executor(compositematcher, QuietRealTimePresenter())
result = findmissingsteps(assertexecutor, feature)
@test result == []
end
@testset "Missing step ending with a double-quote; Suggestion works" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
@given("successful step") do context end
""")
executor = Executor(matcher, QuietRealTimePresenter())
missingsteps = ScenarioStep[
Given("missing given\""),
]
scenario1 = Scenario("1", String[], missingsteps)
feature = Feature(FeatureHeader("", [], []), [scenario1])
# Act
missingstepscode = suggestmissingsteps(executor, feature)
# Assert
missingmatcher = FromMacroStepDefinitionMatcher(missingstepscode)
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher)
addmatcher!(compositematcher, missingmatcher)
assertexecutor = Executor(compositematcher, QuietRealTimePresenter())
result = findmissingsteps(assertexecutor, feature)
@test result == []
end
@testset "One missing step; Add step according to suggestion; Step fails when executed" begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
""")
executor = Executor(matcher, QuietRealTimePresenter())
scenario1 = Scenario("1", String[], ScenarioStep[Given("missing step")])
feature = Feature(FeatureHeader("", [], []), [scenario1])
# Act
missingstepscode = suggestmissingsteps(executor, feature)
# Assert
missingmatcher = FromMacroStepDefinitionMatcher(missingstepscode)
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher)
addmatcher!(compositematcher, missingmatcher)
assertexecutor = Executor(compositematcher, QuietRealTimePresenter())
featureresult = Behavior.executefeature(assertexecutor, feature)
scenarioresult = featureresult.scenarioresults[1]
@test scenarioresult.steps[1] isa Behavior.StepFailed
end
end
@testset "Escaping PCRE metacharacters" begin
function testsuggestionescaping(steps::AbstractVector{ScenarioStep})
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
""")
executor = Executor(matcher, QuietRealTimePresenter())
scenario1 = Scenario("1", String[], steps)
feature = Feature(FeatureHeader("", [], []), [scenario1])
# Act
missingstepscode = suggestmissingsteps(executor, feature)
# Assert
missingmatcher = FromMacroStepDefinitionMatcher(missingstepscode)
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher)
addmatcher!(compositematcher, missingmatcher)
assertexecutor = Executor(compositematcher, QuietRealTimePresenter())
featureresult = Behavior.executefeature(assertexecutor, feature)
scenarioresult = featureresult.scenarioresults[1]
@test scenarioresult.steps[1] isa Behavior.StepFailed
end
# Check that each PCRE metacharacter is escaped properly.
# We do this by implementing the suggested step and check that it doesn't
# throw an unexpected exception and that it can be found. If any metacharacter is
# unescaped, one of those two things will happen, with at least one exception: the period.
#
# Since the step, like
# Given some step with (parentheses)
# will be converted into a regex, we want those parentheses to be
# escaped, so they are treated as the () themselves, not the regex
# metacharacter.
#
# Note: Since . matches itself we can't use the same test as for everything else,
# since a step
# Given some step with . in it
# will match regardless if it's escaped or not, this means that we must instead test
# that it _does not match_
# Given some step with x in it
# which will ensure that the . is escaped.
for (steps, testdescription) in [
# Check that $ is escaped properly
(ScenarioStep[Given("some step \$x")], "\$"),
# Check ^
(ScenarioStep[Given("some ^")], "^"),
# Check ()
(ScenarioStep[Given("some (and some)")], "parentheses"),
# Check |
(ScenarioStep[Given("some (x|y) or")], "pipe"),
# Check []
(ScenarioStep[Given("some [or]")], "square brackets"),
# Check ?
(ScenarioStep[Given("some question?")], "question mark"),
# Check *
(ScenarioStep[Given("some *")], "*"),
# Check +
(ScenarioStep[Given("some +")], "+"),
# Check {}
(ScenarioStep[Given("some {")], "{"),
(ScenarioStep[Given("some }")], "}"),
]
@testset "Escaping regular expressions characters: $testdescription" begin
testsuggestionescaping(steps)
end
end
end
@testset "Escaping the PCRE metacharacter ." begin
# Arrange
matcher = FromMacroStepDefinitionMatcher("""
using Behavior
""")
executor = Executor(matcher, QuietRealTimePresenter())
steps = ScenarioStep[
Given("some step with a . in it")
]
scenario1 = Scenario("1", String[], steps)
feature = Feature(FeatureHeader("", [], []), [scenario1])
# Act
missingstepscode = suggestmissingsteps(executor, feature)
# Assert
# Ensure that the step
# Given some step with a x in it
# _does not_ have a matching step.
missingmatcher = FromMacroStepDefinitionMatcher(missingstepscode)
compositematcher = CompositeStepDefinitionMatcher()
addmatcher!(compositematcher, matcher)
addmatcher!(compositematcher, missingmatcher)
assertexecutor = Executor(compositematcher, QuietRealTimePresenter())
shouldbemissing = Given("some step with a x in it")
newscenario = Scenario("1", String[], ScenarioStep[shouldbemissing])
newfeature = Feature(FeatureHeader("", [], []), [newscenario])
result = findmissingsteps(assertexecutor, newfeature)
@test shouldbemissing in result
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 6230 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Gherkin: Given, When, ScenarioStep, Scenario, Background
using Behavior: FromMacroStepDefinitionMatcher, findstepdefinition
@testset "Parameters " begin
@testset "Matching against parameters; Definition has one String parameter; has value bar" begin
given = Given("some bar")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some {String}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == ["bar"]
end
@testset "Matching against parameters; Definition has one empty String parameter; has value bar" begin
given = Given("some bar")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some {}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == ["bar"]
end
@testset "Matching against parameters; Definition has one String parameter; has value baz" begin
given = Given("some baz")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some {String}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == ["baz"]
end
@testset "Matching against parameters; Definition has two String parameters; has values bar and fnord" begin
given = Given("some bar and fnord")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some {String} and {String}") do context, v1, v2 end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == ["bar", "fnord"]
end
@testset "Scenario step has a variable foo; Args has :foo => bar" begin
stepdefmatcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some value {String}") do context, v
@expect v == "bar"
end
""")
executor = Behavior.Executor(stepdefmatcher)
given = Given("some value bar")
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
end
@testset "Scenario step has two String parameters; Arguments are bar, fnord" begin
stepdefmatcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some values {} and {}") do context, v1, v2
@expect v1 == "bar"
@expect v2 == "fnord"
end
""")
executor = Behavior.Executor(stepdefmatcher)
given = Given("some values bar and fnord")
scenario = Scenario("Description", String[], ScenarioStep[given])
scenarioresult = Behavior.executescenario(executor, Background(), scenario)
@test isa(scenarioresult.steps[1], Behavior.SuccessfulStepExecution)
end
@testset "Typed parameters" begin
@testset "Definition has one Int parameter; has value 123" begin
given = Given("some value 123")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some value {Int}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == [123]
end
@testset "Definition has one Float64 parameter; has value 234.0" begin
given = Given("some value 234.0")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some value {Float64}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == [234.0]
end
@testset "Definition has one Bool parameter; has value true" begin
given = Given("some value true")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some value {Bool}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == [true]
end
@testset "Definition has one Bool parameter; has value false" begin
given = Given("some value false")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some value {Bool}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == [false]
end
@testset "Definition has one Bool parameter; has value false" begin
given = Given("some value false")
stepdef_matcher = FromMacroStepDefinitionMatcher("""
using Behavior: @given
@given("some value {Bool}") do context, v end
""")
stepdefinitionmatch = findstepdefinition(stepdef_matcher, given)
@test stepdefinitionmatch.variables == [false]
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 3676 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin: issuccessful, parsescenario!, Given, When, Then, ByLineParser, ScenarioStep, ParseOptions
@testset "Data tables " begin
@testset "A Scenario with a data table; The data table is associated with the step" begin
text = """
Scenario: Data tables
When some action
Then some tabular data
| header 1 | header 2 |
| foo 1 | bar 1 |
| foo 2 | bar 2 |
| foo 3 | bar 3 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[2].datatable == [
["header 1", "header 2"],
["foo 1", "bar 1"],
["foo 2", "bar 2"],
["foo 3", "bar 3"],
]
end
@testset "A data table with comments in the elements; Comments are ignored" begin
text = """
Scenario: Data tables
When some action
Then some tabular data
| header 1 | header 2 |
| foo 1 | bar 1 |
| foo 2 | bar 2 |
# Comment
| foo 3 | bar 3 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[2].datatable == [
["header 1", "header 2"],
["foo 1", "bar 1"],
["foo 2", "bar 2"],
["foo 3", "bar 3"],
]
end
@testset "A data table with blank lines; Blank lines are ignored" begin
text = """
Scenario: Data tables
When some action
Then some tabular data
| header 1 | header 2 |
| foo 1 | bar 1 |
| foo 2 | bar 2 |
| foo 3 | bar 3 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[2].datatable == [
["header 1", "header 2"],
["foo 1", "bar 1"],
["foo 2", "bar 2"],
["foo 3", "bar 3"],
]
end
@testset "The data table step is followed by another step; The data table still has four rows" begin
text = """
Scenario: Data tables
When some action
Then some tabular data
| header 1 | header 2 |
| foo 1 | bar 1 |
| foo 2 | bar 2 |
| foo 3 | bar 3 |
And some other step
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[2].datatable == [
["header 1", "header 2"],
["foo 1", "bar 1"],
["foo 2", "bar 2"],
["foo 3", "bar 3"],
]
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 23296 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin:
parsefeature, issuccessful, ParseOptions,
Given, When, Then
@testset "Feature " begin
@testset "Feature description" begin
@testset "Read feature description; Description matches input" begin
text = """
Feature: This is a feature
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.header.description == "This is a feature"
end
@testset "Read another feature description; Description matches input" begin
text = """
Feature: This is another feature
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.header.description == "This is another feature"
end
@testset "Read long feature description" begin
text = """
Feature: This is another feature
This is the long description.
It contains several lines.
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test "This is the long description." in feature.header.long_description
@test "It contains several lines." in feature.header.long_description
end
@testset "Scenarios are not part of the feature description" begin
text = """
Feature: This is another feature
This is the long description.
It contains several lines.
Scenario: Some scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test ("Given a precondition" in feature.header.long_description) == false
end
end
@testset "Read scenarios" begin
@testset "Feature has one scenario; one scenarios is parsed" begin
text = """
Feature: This feature has one scenario
Scenario: This is one scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 1
end
@testset "Feature has two scenarios; two scenarios are parsed" begin
text = """
Feature: This feature has one scenario
Scenario: This is one scenario
Given a precondition
Scenario: This is a second scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 2
end
@testset "Feature has one scenario; The description is read from the scenario" begin
text = """
Feature: This feature has one scenario
Scenario: This is one scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.scenarios[1].description == "This is one scenario"
end
@testset "Feature has two scenarios; two scenarios are parsed" begin
text = """
Feature: This feature has one scenario
Scenario: This is one scenario
Given a precondition
Scenario: This is a second scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.scenarios[1].description == "This is one scenario"
@test feature.scenarios[2].description == "This is a second scenario"
end
@testset "Scenario with three steps; The parsed scenario has three steps" begin
text = """
Feature: This feature has one scenario
Scenario: This is one scenario
Given a precondition
When an action is performed
Then some postcondition holds
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios[1].steps) == 3
end
@testset "Scenario with one step; The parsed scenario has one step" begin
text = """
Feature: This feature has one scenario
Scenario: This is one scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios[1].steps) == 1
end
@testset "Feature has a scenario outline; The feature scenarios list has one element" begin
text = """
Feature: This feature has one scenario
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
"""
result = parsefeature(text)
@test issuccessful(result)
if !issuccessful(result)
println("Error: $(result.reason): $(result.expected) but got $(result.actual)")
end
feature = result.value
@test length(feature.scenarios) == 1
end
@testset "Feature has a scenario outline and a normal scenario; Two scenarios are parsed" begin
text = """
Feature: This feature has one scenario
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
Scenario: A normal scenario
Given some precondition
"""
result = parsefeature(text)
@test issuccessful(result)
if !issuccessful(result)
println("Error: $(result.reason): $(result.expected) but got $(result.actual)")
end
feature = result.value
@test length(feature.scenarios) == 2
end
end
@testset "Robustness" begin
@testset "Many empty lines before scenario; Empty lines are ignored" begin
text = """
Feature: This feature has many empty lines between scenarios
Scenario: This is one scenario
Given a precondition
Scenario: This is another scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 2
end
@testset "No empty lines between scenarios; Two scenarios found" begin
text = """
Feature: This feature has no empty lines between scenarios
Scenario: This is one scenario
Given a precondition
Scenario: This is another scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 2
end
@testset "No empty lines between a Scenario and a Scenario Outline; Two scenarios found" begin
text = """
Feature: This feature has no empty lines between scenarios
Scenario: This is one scenario
Given a precondition
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 2
end
@testset "No empty lines between a Scenario Outline and a Scenario; Two scenarios found" begin
text = """
Feature: This feature has no empty lines between scenarios
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
Scenario: This is one scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 2
end
@testset "No empty lines between a Scenario Outline and the examples; One scenario found" begin
text = """
Feature: This feature has no empty lines between scenarios
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 1
end
@testset "No empty lines between a Feature and a Scenario; Scenario found" begin
text = """
Feature: This feature has no empty lines between scenarios
Scenario: This is one scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 1
end
@testset "The feature file has three empty scenarios; The Feature has three scenarios" begin
text = """
Feature: This feature has no empty lines between scenarios
Scenario: This is one scenario
Scenario: This is another scenario
Scenario: This is a third scenario
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test length(feature.scenarios) == 3
end
end
@testset "Malformed features" begin
@testset "Scenario found before feature; Parse fails with feature expected" begin
text = """
Scenario: This is one scenario
Given a precondition
Feature: This feature has one scenario
Scenario: This is a second scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result) == false
@test result.reason == :unexpected_construct
@test result.expected == :feature
@test result.actual == :scenario
end
@testset "Scenario has out-of-order steps; Parse fails with :bad_step_order" begin
text = """
Feature: This feature has one scenario
Scenario: This scenario has out-of-order steps
When an action
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result) == false
@test result.reason == :bad_step_order
end
@testset "Invalid step keyword; Syntax error on line 5" begin
text = """
Feature: This feature has one scenario
Scenario: This scenario has out-of-order steps
When an action
Given a precondition
"""
result = parsefeature(text)
@test !issuccessful(result)
@test result.linenumber == 5
end
@testset "Scenario found before feature; Parser fails on line 1" begin
text = """
Scenario: This is one scenario
Given a precondition
Feature: This feature has one scenario
Scenario: This is a second scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result) == false
@test result.linenumber == 1
end
@testset "Invalid step keyword; Syntax error includes current line" begin
text = """
Feature: This feature has one scenario
Scenario: This scenario has out-of-order steps
When an action
Given a precondition
"""
result = parsefeature(text)
@test !issuccessful(result)
@test strip(result.line) == "Given a precondition"
end
@testset "Scenario found before feature; Parser fails on line 1" begin
text = """
Scenario: This is one scenario
Given a precondition
Feature: This feature has one scenario
Scenario: This is a second scenario
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result) == false
@test strip(result.line) == "Scenario: This is one scenario"
end
end
@testset "Lenient parser" begin
@testset "Allow arbitrary step order" begin
text = """
Feature: This feature has one scenario
Scenario: This scenario has steps out-of-order
Then a postcondition
When an action
Given a precondition
"""
result = parsefeature(text, options=ParseOptions(allow_any_step_order = true))
@test issuccessful(result)
end
end
@testset "Background sections" begin
@testset "Background with a single Given step; Background description is available in the result" begin
text = """
Feature: This feature has a Background section
Background: Some background steps
Given some background precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.description == "Some background steps"
end
@testset "Background with a single Given step; The Given step is available in the result" begin
text = """
Feature: This feature has a Background section
Background: Some background steps
Given some background precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.steps == [Given("some background precondition")]
end
@testset "Background with three Given steps; The Given steps are available in the result" begin
text = """
Feature: This feature has a Background section
Background: Some background steps
Given some background precondition 1
Given some background precondition 2
Given some background precondition 3
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.steps == [
Given("some background precondition 1"),
Given("some background precondition 2"),
Given("some background precondition 3"),
]
end
@testset "Background with a doc string; The doc string is part of the step" begin
text = """
Feature: This feature has a Background section
Background: Some background steps
Given some background precondition
\"\"\"
Doc string
\"\"\"
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.steps == [
Given("some background precondition"; block_text="Doc string"),
]
end
@testset "Background with a When step type; Parser error is :invalid_step" begin
text = """
Feature: This feature has a Background section
Background: Some background steps
Given some background precondition
When some action
"""
result = parsefeature(text)
@test !issuccessful(result)
@test result.reason == :invalid_step
end
@testset "Background with a Then step type; Parser error is :invalid_step" begin
text = """
Feature: This feature has a Background section
Background: Some background steps
Given some background precondition
Then some postcondition
"""
result = parsefeature(text)
@test !issuccessful(result)
@test result.reason == :invalid_step
end
@testset "Background has no description; Description is empty" begin
text = """
Feature: This feature has a Background section
Background:
Given some background precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.description == ""
end
end
@testset "Comments" begin
@testset "Comments preceding the Feature; Ignored" begin
text = """
# Comment line 1
# Comment line 2
Feature: This is a feature
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.header.description == "This is a feature"
end
@testset "Comments within a Background; Ignored" begin
text = """
Feature: This is a feature
Background: Some description
Given some precondition 1
# Comment line 1
# Comment line 2
Given some precondition 2
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.steps == [
Given("some precondition 1"),
Given("some precondition 2"),
]
end
@testset "Comments within a Scenario; Ignored" begin
text = """
Feature: This is a feature
Scenario: Some description
Given some precondition 1
# Comment line 1
# Comment line 2
Given some precondition 2
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.scenarios[1].steps == [
Given("some precondition 1"),
Given("some precondition 2"),
]
end
@testset "Comments between a Background and a Scenario; Ignored" begin
text = """
Feature: This is a feature
Background: Some background description
Given some background precondition
# Comment line 1
# Comment line 2
Scenario: Some description
Given some precondition 1
When some action
Then some postcondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.steps == [
Given("some background precondition"),
]
@test feature.scenarios[1].steps == [
Given("some precondition 1"),
When("some action"),
Then("some postcondition"),
]
end
@testset "Comments between a Background and a Scenario without blank lines; Ignored" begin
text = """
Feature: This is a feature
Background: Some background description
Given some background precondition
# Comment line 1
# Comment line 2
Scenario: Some description
Given some precondition 1
When some action
Then some postcondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.background.steps == [
Given("some background precondition"),
]
@test feature.scenarios[1].steps == [
Given("some precondition 1"),
When("some action"),
Then("some postcondition"),
]
end
@testset "Comments at the end of a Feature; Ignored" begin
text = """
Feature: This is a feature
Scenario: Some description
Given some precondition 1
When some action
Then some postcondition
# Comment line 1
# Comment line 2
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test feature.scenarios[1].steps == [
Given("some precondition 1"),
When("some action"),
Then("some postcondition"),
]
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 7126 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin: parsescenario!, issuccessful, Given, When, Then, ByLineParser, ScenarioStep
@testset "Scenario descriptions" begin
@testset "Scenario long description, one line; Long description is available" begin
text = """
Scenario: Some description
This is a longer description
Given some precondition
When some action
Then some postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description"
end
@testset "Scenario long description, three lines; Long description is available" begin
text = """
Scenario: Some description
This is a longer description
This is another line.
This is a third line.
Given some precondition
When some action
Then some postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description\nThis is another line.\nThis is a third line."
end
@testset "Scenario with blank lines between description and steps; Trailing blank lines are ignored" begin
text = """
Scenario: Some description
This is a longer description
This is another line.
This is a third line.
Given some precondition
When some action
Then some postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description\nThis is another line.\nThis is a third line."
end
@testset "Scenario with blank lines in description; Blank lines in description are included" begin
text = """
Scenario: Some description
This is a longer description
This is another line.
This is a third line.
Given some precondition
When some action
Then some postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description\n\nThis is another line.\nThis is a third line."
end
@testset "Long description without steps; Zero steps and a long description" begin
text = """
Scenario: Some description
NotAStep some more text
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test isempty(scenario.steps)
@test scenario.long_description == "NotAStep some more text"
end
end
@testset "Outline descriptions " begin
@testset "Scenario long description, one line; Long description is available" begin
text = """
Scenario Outline: Some description
This is a longer description
Given some precondition <foo>
When some action
Then some postcondition
Examples:
| foo |
| 1 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description"
end
@testset "Scenario long description, three lines; Long description is available" begin
text = """
Scenario Outline: Some description
This is a longer description
This is another line.
This is a third line.
Given some precondition <foo>
When some action
Then some postcondition
Examples:
| foo |
| 1 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description\nThis is another line.\nThis is a third line."
end
@testset "Scenario with blank lines between description and steps; Trailing blank lines are ignored" begin
text = """
Scenario Outline: Some description
This is a longer description
This is another line.
This is a third line.
Given some precondition <foo>
When some action
Then some postcondition
Examples:
| foo |
| 1 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description\nThis is another line.\nThis is a third line."
end
@testset "Scenario with blank lines in description; Blank lines in description are included" begin
text = """
Scenario Outline: Some description
This is a longer description
This is another line.
This is a third line.
Given some precondition <foo>
When some action
Then some postcondition
Examples:
| foo |
| 1 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.long_description == "This is a longer description\n\nThis is another line.\nThis is a third line."
end
# @testset "Long description without steps; Undefined" begin
# # We have problems supporting empty Scenario Outlines because I don't want
# # to make "Examples" an unallowed word in scenario descriptions.
# # If this turns out to be a problem, we'll have to come up with a solution.
# text = """
# Scenario Outline: Some description
# NotAStep some more text
#
# Examples:
# | foo |
# | 1 |
# """
#
# byline = ByLineParser(text)
# result = parsescenario!(byline)
# end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 6871 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin: parsescenario!, issuccessful, Given, When, Then, ByLineParser, ScenarioStep
@testset "Scenario Outline " begin
@testset "Outline has a Given step; Step is parsed" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("a precondition with field <Foo>")]
end
@testset "Scenario Outline has description; Description is parsed" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.description == "This is one scenario outline"
end
@testset "Scenario Outline has tags; Tags are parsed" begin
text = """
@tag1 @tag2
Scenario Outline: This is one scenario outline
Given a precondition with field <Foo>
Examples:
| Foo |
| 1 |
| 2 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.tags == ["@tag1", "@tag2"]
end
@testset "Scenario Outline Examples" begin
@testset "Outline has three placeholders; The placeholders are parsed" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholders <Foo>, <Bar>, <Baz>
Examples:
| Foo | Bar | Baz |
| 1 | 2 | 3 |
| 1 | 2 | 3 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.placeholders == ["Foo", "Bar", "Baz"]
end
@testset "Two examples with three placeholders are provided; Examples array is 3x2" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholders <Foo>, <Bar>, <Baz>
Examples:
| Foo | Bar | Baz |
| 1 | 2 | 3 |
| 1 | 2 | 3 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test size(scenario.examples) == (3,2)
end
@testset "Three examples with four placeholders are provided; Examples array is 4x3" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholders <Foo>, <Bar>, <Baz>, <Quux>
Examples:
| Foo | Bar | Baz | Quux |
| 1 | 2 | 3 | 4 |
| 1 | 2 | 3 | 4 |
| 1 | 2 | 3 | 4 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test size(scenario.examples) == (4,3)
end
@testset "Two examples with three placeholders are provided; Examples array has all values" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholders <Foo>, <Bar>, <Baz>
Examples:
| Foo | Bar | Baz |
| 1 | 2 | 3 |
| 4 | 5 | 6 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.examples[:,1] == ["1", "2", "3"]
@test scenario.examples[:,2] == ["4", "5", "6"]
end
@testset "Examples with spaces; Examples are split on | not on spaces" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholder <Foo>
Examples:
| Foo |
| word |
| two words |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.examples[:,1] == ["word"]
@test scenario.examples[:,2] == ["two words"]
end
@testset "Example with an empty element; The empty line results in an empty value" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholder <Foo>
Examples:
| Foo |
||
| two words |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.examples[:,1] == [""]
@test scenario.examples[:,2] == ["two words"]
end
@testset "A placeholder is non-alphanumerical; The placeholders are correct" begin
text = """
Scenario Outline: This is one scenario outline
Given a precondition with placeholders <Foo>, <Bar-baz>, <>
Examples:
| Foo | Bar-baz |
| 1 | 2 |
| 4 | 5 |
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.placeholders == ["Foo", "Bar-baz"]
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 16209 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin: issuccessful, parsescenario!, Given, When, Then, ByLineParser, ScenarioStep, ParseOptions
@testset "Scenario " begin
@testset "Scenario has a Given step; the parsed scenario has a Given struct" begin
text = """
Scenario: Some description
Given a precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("a precondition")]
end
@testset "Scenario has a When step; the parsed scenario has a When struct" begin
text = """
Scenario: Some description
When some action
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[When("some action")]
end
@testset "Scenario has a Then step; the parsed scenario has a Then struct" begin
text = """
Scenario: Some description
Then a postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Then("a postcondition")]
end
@testset "Scenario has an And following a Given; the And step becomes a Given" begin
text = """
Scenario: Some description
Given a precondition
And another precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("a precondition"),
Given("another precondition")]
end
@testset "Scenario has an And following a When; the And step becomes a When" begin
text = """
Scenario: Some description
When some action
And another action
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[When("some action"),
When("another action")]
end
@testset "Scenario has an And following a Then; the And step becomes a Then" begin
text = """
Scenario: Some description
Then some postcondition
And another postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Then("some postcondition"),
Then("another postcondition")]
end
@testset "Blank lines" begin
text = """
Scenario: Some description
Given some precondition
When some action
Then some postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("some precondition"),
When("some action"),
Then("some postcondition"),
]
end
@testset "But and * keywords" begin
@testset "But follows Given/When/Then; Each But step is same as the preceding" begin
text = """
Scenario: Some description
Given some precondition
But after given
When some action
But after when
Then some postcondition
But after then
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("some precondition"),
Given("after given"),
When("some action"),
When("after when"),
Then("some postcondition"),
Then("after then")]
end
@testset "* follows Given/When/Then; Each * step is same as the preceding" begin
text = """
Scenario: Some description
Given some precondition
* after given
When some action
* after when
Then some postcondition
* after then
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("some precondition"),
Given("after given"),
When("some action"),
When("after when"),
Then("some postcondition"),
Then("after then")]
end
@testset "List items as *; Items are the same type as the preceding step" begin
text = """
Scenario: Some description
Given some precondition
* item 1
* item 2
* item 3
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("some precondition"),
Given("item 1"),
Given("item 2"),
Given("item 3")]
end
end
@testset "Scenario is not terminated by newline; EOF is also an OK termination" begin
text = """
Scenario: Some description
Then some postcondition
And another postcondition"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
end
@testset "A given step multiple spaces before the step description; The parsed given is stripped" begin
text = """
Scenario: Some description
Given a precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("a precondition")]
end
@testset "A given step multiple spaces after the step description; The parsed given is stripped" begin
text = """
Scenario: Some description
Given a precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Given("a precondition")]
end
@testset "Malformed scenarios" begin
@testset "And as a first step; Expected Given, When, or Then before that" begin
text = """
Scenario: Some description
And another postcondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test !issuccessful(result)
@test result.reason == :leading_and
@test result.expected == :specific_step
@test result.actual == :and_step
end
@testset "Given after a When; Expected When or Then" begin
text = """
Scenario: Some description
When some action
Given some precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test !issuccessful(result)
@test result.reason == :bad_step_order
@test result.expected == :NotGiven
@test result.actual == :Given
end
@testset "Given after Then; Expected Then" begin
text = """
Scenario: Some description
Then some postcondition
Given some precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test !issuccessful(result)
@test result.reason == :bad_step_order
@test result.expected == :NotGiven
@test result.actual == :Given
end
@testset "When after Then; Expected Then" begin
text = """
Scenario: Some description
Then some postcondition
When some action
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test !issuccessful(result)
@test result.reason == :bad_step_order
@test result.expected == :NotWhen
@test result.actual == :When
end
@testset "A step definition without text; Expected a valid step definition" begin
text = """
Scenario: Some description
Given
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test !issuccessful(result)
@test result.reason == :invalid_step
@test result.expected == :step_definition
@test result.actual == :invalid_step_definition
end
@testset "Improper scenario header; Expected valid scenario header" begin
text = """
Scenario malformed: This is not a proper scenario header
Given some precondition
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test !issuccessful(result)
@test result.reason == :invalid_scenario_header
@test result.expected == :scenario_or_outline
@test result.actual == :invalid_header
end
end
@testset "Lenient parsing" begin
@testset "Allow arbitrary step order" begin
@testset "Given after a When; Steps are When and Given" begin
text = """
Scenario: Some description
When some action
Given some precondition
"""
byline = ByLineParser(text, ParseOptions(allow_any_step_order=true))
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[When("some action"), Given("some precondition")]
end
@testset "When after a Then; Steps are Then and When" begin
text = """
Scenario: Some description
Then some postcondition
When some action
"""
byline = ByLineParser(text, ParseOptions(allow_any_step_order=true))
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Then("some postcondition"), When("some action")]
end
@testset "Steps are Then/When/Given; Parse result is in that order" begin
text = """
Scenario: Some description
Then some postcondition
When some action
Given some precondition
"""
byline = ByLineParser(text, ParseOptions(allow_any_step_order=true))
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps == ScenarioStep[Then("some postcondition"), When("some action"), Given("some precondition")]
end
end
end
@testset "Block text" begin
@testset "Block text in a Given; Block text is present in step" begin
text = """
Scenario: Some description
Given some precondition
\"\"\"
This is block text.
There are two lines.
\"\"\"
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[1].block_text == """
This is block text.
There are two lines."""
end
@testset "Another block text in a Given; Block text is present in step" begin
text = """
Scenario: Some description
Given some precondition
\"\"\"
This is another block text.
There are three lines.
This is the last line.
\"\"\"
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[1].block_text == """
This is another block text.
There are three lines.
This is the last line."""
end
@testset "Block text in a When step; Block text is present in step" begin
text = """
Scenario: Some description
When some action
\"\"\"
This is block text.
\"\"\"
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[1] == When("some action"; block_text="""This is block text.""")
end
@testset "Block text in a Then step; Block text is present in step" begin
text = """
Scenario: Some description
Then some postcondition
\"\"\"
This is block text.
\"\"\"
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[1] == Then("some postcondition"; block_text="""This is block text.""")
end
@testset "Block text with a blank line; Block text is present in step" begin
text = """
Scenario: Some description
Then some postcondition
\"\"\"
This is block text.
This is another line.
\"\"\"
"""
byline = ByLineParser(text)
result = parsescenario!(byline)
@test issuccessful(result)
scenario = result.value
@test scenario.steps[1] == Then("some postcondition"; block_text="""This is block text.\n\nThis is another line.""")
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 6515 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Gherkin: hastag, parsefeature, issuccessful, istagsline
@testset "Tags " begin
@testset "Feature tags" begin
@testset "@tag1 is applied to a feature; The parsed feature has @tag1" begin
text = """
@tag1
Feature: Some description
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test hastag(feature, "@tag1")
end
@testset "Feature without tags; The parsed feature does not have @tag1" begin
text = """
Feature: Some description
"""
result = parsefeature(text)
@test issuccessful(result)
@test hastag(result.value, "@tag1") == false
end
@testset "Feature with multiple tags; The parsed feature has all tags" begin
text = """
@tag1 @tag2 @tag3
Feature: Some description
"""
result = parsefeature(text)
@test issuccessful(result)
@test hastag(result.value, "@tag1")
@test hastag(result.value, "@tag2")
@test hastag(result.value, "@tag3")
end
end
@testset "Scenario tags" begin
@testset "Scenario has one tag; The parsed scenario has tag1" begin
text = """
Feature: Some description
@tag1
Scenario: Some description
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test hastag(feature.scenarios[1], "@tag1")
end
@testset "Scenario has no tags; The parsed scenario does not have tag1" begin
text = """
Feature: Some description
Scenario: Some description
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
@test hastag(result.value.scenarios[1], "@tag1") == false
end
@testset "Feature has tag1, but no the scenario; The parsed scenario does not have tag1" begin
text = """
@tag1
Feature: Some description
Scenario: Some description
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
@test hastag(result.value.scenarios[1], "@tag1") == false
end
@testset "Second Scenario has one tag; The second scenario has tag1" begin
text = """
Feature: Some description
Scenario: The first scenario with no tags
Given a precondition
@tag1
Scenario: Some description
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test hastag(feature.scenarios[2], "@tag1")
end
@testset "Scenario tags followed by comment; Comment is ignored" begin
text = """
Feature: Some description
Scenario: The first scenario with no tags
Given a precondition
@tag4
# Comment
Scenario: Some description with tag after comment
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test hastag(feature.scenarios[2], "@tag4")
end
end
@testset "Robustness" begin
@testset "Tag @tag1-2 contains a hyphen; Tag is read as @tag1-2" begin
text = """
@tag1-2
Feature: Some description
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test hastag(feature, "@tag1-2")
end
@testset "Feature has a list of tags in its free text header, and no scenarios; The tags are in the free text header" begin
text = """
Feature: Some description
@tag1
@tag2
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test "@tag1" in feature.header.long_description
@test "@tag2" in feature.header.long_description
end
@testset "Issue #58: Scenario with an @ in the description but no tags; Scenario has no tags" begin
text = """
Feature: Some description
Scenario: Some description with @tag that looks like a tag but is not
Given a precondition
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test isempty(feature.scenarios[1].tags)
end
@testset "Issue #58: Last step has a @ but is not a tags line; Next scenario follows without a blank line and is properly parsed" begin
text = """
Feature: Some description
Scenario: Some description
When some action
Then some @tag
Scenario: Some other scenario
Given some precondtion
"""
result = parsefeature(text)
@test issuccessful(result)
feature = result.value
@test isempty(feature.scenarios[1].tags)
@test isempty(feature.scenarios[2].tags)
end
end
@testset "Is tags" begin
@testset "One tag; Yes" begin
@test istagsline("@tag")
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 22630 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin.Experimental: BadExpectationParseResult
@testset "Combinators " begin
@testset "Line" begin
@testset "Match Foo; Foo; OK" begin
# Arrange
input = ParserInput("Foo")
# Act
p = Line("Foo")
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "Match Foo; Bar; Not OK" begin
# Arrange
input = ParserInput("Foo")
# Act
p = Line("Bar")
result = p(input)
# Assert
@test result isa BadParseResult{String}
@test result.expected == "Bar"
@test result.actual == "Foo"
end
@testset "Match Foo, then Bar; Foo Bar; OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
foo = Line("Foo")
bar = Line("Bar")
result1 = foo(input)
result2 = bar(result1.newinput)
# Assert
@test result1 isa OKParseResult{String}
@test result1.value == "Foo"
@test result2 isa OKParseResult{String}
@test result2.value == "Bar"
end
@testset "Match Foo, then Bar; Foo Baz; Not OK" begin
# Arrange
input = ParserInput("""
Foo
Baz
""")
# Act
foo = Line("Foo")
bar = Line("Bar")
result1 = foo(input)
result2 = bar(result1.newinput)
# Assert
@test result1 isa OKParseResult{String}
@test result1.value == "Foo"
@test result2 isa BadParseResult{String}
@test result2.expected == "Bar"
@test result2.actual == "Baz"
end
@testset "Match Foo; Bar; Not OK, state is unchanged" begin
# Arrange
input = ParserInput("Foo")
# Act
p = Line("Bar")
result = p(input)
# Assert
@test result isa BadParseResult{String}
@test result.newinput == input
end
@testset "Match Foo; No more input; Unexpected EOF" begin
# Arrange
input = ParserInput("")
# Act
p = Line("Foo")
result = p(input)
# Assert
@test result isa BadUnexpectedEOFParseResult{String}
@test result.newinput == input
end
end
@testset "Optionally" begin
@testset "Optionally Foo; Foo; OK" begin
# Arrange
input = ParserInput("Foo")
# Act
parser = Optionally{String}(Line("Foo"))
result = parser(input)
# Assert
@test result isa OKParseResult{Union{Nothing, String}}
@test result.value == "Foo"
end
@testset "Optionally Foo; Bar; OK with nothing" begin
# Arrange
input = ParserInput("Bar")
# Act
parser = Optionally{String}(Line("Foo"))
result = parser(input)
# Assert
@test result isa OKParseResult{Union{Nothing, String}}
@test result.value === nothing
end
@testset "Optionally Bar; Bar; OK" begin
# Arrange
input = ParserInput("Bar")
# Act
parser = Optionally{String}(Line("Bar"))
result = parser(input)
# Assert
@test result isa OKParseResult{Union{Nothing, String}}
@test result.value == "Bar"
end
@testset "Optionally Foo then Bar; Foo Bar; OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
parser1 = Optionally{String}(Line("Foo"))
result1 = parser1(input)
parser2 = Line("Bar")
result2 = parser2(result1.newinput)
# Assert
@test result1 isa OKParseResult{Union{Nothing, String}}
@test result1.value == "Foo"
@test result2 isa OKParseResult{String}
@test result2.value == "Bar"
end
@testset "Optionally Foo then Bar; Bar; OK" begin
# Arrange
input = ParserInput("""
Bar
""")
# Act
parser1 = Optionally{String}(Line("Foo"))
result1 = parser1(input)
parser2 = Line("Bar")
result2 = parser2(result1.newinput)
# Assert
@test result1 isa OKParseResult{Union{Nothing, String}}
@test result1.value === nothing
@test result2 isa OKParseResult{String}
@test result2.value == "Bar"
end
end
@testset "Or" begin
@testset "Foo or Bar; Foo; OK" begin
# Arrange
input = ParserInput("Foo")
# Act
p = Line("Foo") | Line("Bar")
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "Foo or Bar; Bar; OK" begin
# Arrange
input = ParserInput("Bar")
# Act
p = Line("Foo") | Line("Bar")
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Bar"
end
@testset "Foo or Bar; Baz; Not OK" begin
# Arrange
input = ParserInput("Baz")
# Act
p = Line("Foo") | Line("Bar")
result = p(input)
# Assert
@test result isa BadParseResult{String}
end
end
@testset "Transformer" begin
@testset "Transform to Int; 1; OK" begin
# Arrange
input = ParserInput("1")
# Act
digit = Line("1")
p = Transformer{String, Int}(digit, x -> parse(Int, x))
result = p(input)
# Assert
@test result isa OKParseResult{Int}
@test result.value == 1
end
@testset "Transform to Int; 2; OK" begin
# Arrange
input = ParserInput("2")
# Act
digit = Line("1") | Line("2")
p = Transformer{String, Int}(digit, x -> parse(Int, x))
result = p(input)
# Assert
@test result isa OKParseResult{Int}
@test result.value == 2
end
end
@testset "Sequence" begin
@testset "Sequence Line Baz; Baz; OK" begin
# Arrange
input = ParserInput("Baz")
# Act
p = Sequence{String}(Line("Baz"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Baz"]
end
@testset "Sequence Line Baz then Quux; Baz Quux; OK" begin
# Arrange
input = ParserInput("""
Baz
Quux
""")
# Act
p = Sequence{String}(Line("Baz"), Line("Quux"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Baz", "Quux"]
end
@testset "Sequence Line Baz then Quux; Baz Bar; Not OK" begin
# Arrange
input = ParserInput("""
Baz
Bar
""")
# Act
p = Sequence{String}(Line("Baz"), Line("Quux"))
result = p(input)
# Assert
@test result isa BadParseResult{Vector{String}}
end
@testset "Sequence Line Baz then Quux; Foo Quux; Not OK" begin
# Arrange
input = ParserInput("""
Foo
Quux
""")
# Act
p = Sequence{String}(Line("Baz"), Line("Quux"))
result = p(input)
# Assert
@test result isa BadParseResult{Vector{String}}
end
@testset "Sequence Ints 1 then 2; 1 then 2; OK" begin
# Arrange
input = ParserInput("""
1
2
""")
# Act
digits = Line("1") | Line("2")
intparser = Transformer{String, Int}(digits, x -> parse(Int, x))
p = Sequence{Int}(intparser, intparser)
result = p(input)
# Assert
@test result isa OKParseResult{Vector{Int}}
@test result.value == [1, 2]
end
end
@testset "Joined" begin
@testset "Join sequence of Baz or Quux; Baz Quux; OK" begin
# Arrange
input = ParserInput("""
Baz
Quux
""")
# Act
s = Line("Baz") | Line("Quux")
p = Joined(Sequence{String}(s, s))
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Baz\nQuux"
end
@testset "Join sequence of Baz or Quux; Quux Baz; OK" begin
# Arrange
input = ParserInput("""
Quux
Baz
""")
# Act
s = Line("Baz") | Line("Quux")
p = Joined(Sequence{String}(s, s))
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Quux\nBaz"
end
@testset "Join sequence of Baz or Quux; Foo Bar; Not OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
s = Line("Baz") | Line("Quux")
p = Joined(Sequence{String}(s, s))
result = p(input)
# Assert
@test result isa BadParseResult{String}
end
end
@testset "Repeating" begin
@testset "Repeating Baz or Quux; Bar; OK and empty" begin
# Arrange
input = ParserInput("""
Bar
""")
# Act
s = Line("Baz") | Line("Quux")
p = Repeating{String}(s)
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == []
end
@testset "Repeating Baz or Quux; Baz Quux Bar; OK" begin
# Arrange
input = ParserInput("""
Baz
Quux
Bar
""")
# Act
s = Line("Baz") | Line("Quux")
p = Repeating{String}(s)
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Baz", "Quux"]
end
@testset "Repeating Baz or Quux followed by Bar; Baz Quux Bar; OK" begin
# Arrange
input = ParserInput("""
Baz
Quux
Bar
""")
# Act
s = Line("Baz") | Line("Quux")
p = Sequence{String}(Joined(Repeating{String}(s)), Line("Bar"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Baz\nQuux", "Bar"]
end
@testset "Repeating digits; 3 2 1; OK" begin
# Arrange
input = ParserInput("""
3
2
1
Not a digit
""")
# Act
digits = Line("1") | Line("2") | Line("3")
intparser = Transformer{String, Int}(digits, x -> parse(Int, x))
p = Repeating{Int}(intparser)
result = p(input)
# Assert
@test result isa OKParseResult{Vector{Int}}
@test result.value == [3, 2, 1]
end
@testset "Repeating Baz or Quux, at least 1; Bar; Not OK" begin
# Arrange
input = ParserInput("""
Bar
""")
# Act
s = Line("Baz") | Line("Quux")
p = Repeating{String}(s, atleast=1)
result = p(input)
# Assert
@test result isa BadParseResult{Vector{String}}
end
@testset "Repeating Baz or Quux, at least 1; Baz Bar; OK" begin
# Arrange
input = ParserInput("""
Baz
Bar
""")
# Act
s = Line("Baz") | Line("Quux")
p = Repeating{String}(s, atleast=1)
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Baz"]
end
end
@testset "LineIfNot" begin
@testset "LineIfNot Baz; Baz; Not OK" begin
# Arrange
input = ParserInput("""
Baz
""")
# Act
p = LineIfNot(Line("Baz"))
result = p(input)
# Assert
@test result isa BadParseResult{String}
@test result.unexpected == "Baz"
end
@testset "LineIfNot Baz; Foo Baz; OK" begin
# Arrange
input = ParserInput("""
Foo
""")
# Act
p = LineIfNot(Line("Baz"))
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "LineIfNot Baz then Baz; Bar Baz; OK" begin
# Arrange
input = ParserInput("""
Bar
Baz
""")
# Act
p = Sequence{String}(LineIfNot(Line("Baz")), Line("Baz"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Bar", "Baz"]
end
@testset "LineIfNot Baz; EOF; Not OK" begin
# Arrange
input = ParserInput("")
# Act
p = LineIfNot(Line("Baz"))
result = p(input)
# Assert
@test result isa BadUnexpectedEOFParseResult{String}
@test result.newinput == input
end
end
@testset "StartsWith" begin
@testset "Foo; Foo; OK" begin
# Arrange
input = ParserInput(
"""
Foo
"""
)
# Act
parser = StartsWith("Foo")
result = parser(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "Foo; Bar; Not OK" begin
# Arrange
input = ParserInput(
"""
Bar
"""
)
# Act
parser = StartsWith("Foo")
result = parser(input)
# Assert
@test result isa BadParseResult{String}
end
@testset "Foo; Foo Bar; OK" begin
# Arrange
input = ParserInput(
"""
Foo Bar
"""
)
# Act
parser = StartsWith("Foo")
result = parser(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo Bar"
end
@testset "Foo the Quux; Foo Bar, Quux; OK" begin
# Arrange
input = ParserInput(
"""
Foo Bar
Quux
"""
)
# Act
parser = Sequence{String}(StartsWith("Foo"), Line("Quux"))
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Foo Bar", "Quux"]
end
@testset "Foo; EOF; Not OK" begin
# Arrange
input = ParserInput("")
# Act
parser = StartsWith("Foo")
result = parser(input)
# Assert
@test result isa BadUnexpectedEOFParseResult{String}
@test result.newinput == input
end
end
@testset "Whitespace and comments" begin
@testset "Match Foo; Blank line, then Foo; OK" begin
# Arrange
input = ParserInput("""
Foo
""")
# Act
p = Line("Foo")
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "Match Foo then Bar; Blank line, then Foo, Bar; OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
p = Sequence{String}(Line("Foo"), Line("Bar"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value[1] == "Foo"
@test result.value[2] == "Bar"
end
@testset "Match Foo then Bar; Blank line between, then Foo, Bar; OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
p = Sequence{String}(Line("Foo"), Line("Bar"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value[1] == "Foo"
@test result.value[2] == "Bar"
end
@testset "Match Foo then Bar; 3 blank line before and between, then Foo, Bar; OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
p = Sequence{String}(Line("Foo"), Line("Bar"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value[1] == "Foo"
@test result.value[2] == "Bar"
end
@testset "Match Foo; Comment, then Foo; OK" begin
# Arrange
input = ParserInput("""
# Some comment
Foo
""")
# Act
p = Line("Foo")
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "Match Foo then Bar; Comment between, then Foo, Bar; OK" begin
# Arrange
input = ParserInput("""
Foo
# Skip this comment
Bar
""")
# Act
p = Sequence{String}(Line("Foo"), Line("Bar"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value[1] == "Foo"
@test result.value[2] == "Bar"
end
end
@testset "EOF" begin
@testset "No non-blank lines left; OK" begin
# Arrange
input = ParserInput("")
# Act
parser = EOFParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Nothing}
end
@testset "Line Foo; Not OK" begin
# Arrange
input = ParserInput("Foo")
# Act
parser = EOFParser()
result = parser(input)
# Assert
@test result isa BadExpectedEOFParseResult{Nothing}
end
@testset "Foo, then EOF; OK" begin
# Arrange
input = ParserInput("Foo")
# Act
parser = Sequence{Union{Nothing, String}}(Line("Foo"), EOFParser())
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{Union{Nothing, String}}}
@test result.value[1] == "Foo"
@test result.value[2] === nothing
end
@testset "Foo, then Bar; Not OK" begin
# Arrange
input = ParserInput("""
Foo
Bar
""")
# Act
parser = Sequence{Union{Nothing, String}}(Line("Foo"), EOFParser())
result = parser(input)
# Assert
@test result isa BadParseResult{Vector{Union{Nothing, String}}}
end
@testset "Feature, then EOF; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario: Some scenario
Scenario: Other scenario
""")
# Act
parser = Sequence{Union{Nothing, Feature}}(FeatureParser(), EOFParser())
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{Union{Nothing, Feature}}}
@test result.value[1] isa Feature
@test result.value[2] === nothing
end
@testset "Feature, then unallowed new feature; Not OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario: Some scenario
Scenario: Other scenario
Feature: Not allowed here
""")
# Act
parser = Sequence{Union{Nothing, Feature}}(FeatureParser(), EOFParser())
result = parser(input)
# Assert
@test result isa BadParseResult{Vector{Union{Nothing, Feature}}}
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 41660 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@testset "Gherkin combinators " begin
@testset "Block text" begin
@testset "Empty; OK" begin
# Arrange
input = ParserInput("""
\"\"\"
\"\"\"
""")
# Act
p = BlockText()
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == ""
end
@testset "Empty, then Quux; OK" begin
# Arrange
input = ParserInput("""
\"\"\"
\"\"\"
Quux
""")
# Act
p = Sequence{String}(BlockText(), Line("Quux"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["", "Quux"]
end
@testset "Foo; OK" begin
# Arrange
input = ParserInput("""
\"\"\"
Foo
\"\"\"
""")
# Act
p = BlockText()
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo"
end
@testset "Foo Bar Baz; OK" begin
# Arrange
input = ParserInput("""
\"\"\"
Foo
Bar
Baz
\"\"\"
""")
# Act
p = BlockText()
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo\nBar\nBaz"
end
@testset "Foo Bar Baz, then Quux; OK" begin
# Arrange
input = ParserInput("""
\"\"\"
Foo
Bar
Baz
\"\"\"
Quux
""")
# Act
p = Sequence{String}(BlockText(), Line("Quux"))
result = p(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["Foo\nBar\nBaz", "Quux"]
end
@testset "Foo, empty line, then Baz; Empty line is included" begin
# Arrange
input = ParserInput("""
\"\"\"
Foo
Baz
\"\"\"
""")
# Act
p = BlockText()
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo\n\nBaz"
end
@testset "Foo, comment, then Baz; Comment line is included" begin
# Arrange
input = ParserInput("""
\"\"\"
Foo
# Comment line
Baz
\"\"\"
""")
# Act
p = BlockText()
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo\n# Comment line\nBaz"
end
@testset "Foo, comment and empty, then Baz; Comment line and empty are included" begin
# Arrange
input = ParserInput("""
\"\"\"
Foo
# Comment line
Baz
\"\"\"
""")
# Act
p = BlockText()
result = p(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "Foo\n# Comment line\n\nBaz"
end
end
@testset "KeywordParser" begin
@testset "Scenario:, Scenario:; OK" begin
# Arrange
input = ParserInput("""
Scenario:
""")
# Act
parser = KeywordParser("Scenario:")
result = parser(input)
# Assert
@test result isa OKParseResult{Keyword}
@test result.value.keyword == "Scenario:"
@test result.value.rest == ""
end
@testset "Given, Given; OK" begin
# Arrange
input = ParserInput("""
Given
""")
# Act
parser = KeywordParser("Given")
result = parser(input)
# Assert
@test result isa OKParseResult{Keyword}
@test result.value.keyword == "Given"
@test result.value.rest == ""
end
@testset "Scenario:, Scenario Outline:; Not OK" begin
# Arrange
input = ParserInput("""
Scenario Outline:
""")
# Act
parser = KeywordParser("Scenario:")
result = parser(input)
# Assert
@test result isa BadParseResult{Keyword}
end
@testset "Scenario: then Scenario Outline:, Scenario:, Scenario Outline:; OK" begin
# Arrange
input = ParserInput("""
Scenario:
Scenario Outline:
""")
# Act
parser = Sequence{Keyword}(KeywordParser("Scenario:"), KeywordParser("Scenario Outline:"))
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{Keyword}}
@test result.value[1].keyword == "Scenario:"
@test result.value[1].rest == ""
@test result.value[2].keyword == "Scenario Outline:"
@test result.value[2].rest == ""
end
@testset "Scenario:, Scenario: Some description; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some description
""")
# Act
parser = KeywordParser("Scenario:")
result = parser(input)
# Assert
@test result isa OKParseResult{Keyword}
@test result.value.keyword == "Scenario:"
@test result.value.rest == "Some description"
end
@testset "Scenario:, Scenario:Some description, without a space; OK" begin
# Arrange
input = ParserInput("""
Scenario:Some description
""")
# Act
parser = KeywordParser("Scenario:")
result = parser(input)
# Assert
@test result isa OKParseResult{Keyword}
@test result.value.keyword == "Scenario:"
@test result.value.rest == "Some description"
end
@testset "Given; Given on the description as well; Only first given is removed" begin
# Arrange
input = ParserInput("""
Given Given
""")
# Act
parser = KeywordParser("Given")
result = parser(input)
# Assert
@test result isa OKParseResult{Keyword}
@test result.value.keyword == "Given"
@test result.value.rest == "Given"
end
end
@testset "GivenParser" begin
@testset "Given some precondition; OK" begin
# Arrange
input = ParserInput("""
Given some precondition
""")
# Act
parser = GivenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Given}
@test result.value == Given("some precondition")
end
@testset "Given some other precondition; OK" begin
# Arrange
input = ParserInput("""
Given some other precondition
""")
# Act
parser = GivenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Given}
@test result.value == Given("some other precondition")
end
@testset "When some action; Not OK" begin
# Arrange
input = ParserInput("""
When some action
""")
# Act
parser = GivenParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Given}
end
@testset "Followed by block text; OK and the text is present" begin
# Arrange
input = ParserInput("""
Given some precondition
\"\"\"
Some block text.
On two lines.
\"\"\"
""")
# Act
parser = GivenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Given}
@test result.value isa Given
@test result.value.text == "some precondition"
@test result.value.block_text == "Some block text.\nOn two lines."
end
@testset "Givennospace; Not OK" begin
# Arrange
input = ParserInput("""
Givennospace some precondition
""")
# Act
parser = GivenParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Given}
end
end
@testset "WhenParser" begin
@testset "When some action; OK" begin
# Arrange
input = ParserInput("""
When some action
""")
# Act
parser = WhenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{When}
@test result.value == When("some action")
end
@testset "When some other action; OK" begin
# Arrange
input = ParserInput("""
When some other action
""")
# Act
parser = WhenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{When}
@test result.value == When("some other action")
end
@testset "Given some precondition; Not OK" begin
# Arrange
input = ParserInput("""
Given some precondition
""")
# Act
parser = WhenParser()
result = parser(input)
# Assert
@test result isa BadParseResult{When}
end
@testset "Followed by block text; OK and the text is present" begin
# Arrange
input = ParserInput("""
When some action
\"\"\"
Some block text.
On two lines.
\"\"\"
""")
# Act
parser = WhenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{When}
@test result.value isa When
@test result.value.text == "some action"
@test result.value.block_text == "Some block text.\nOn two lines."
end
@testset "Whennospace; Not OK" begin
# Arrange
input = ParserInput("""
Whennospace some action
""")
# Act
parser = WhenParser()
result = parser(input)
# Assert
@test result isa BadParseResult{When}
end
end
@testset "ThenParser" begin
# Tests for Given and When demonstrate correct behavior
# and the design of the parser is such that this step will
# have the same behavior, so I'm merely demonstrating the existence
# of a ThenParser, not fully testing it.
@testset "Then some postcondition; OK" begin
# Arrange
input = ParserInput("""
Then some postcondition
""")
# Act
parser = ThenParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Then}
@test result.value == Then("some postcondition")
end
@testset "Thennospace; Not OK" begin
# Arrange
input = ParserInput("""
Thennospace some postcondition
""")
# Act
parser = ThenParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Then}
end
end
@testset "Steps parser" begin
@testset "Two givens; OK" begin
# Arrange
input = ParserInput("""
Given some precondition
Given some other precondition
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value[1] == Given("some precondition")
@test result.value[2] == Given("some other precondition")
end
@testset "Two givens separated by a block text; OK" begin
# Arrange
input = ParserInput("""
Given some precondition
\"\"\"
Block text line 1.
Block text line 2.
\"\"\"
Given some other precondition
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value[1] isa Given
@test result.value[1].text == "some precondition"
@test result.value[1].block_text == "Block text line 1.\nBlock text line 2."
@test result.value[2] == Given("some other precondition")
end
@testset "Two givens follow by a block text; OK" begin
# Arrange
input = ParserInput("""
Given some precondition
Given some other precondition
\"\"\"
Block text line 1.
Block text line 2.
\"\"\"
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value[1] == Given("some precondition")
@test result.value[2] isa Given
@test result.value[2].text == "some other precondition"
@test result.value[2].block_text == "Block text line 1.\nBlock text line 2."
end
@testset "No givens; OK" begin
# Arrange
input = ParserInput("""
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value == []
end
@testset "Given then When; OK" begin
# Arrange
input = ParserInput("""
Given some precondition
When some action
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value[1] == Given("some precondition")
@test result.value[2] == When("some action")
end
@testset "When, Then; OK" begin
# Arrange
input = ParserInput("""
When some action
Then some postcondition
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value[1] == When("some action")
@test result.value[2] == Then("some postcondition")
end
@testset "Given, When, Then; OK" begin
# Arrange
input = ParserInput("""
Given some precondition
When some action
Then some postcondition
""")
# Act
parser = StepsParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{ScenarioStep}}
@test result.value[1] == Given("some precondition")
@test result.value[2] == When("some action")
@test result.value[3] == Then("some postcondition")
end
end
@testset "BackgroundParser" begin
@testset "Empty Background, no description; OK" begin
# Arrange
input = ParserInput("""
Background:
""")
# Act
parser = BackgroundParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Background}
@test result.value.description == ""
@test result.value.steps == []
end
@testset "Empty Background, Some description; OK" begin
# Arrange
input = ParserInput("""
Background: Some description
""")
# Act
parser = BackgroundParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Background}
@test result.value.description == "Some description"
@test result.value.steps == []
end
@testset "Scenario:; Not OK" begin
# Arrange
input = ParserInput("""
Scenario:
""")
# Act
parser = BackgroundParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Background}
end
@testset "Given/When/Then; OK" begin
# Arrange
input = ParserInput("""
Background: Some new description
Given some precondition
When some action
Then some precondition
""")
# Act
parser = BackgroundParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Background}
@test result.value.description == "Some new description"
@test result.value.steps == [Given("some precondition"), When("some action"), Then("some precondition")]
end
end
@testset "FeatureParser" begin
@testset "Empty feature; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
end
@testset "Feature with two scenarios any steps; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario: Some scenario
Scenario: Some other scenario
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[2].description == "Some other scenario"
end
@testset "Feature with a scenario; Default background" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario: Some scenario
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.background.description == ""
@test result.value.background.steps == []
end
@testset "Feature and Rule with one scenario; OK" begin
# Arrange
input = ParserInput("""
Feature: A feature description
Rule: Some rule description
Scenario: Some scenario
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "A feature description"
@test result.value.scenarios[1].description == "Some rule description"
rule = result.value.scenarios[1]
@test rule.scenarios[1].description == "Some scenario"
end
@testset "Feature with a background; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Background: Some background
Given some precondition
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.background.description == "Some background"
@test result.value.background.steps == [Given("some precondition")]
@test result.value.scenarios == []
end
@testset "Feature with a background, then a Scenario; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Background: Some background
Given some precondition
Scenario: Some scenario
When some action
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.background.description == "Some background"
@test result.value.background.steps == [Given("some precondition")]
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[1].steps == [When("some action")]
end
@testset "Feature with a scenario outline; Scenario outline is the only step" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario Outline: Some scenario
Given some precondition <Foo>
Examples:
| Foo |
| bar |
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
outline = result.value.scenarios[1]
@test outline.description == "Some scenario"
@test outline.placeholders == ["Foo"]
@test outline.examples == [["bar"]]
end
end
@testset "FeatureFileParser" begin
@testset "Feature with two scenarios any steps; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario: Some scenario
Scenario: Some other scenario
""")
# Act
parser = FeatureFileParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[2].description == "Some other scenario"
end
@testset "Feature, then unallowed new Feature; Not OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
Scenario: Some scenario
Scenario: Some other scenario
Feature: Not allowed here
""")
# Act
parser = FeatureFileParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Feature}
end
end
@testset "DataTable" begin
@testset "One row, one column; OK" begin
# Arrange
input = ParserInput("""
| Foo |
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa OKParseResult{DataTable}
@test result.value == [["Foo"]]
end
@testset "One row, one column; OK" begin
# Arrange
input = ParserInput("""
| Bar |
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa OKParseResult{DataTable}
@test result.value == [["Bar"]]
end
@testset "Table row then line Baz; OK" begin
# Arrange
input = ParserInput("""
| Foo |
Baz
""")
# Act
parser = Sequence{Union{DataTable, String}}(DataTableParser(), Line("Baz"))
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{Union{DataTable, String}}}
@test result.value[1] == [["Foo"]]
@test result.value[2] == "Baz"
end
@testset "No pipes on the line; Not OK" begin
# Arrange
input = ParserInput("""
Baz
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa BadParseResult{DataTable}
end
@testset "Two columns; OK" begin
# Arrange
input = ParserInput("""
| Foo | Bar |
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa OKParseResult{DataTable}
@test result.value == [["Foo", "Bar"]]
end
@testset "Three columns; OK" begin
# Arrange
input = ParserInput("""
| Foo | Bar | Baz |
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa OKParseResult{DataTable}
@test result.value == [["Foo", "Bar", "Baz"]]
end
@testset "Two rows; OK" begin
# Arrange
input = ParserInput("""
| Foo |
| Bar |
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa OKParseResult{DataTable}
@test result.value == [["Foo"], ["Bar"]]
end
@testset "EOF; Not OK" begin
# Arrange
input = ParserInput("""
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa BadParseResult{DataTable}
end
@testset "Many columns and rows; OK" begin
# Arrange
input = ParserInput("""
| Foo | Bar |
| Baz | Quux |
""")
# Act
parser = DataTableParser()
result = parser(input)
# Assert
@test result isa OKParseResult{DataTable}
@test result.value == [["Foo", "Bar"], ["Baz", "Quux"]]
end
end
@testset "TagsParser" begin
@testset "AnyLine; @tag; OK" begin
# Arrange
input = ParserInput("@tag")
# Act
parser = Experimental.AnyLine()
result = parser(input)
# Assert
@test result isa OKParseResult{String}
@test result.value == "@tag"
end
@testset "Splitter; @tag1 and @tag2; OK" begin
# Arrange
input = ParserInput("@tag1 @tag2")
# Act
parser = Experimental.Splitter(Experimental.AnyLine(), isspace)
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag1", "@tag2"]
end
@testset "Validator; All strings begin with an @; OK" begin
# Arrange
input = ParserInput("@tag1 @tag2")
# Act
inner = Experimental.Splitter(Experimental.AnyLine(), isspace)
parser = Experimental.Validator{String}(inner, x -> startswith(x, "@"))
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag1", "@tag2"]
end
@testset "Validator; Not all strings begin with an @; Not OK" begin
# Arrange
input = ParserInput("@tag1 tag2")
# Act
inner = Experimental.Splitter(Experimental.AnyLine(), isspace)
parser = Experimental.Validator{String}(inner, x -> startswith(x, "@"))
result = parser(input)
# Assert
@test result isa BadParseResult{Vector{String}}
end
@testset "@tag; OK" begin
# Arrange
input = ParserInput("""
@tag
""")
# Act
parser = TagParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag"]
end
@testset "tag; Not OK" begin
# Arrange
input = ParserInput("""
tag
""")
# Act
parser = TagParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Vector{String}}
end
@testset "EOF; Not OK" begin
# Arrange
input = ParserInput("")
# Act
parser = TagParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Vector{String}}
end
@testset "@tag followed by Scenario:; OK" begin
# Arrange
input = ParserInput("""
@tag1
Scenario: Some scenario
""")
# Act
parser = Sequence{Union{Vector{String}, Keyword}}(
TagParser(),
KeywordParser("Scenario:")
)
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{Union{Vector{String}, Keyword}}}
@test result.value[1] == ["@tag1"]
@test result.value[2] == Keyword("Scenario:", "Some scenario")
end
@testset "@tag1 @tag2; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
""")
# Act
parser = TagParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag1", "@tag2"]
end
@testset "@tag1 @tag2 with multiple spaces; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
""")
# Act
parser = TagParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag1", "@tag2"]
end
@testset "@tag1 then @tag2; OK" begin
# Arrange
input = ParserInput("""
@tag1
@tag2
""")
# Act
parser = TagLinesParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag1", "@tag2"]
end
@testset "@tag1 @tag2, then @tag3; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
@tag3
""")
# Act
parser = TagLinesParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{String}}
@test result.value == ["@tag1", "@tag2", "@tag3"]
end
end
@testset "Tags" begin
@testset "Empty Scenario, with tags; OK" begin
# Arrange
input = ParserInput("""
@tag1
@tag2
Scenario: Some description
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some description"
@test result.value.tags == ["@tag1", "@tag2"]
end
@testset "Empty feature with tags; OK" begin
# Arrange
input = ParserInput("""
@sometag
@othertag
Feature: Some feature
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.tags == ["@sometag", "@othertag"]
end
@testset "Empty Rule with tags; OK" begin
# Arrange
input = ParserInput("""
@sometag
@othertag
Rule: Some rule
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.tags == ["@sometag", "@othertag"]
end
end
@testset "Long descriptions parser" begin
@testset "Description Foo; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Foo
Given some precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.long_description == "Foo"
end
@testset "Description Bar; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Bar
Given some precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.long_description == "Bar"
end
@testset "Description Bar then When; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Bar
When some action
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.long_description == "Bar"
end
@testset "Description Bar then Then; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Bar
Then some postcondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.long_description == "Bar"
end
end
@testset "Long descriptions" begin
@testset "Description on a feature, no Background; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
This is a description.
On two lines.
Scenario: Some scenario
When some action
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.header.long_description == ["This is a description.\nOn two lines."]
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[1].steps == [When("some action")]
end
@testset "Description on a feature, with Background; OK" begin
# Arrange
input = ParserInput("""
Feature: Some feature
This is a description.
On two lines.
Background: Some background
Given some precondition
Scenario: Some scenario
When some action
""")
# Act
parser = FeatureParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Feature}
@test result.value.header.description == "Some feature"
@test result.value.header.long_description == ["This is a description.\nOn two lines."]
@test result.value.background.description == "Some background"
@test result.value.background.steps == [Given("some precondition")]
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[1].steps == [When("some action")]
end
@testset "Scenario with a description; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Foo
Bar
Baz
Given some precondition
Given some other precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.long_description == "Foo\nBar\nBaz"
end
@testset "Background with a description; OK" begin
# Arrange
input = ParserInput("""
Background: Some background description
This is a description.
On two lines.
Given some precondition
Given some other precondition
""")
# Act
parser = BackgroundParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Background}
@test result.value.description == "Some background description"
@test result.value.long_description == "This is a description.\nOn two lines."
end
@testset "Description on a Rule; OK" begin
# Arrange
input = ParserInput("""
Rule: Some rule
This is a description.
On two lines.
Scenario: Some scenario
When some action
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == "Some rule"
@test result.value.longdescription == "This is a description.\nOn two lines."
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[1].steps == [When("some action")]
end
# TODO Scenario Outline
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 955 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Behavior.Gherkin.Experimental
using Behavior.Gherkin.Experimental: BadExpectedEOFParseResult, BadUnexpectedEOFParseResult, And
using Behavior.Gherkin: Given, When, Then, Scenario, Feature, ScenarioStep, Background
using Behavior.Gherkin: DataTable, ScenarioOutline
using Test
include("combinators_test.jl")
include("gherkin_combinators_test.jl")
include("scenarios_test.jl") | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 19596 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@testset "Gherkin Scenarios " begin
@testset "Scenario parser" begin
@testset "Empty Scenario, no description; OK" begin
# Arrange
input = ParserInput("""
Scenario:
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == ""
@test result.value.steps == []
end
@testset "Empty Scenario, some description; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some description
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some description"
@test result.value.steps == []
end
@testset "Scenario Outline:, Not OK" begin
# Arrange
input = ParserInput("""
Scenario Outline:
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa BadParseResult{Scenario}
end
@testset "Two givens, Some new description; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
Given some other precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.steps == [Given("some precondition"), Given("some other precondition")]
end
@testset "Given/When/Then; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
When some action
Then some precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.steps == [Given("some precondition"), When("some action"), Then("some precondition")]
end
@testset "When/Then; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
When some action
Then some precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.steps == [When("some action"), Then("some precondition")]
end
@testset "Then; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Then some precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.steps == [Then("some precondition")]
end
@testset "Two givens with a block text, Some new description; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
\"\"\"
Block text line.
\"\"\"
Given some other precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.steps == [
Given("some precondition"; block_text="Block text line."),
Given("some other precondition")]
end
@testset "A step has a data table; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
| Foo | Bar |
| Baz | Quux |
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
given = result.value.steps[1]
@test given.text == "some precondition"
@test given.datatable == [
["Foo", "Bar"],
["Baz", "Quux"]
]
end
@testset "A step has a data table and a block text; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
| Foo | Bar |
| Baz | Quux |
\"\"\"
Some block text
\"\"\"
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
given = result.value.steps[1]
@test given.text == "some precondition"
@test given.datatable == [
["Foo", "Bar"],
["Baz", "Quux"]
]
@test given.block_text == "Some block text"
end
@testset "A step has block text and data table; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
\"\"\"
Some block text
\"\"\"
| Foo | Bar |
| Baz | Quux |
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
given = result.value.steps[1]
@test given.text == "some precondition"
@test given.datatable == [
["Foo", "Bar"],
["Baz", "Quux"]
]
@test given.block_text == "Some block text"
end
end
@testset "RuleParser" begin
@testset "Empty Rule, no description; OK" begin
# Arrange
input = ParserInput("""
Rule:
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == ""
@test result.value.scenarios == []
end
@testset "Empty Rule, Some rule description; OK" begin
# Arrange
input = ParserInput("""
Rule: Some rule description
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == "Some rule description"
@test result.value.scenarios == []
end
@testset "Rule with one scenario; OK" begin
# Arrange
input = ParserInput("""
Rule: Some rule description
Scenario: Some scenario
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == "Some rule description"
@test result.value.scenarios[1].description == "Some scenario"
end
@testset "Rule with two scenarios; OK" begin
# Arrange
input = ParserInput("""
Rule: Some rule description
Scenario: Some scenario
Scenario: Some other scenario
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == "Some rule description"
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[2].description == "Some other scenario"
end
@testset "Rule with two scenarios any steps; OK" begin
# Arrange
input = ParserInput("""
Rule: Some rule description
Scenario: Some scenario
Given some precondition
Given some other precondition
Scenario: Some other scenario
Given some third precondition
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == "Some rule description"
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[1].steps == [
Given("some precondition"), Given("some other precondition")
]
@test result.value.scenarios[2].description == "Some other scenario"
@test result.value.scenarios[2].steps == [
Given("some third precondition")
]
end
@testset "Rule with two scenarios any steps separated by blank lines; OK" begin
# Arrange
input = ParserInput("""
Rule: Some rule description
Scenario: Some scenario
Given some precondition
Given some other precondition
Scenario: Some other scenario
Given some third precondition
""")
# Act
parser = RuleParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Rule}
@test result.value.description == "Some rule description"
@test result.value.scenarios[1].description == "Some scenario"
@test result.value.scenarios[1].steps == [
Given("some precondition"), Given("some other precondition")
]
@test result.value.scenarios[2].description == "Some other scenario"
@test result.value.scenarios[2].steps == [
Given("some third precondition")
]
end
end
@testset "Scenario Outlines" begin
@testset "Scenario Outline one step, no description; OK" begin
# Arrange
input = ParserInput("""
Scenario Outline:
Given some value <Foo>
Examples:
| Foo |
| bar |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.description == ""
@test result.value.tags == []
@test result.value.steps == [Given("some value <Foo>")]
@test result.value.placeholders == ["Foo"]
@test result.value.examples == [["bar"]]
end
@testset "Scenario Outline, then EOF; OK" begin
# Arrange
input = ParserInput("""
Scenario Outline:
Given some value <Foo>
Examples:
| Foo |
| bar |
""")
# Act
parser = Sequence{Union{ScenarioOutline, Nothing}}(
ScenarioOutlineParser(),
EOFParser())
result = parser(input)
# Assert
@test result isa OKParseResult{Vector{Union{ScenarioOutline, Nothing}}}
@test result.value[1].description == ""
@test result.value[1].steps == [Given("some value <Foo>")]
@test result.value[1].placeholders == ["Foo"]
@test result.value[1].examples == [["bar"]]
@test result.value[2] === nothing
end
@testset "Scenario Outline one step, and a description; OK" begin
# Arrange
input = ParserInput("""
Scenario Outline: Some scenario outline
Given some value <Foo>
Examples:
| Foo |
| bar |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.description == "Some scenario outline"
end
@testset "Scenario Outline with tags; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
Scenario Outline: Some scenario outline
Given some value <Foo>
Examples:
| Foo |
| bar |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.tags == ["@tag1", "@tag2"]
end
@testset "Scenario Outline with Given/When/Then; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
Scenario Outline: Some scenario outline
Given some value <Foo>
When some action
Then some postcondition
Examples:
| Foo |
| bar |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.steps == [
Given("some value <Foo>"),
When("some action"),
Then("some postcondition")
]
end
@testset "Scenario Outline a longer description; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
Scenario Outline: Some scenario outline
This is a long description.
On two lines.
Given some value <Foo>
When some action
Then some postcondition
Examples:
| Foo |
| bar |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.long_description == "This is a long description.\nOn two lines."
end
@testset "Scenario Outline with two placeholders; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
Scenario Outline: Some scenario outline
Given some value <Foo>
When some action
Then some postcondition
Examples:
| Foo | Bar |
| baz | quux |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.placeholders == ["Foo", "Bar"]
end
@testset "Scenario Outline with two columns of examples; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
Scenario Outline: Some scenario outline
Given some value <Foo>
When some action
Then some postcondition
Examples:
| Foo | Bar |
| baz | quux |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.examples[1] == ["baz", "quux"]
end
@testset "Scenario Outline with two rows of examples; OK" begin
# Arrange
input = ParserInput("""
@tag1 @tag2
Scenario Outline: Some scenario outline
Given some value <Foo>
When some action
Then some postcondition
Examples:
| Foo | Bar |
| baz | quux |
| fnord | quuxbaz |
""")
# Act
parser = ScenarioOutlineParser()
result = parser(input)
# Assert
@test result isa OKParseResult{ScenarioOutline}
@test result.value.examples[1] == ["baz", "quux"]
@test result.value.examples[2] == ["fnord", "quuxbaz"]
end
end
@testset "And/But*" begin
@testset "Scenario has an And; OK" begin
# Arrange
input = ParserInput("""
Scenario: Some new description
Given some precondition
And some other precondition
""")
# Act
parser = ScenarioParser()
result = parser(input)
# Assert
@test result isa OKParseResult{Scenario}
@test result.value.description == "Some new description"
@test result.value.steps == [Given("some precondition"), And("some other precondition")]
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 18423 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Selection
using Behavior.Selection: TagExpressionInput, SingleTagParser, SequenceParser, Tag, AnyTagExpression, NotTagParser, ParenthesesParser
using Behavior.Selection: TagExpression
@testset "Selection combinators" begin
@testset "Repeating" begin
@testset "While tag; not @b; OK, empty" begin
# Arrange
input = TagExpressionInput("not @b")
parser = Selection.Repeating(SingleTagParser())
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Vector{Tag}}
@test result.value == []
end
@testset "While tag; @a not @b; OK, a" begin
# Arrange
input = TagExpressionInput("@a not @b")
parser = Selection.Repeating(SingleTagParser())
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Vector{Tag}}
@test result.value == [Tag("@a")]
end
@testset "While tag; @a @c not @b; OK, ac" begin
# Arrange
input = TagExpressionInput("@a @c not @b")
parser = Selection.Repeating(SingleTagParser())
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Vector{Tag}}
@test result.value == [Tag("@a"), Tag("@c")]
end
end
@testset "TagSelection parser" begin
@testset "@foo; OK" begin
# Arrange
input = TagExpressionInput("@foo")
parser = SingleTagParser()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.Tag}
@test result.value == Selection.Tag("@foo")
end
@testset "@bar; OK" begin
# Arrange
input = TagExpressionInput("@bar")
parser = SingleTagParser()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.Tag}
@test result.value == Selection.Tag("@bar")
end
@testset "foo; Not OK" begin
# Arrange
input = TagExpressionInput("foo")
parser = SingleTagParser()
# Act
result = parser(input)
# Assert
@test result isa Selection.BadParseResult{Selection.Tag}
end
@testset "Tag followed by a non-tag character; OK" begin
nontagchars = "() "
for nontagchar in nontagchars
@testset "Non-tag character $(nontagchar)" begin
# Arrange
input = TagExpressionInput("@foo$(nontagchar)")
parser = SingleTagParser()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.Tag}
@test result.value == Selection.Tag("@foo")
end
end
end
end
@testset "TakeUntil parser" begin
@testset "TakeUntil b; ab; a" begin
# Arrange
input = TagExpressionInput("ab")
parser = Selection.TakeUntil("b")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "a"
end
@testset "TakeUntil b; aab; aa" begin
# Arrange
input = TagExpressionInput("aab")
parser = Selection.TakeUntil("b")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "aa"
end
@testset "TakeUntil b; aa; aa" begin
# Arrange
input = TagExpressionInput("aa")
parser = Selection.TakeUntil("b")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "aa"
end
@testset "TakeUntil space; cc ; cc" begin
# Arrange
input = TagExpressionInput("cc ")
parser = Selection.TakeUntil(" ")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "cc"
end
@testset "TakeUntil space or ); cc ; cc" begin
# Arrange
input = TagExpressionInput("cc ")
parser = Selection.TakeUntil(" )")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "cc"
end
@testset "TakeUntil space or ); cc); cc" begin
# Arrange
input = TagExpressionInput("cc)")
parser = Selection.TakeUntil(" )")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "cc"
end
@testset "TakeUntil b, then TakeUntil d; abcd; a, bc" begin
# Arrange
input = TagExpressionInput("abcd")
# Act
parser1 = Selection.TakeUntil("b")
result1 = parser1(input)
parser2 = Selection.TakeUntil("d")
result2 = parser2(result1.newinput)
# Assert
@test result1 isa Selection.OKParseResult{String}
@test result1.value == "a"
@test result2 isa Selection.OKParseResult{String}
@test result2.value == "bc"
end
@testset "TakeUntil space; Prefix whitespace then cc ; cc" begin
# Arrange
input = TagExpressionInput(" cc ")
parser = Selection.TakeUntil(" ")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "cc"
end
@testset "TakeUntil space twice; a d ; a, then d" begin
# Arrange
input = TagExpressionInput("a d ")
# Act
parser = Selection.TakeUntil(" ")
result1 = parser(input)
result2 = parser(result1.newinput)
# Assert
@test result1 isa Selection.OKParseResult{String}
@test result1.value == "a"
@test result2 isa Selection.OKParseResult{String}
@test result2.value == "d"
end
end
@testset "Sequences" begin
@testset "@foo then @bar; OK" begin
# Arrange
input = TagExpressionInput("@foo @bar")
parser = SequenceParser{Tag}(
SingleTagParser(),
SingleTagParser()
)
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Vector{Tag}}
@test result.value == [Tag("@foo"), Tag("@bar")]
end
@testset "@foo @bar @baz; OK" begin
# Arrange
input = TagExpressionInput("@foo @bar @baz")
parser = SequenceParser{Tag}(
SingleTagParser(),
SingleTagParser(),
SingleTagParser()
)
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Vector{Tag}}
@test result.value == [Tag("@foo"), Tag("@bar"), Tag("@baz")]
end
@testset "@foo @bar, then a standalone @baz; OK" begin
# Arrange
input = TagExpressionInput("@foo @bar @baz")
parser1 = SequenceParser{Tag}(
SingleTagParser(),
SingleTagParser()
)
# Act
result1 = parser1(input)
parser2 = SingleTagParser()
result2 = parser2(result1.newinput)
# Assert
@test result1 isa Selection.OKParseResult{Vector{Tag}}
@test result1.value == [Tag("@foo"), Tag("@bar")]
@test result2 isa Selection.OKParseResult{Tag}
@test result2.value == Tag("@baz")
end
@testset "not @a then @c; @c; Not OK" begin
# Arrange
input = TagExpressionInput("@c")
parser = SequenceParser{Selection.TagExpression}(
NotTagParser(),
SingleTagParser()
)
# Act
result = parser(input)
# Assert
@test result isa Selection.BadParseResult{Vector{Selection.TagExpression}}
end
end
@testset "Literal parser" begin
@testset "Literal foo; foo; OK" begin
# Arrange
input = TagExpressionInput("foo")
parser = Selection.Literal("foo")
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{String}
@test result.value == "foo"
end
@testset "Literal foo; bar; Not OK" begin
# Arrange
input = TagExpressionInput("bar")
parser = Selection.Literal("foo")
# Act
result = parser(input)
# Assert
@test result isa Selection.BadParseResult{String}
end
@testset "Literal foobar; bar; Not OK" begin
# Arrange
input = TagExpressionInput("bar")
parser = Selection.Literal("foobar")
# Act
result = parser(input)
# Assert
@test result isa Selection.BadParseResult{String}
end
@testset "Literal foo then bar; foobar; OK" begin
# Arrange
input = TagExpressionInput("foobar")
# Act
parser = SequenceParser{String}(
Selection.Literal("foo"),
Selection.Literal("bar")
)
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Vector{String}}
@test result.value == ["foo", "bar"]
end
end
@testset "Or parser" begin
@testset "@foo or @bar; OK" begin
# Arrange
input = TagExpressionInput("@foo or @bar")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{TagExpression}
@test result.value == Selection.Or(Tag("@foo"), Tag("@bar"))
end
# TODO When Or parser support tag expressions.
# Currently they only support single tags.
# @testset "(not @foo) or @bar; OK" begin
# # Arrange
# input = TagExpressionInput("(not @foo) or @bar")
# parser = AnyTagExpression()
# # Act
# result = parser(input)
# # Assert
# @test result isa Selection.OKParseResult{Selection.Or}
# @test result.value == Selection.Or(
# Selection.Parentheses(
# Selection.Not(Tag("@foo"))),
# Tag("@bar"))
# end
end
@testset "ParenthesesParser" begin
@testset "Parentheses around tag; (@foo); OK, tag @foo" begin
# Arrange
input = TagExpressionInput("(@foo)")
parser = Selection.ParenthesesParser()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.Parentheses}
@test result.value == Selection.Parentheses(Tag("@foo"))
end
@testset "Parentheses around Or; (@foo or @bar); OK, @foo or @bar" begin
# Arrange
input = TagExpressionInput("(@foo or @bar)")
parser = Selection.ParenthesesParser()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.Parentheses}
@test result.value == Selection.Parentheses(Selection.Or(Tag("@foo"), Tag("@bar")))
end
end
@testset "AnyOfParser" begin
@testset "AnyOf Tag, Not; @a; OK, @a" begin
# Arrange
input = TagExpressionInput("@a")
parser = Selection.AnyOfParser(
SingleTagParser(),
NotTagParser()
)
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Tag("@a")
end
@testset "AnyOf Tag,Not; not @c ; OK, not @c" begin
# Arrange
input = TagExpressionInput("not @c")
parser = Selection.AnyOfParser(
SingleTagParser(),
NotTagParser()
)
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Not(Selection.Tag("@c"))
end
@testset "AnyOf Or,Not; @c; Not OK" begin
# Arrange
input = TagExpressionInput("@c")
parser = Selection.AnyOfParser(
ParenthesesParser(),
NotTagParser()
)
# Act
result = parser(input)
# Assert
@test result isa Selection.BadParseResult{Selection.TagExpression}
end
@testset "AnyOf Group,Not; @c; AnyOfParser fails, next parser finds @c" begin
# Arrange
input = TagExpressionInput("@c")
parser1 = Selection.AnyOfParser(
ParenthesesParser(),
NotTagParser()
)
parser2 = SingleTagParser()
# Act
result1 = parser1(input)
result2 = parser2(result1.newinput)
# Assert
@test result2 isa Selection.OKParseResult{Tag}
@test result2.value == Tag("@c")
end
end
@testset "AnyTagExpression" begin
@testset "AnyTagExpression; @a; OK" begin
# Arrange
input = TagExpressionInput("@a")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Tag("@a")
end
@testset "AnyTagExpression; not @a; OK" begin
# Arrange
input = TagExpressionInput("not @a")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Not(Tag("@a"))
end
@testset "AnyTagExpression; @a or @b; OK" begin
# Arrange
input = TagExpressionInput("@a or @b")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Or(Tag("@a"), Tag("@b"))
end
@testset "AnyTagExpression; (@a); OK" begin
# Arrange
input = TagExpressionInput("(@a)")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Parentheses(Tag("@a"))
end
@testset "AnyTagExpression; (@a or @b); OK" begin
# Arrange
input = TagExpressionInput("(@a or @b)")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Parentheses(
Selection.Or(Tag("@a"), Tag("@b"))
)
end
@testset "AnyTagExpression; (not @c); OK" begin
# Arrange
input = TagExpressionInput("(not @c)")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Parentheses(
Selection.Not(Tag("@c"))
)
end
@testset "AnyTagExpression; not (@a or @b); OK" begin
# Arrange
input = TagExpressionInput("not (@a or @b)")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Not(
Selection.Parentheses(
Selection.Or(Tag("@a"), Tag("@b"))
)
)
end
@testset "SingleTag, Or; @a or @c; Longest, @a or @c, OK" begin
# Arrange
input = TagExpressionInput("@a or @c")
parser = AnyTagExpression()
# Act
result = parser(input)
# Assert
@test result isa Selection.OKParseResult{Selection.TagExpression}
@test result.value == Selection.Or(Tag("@a"), Tag("@c"))
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | code | 11875 | # Copyright 2018 Erik Edin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
using Test
using Behavior.Selection
using Behavior.Gherkin
using Behavior.Gherkin: AbstractScenario
@testset "Selection " begin
# These tests check that the tag selector is parsed and that the expressions are used
# properly. It uses only tags on scenarios to check this, for simplicity. Tests in a
# section below check that tags are inherited properly between features and scenarios.
@testset "Tag selector" begin
@testset "Feature has tag @foo and no scenarios; Selecting @foo returns no scenarios" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[])
# Act
selector = parsetagselector("@foo")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
@testset "Feature has tag @foo and one scenario; selecting @foo returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@foo")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has no tags and no scenarios; select @foo returns no scenarios" begin
# Arrange
header = FeatureHeader("Some feature", String[], String[])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@foo")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
@testset "Feature has tag @bar; Select for @foo returns nothing" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@bar"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@foo")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
@testset "Feature has tag @foo; Select for @bar returns nothing" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@bar")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
@testset "Feature has tags @bar and @foo; Select for @foo returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@bar", "@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@foo")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has no tags; Selector is (not @foo); select returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], [])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("not @foo")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has tag @foo; Selector is (not @foo); select returns nothing" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("not @foo")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
@testset "Feature has tag @foo; Empty selector; Select returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has no tags; Empty selector; Select returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], [])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has no tags; Selector is only whitespace; Select returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], [])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector(" ")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has tag @baz; Selector is only whitespace; Select returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], [])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector(" ")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
end
# These tests check that tags are inherited properly between features and scenarios.
# It assumes that the parsing of tag selectors works as intended, tested above.
@testset "Feature and Scenario selection" begin
@testset "Feature has tag @foo and one scenario, no tags; select returns the feature with one scenario" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[])
])
# Act
selector = parsetagselector("@foo")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Only one scenario has tag @bar; select returns the feature with only that scenario" begin
# Arrange
header = FeatureHeader("Some feature", String[], [])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String["@bar"], ScenarioStep[]),
Scenario("Some other scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@bar")
newfeature = select(selector, feature)
# Assert
@test length(newfeature.scenarios) == 1
@test newfeature.scenarios[1] == feature.scenarios[1]
end
@testset "One scenario has tag @ignore; selecting on (not @ignore) returns only the feature without that tag" begin
# Arrange
header = FeatureHeader("Some feature", String[], [])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String["@ignore"], ScenarioStep[]),
Scenario("Some other scenario", String["@other"], ScenarioStep[]),
])
# Act
selector = parsetagselector("not @ignore")
newfeature = select(selector, feature)
# Assert
@test length(newfeature.scenarios) == 1
@test newfeature.scenarios[1] == feature.scenarios[2]
end
end
@testset "Or expressions" begin
@testset "Feature has tags @foo and one scenario; selecting @foo,@bar returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@foo,@bar")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has tags @foo and one scenario; selecting @bar,@foo returns the feature unchanged" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("@bar,@foo")
newfeature = select(selector, feature)
# Assert
@test newfeature.scenarios == feature.scenarios
end
@testset "Feature has tag @foo; Selector is (not @foo,@bar); select returns nothing" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@foo"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("not @foo,@bar")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
@testset "Feature has tag @bar; Selector is (not @foo,@bar); select returns nothing" begin
# Arrange
header = FeatureHeader("Some feature", String[], ["@bar"])
feature = Feature(header, AbstractScenario[
Scenario("Some scenario", String[], ScenarioStep[]),
])
# Act
selector = parsetagselector("not @foo,@bar")
newfeature = select(selector, feature)
# Assert
@test isempty(newfeature.scenarios)
end
end
end | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | docs | 2913 | # Behavior
[](http://codecov.io/github/erikedin/Behavior.jl?branch=master)

BDD is an acronym for _Behaviour Driven Development_. It is a process for creating and verifying
requirements, written in such a way that they can be executed as code. This package aims to be a
tool for executing such requirements, and creating reports.
# Usage
See [docs/src/usage.md](docs/src/usage.md) for more detailed usage.
See [docs/src/tutorial.md](docs/src/tutorial.md) for a step-by-step introduction to this package.
Specifications are written in the Gherkin format, such as
```gherkin
Feature: Making coffee
Scenario: Making a cup of coffee
Given that there is a cup in the coffee machine
When the "Coffee" button is pressed
Then the cup is filled with coffee
```
For each `Given`, `When`, and `Then` line, a corresponding method is written, which is executed when
that line is reached.
```julia
using Behavior
using CoffeeMachine
hascoffee(cup::Cup) = cup[:coffee] > 0.0
@given("that there is a cup in the coffee machine") do context
cup = Cup()
machine = Machine()
cupisinthemachine(machine, cup)
context[:cup] = cup
context[:machine] = machine
end
@when("the \"Coffee\" button is pressed") do context
machine = context[:machine]
coffeewaspressed(machine)
end
@then("the cup is filled with coffee") do context
cup = context[:cup]
@expect hascoffee(cup)
end
```
Feature files have extension `.feature`, and are stored in the `features` directory (see
"Current state" for current limitations), and step definitions (the executable code) have the
extension `.jl` and are stored in `features/steps`.
# Example project
The project [CoffeeMachine.jl](https://github.com/erikedin/CoffeeMachine.jl) is an example of how to
use Behavior.jl.
# Running
Run the command line tool `runspec.jl` from the directory containing the `features` directory, or
from the Julia REPL with
```julia
julia> using Behavior
julia> runspec()
```
See "Current state" for limitations.
# Current state
The package is not feature complete, but is absolutely in a usable state. It is also under active
development.
These are some current limitations and missing features, that will be lifted as development progresses:
- [ ] Currently does not function in Julia 1.4 and probably not before
- [ ] Presenting the results of scenarios is very rudimentary.
- [ ] Step definition variables do not yet have type information.
- [ ] Gherkin Rules support
## Completed
- [x] Reads feature files from anywhere under `features`.
- [x] Reads step files from anywhere under `features/steps`.
- [x] Variables in step definition strings.
# License
Behavior.jl is licensed under the Apache License version 2.0.
| Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | docs | 97 | # Functions
```@meta
CurrentModule = Behavior
```
```@docs
runspec
```
## Index
```@index
``` | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | docs | 1375 | # Gherkin Experimental Parser
There is a new Gherkin parser, which has support for Rules, and will
replace the current Gherkin parser. It is possible to use this new parser today,
with an experimental flag.
## Command line
If you are running Behavior from the command line,
add a flag like this, to try this new parser
```bash
$ julia cmd/runspec.jl --experimental
```
```bash
$ julia cmd/suggeststeps.jl features/Some.feature features/steps --experimental
```
```bash
$ julia cmd/parseonly.jl features/ --experimental
```
## Using ParseOptions
If you are running Behavior from the `runtests.jl` script, instead create
a `ParseOptions` struct, like
```julia
parseoptions = ParseOptions(use_experimental=true)
runspec("path/to/project", parseoptions=parseoptions)
```
## Progress
The new parser is on par with the current parser in `Behavior.Gherkin`, and
has additional support for `Rule`s. Aside from the flag describe above, the
changes are entirely transparent to the user of Behavior.
The general idea is that the experimental parser will undergo a period of
testing to ensure that no major problems are present, and then it will
replace the current parser in a new release.
While the parser is mostly on par with the current one, there are still some
missing parts, like support for steps `But` and `*`. With the new parser,
they are fortunately trivial to add. | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | docs | 1778 | # Behavior.jl Documentation
BDD is an acronym for _Behaviour Driven Development_. It is a process for creating and verifying
requirements, written in such a way that they can be executed as code. This package aims to be a
tool for executing such requirements, and creating reports.
# Quickstart
Specifications are written in the Gherkin format, such as
```gherkin
Feature: Making coffee
Scenario: Making a cup of coffee
Given that there is a cup in the coffee machine
When the "Coffee" button is pressed
Then the cup is filled with coffee
```
For each `Given`, `When`, and `Then` line, a corresponding method is written, which is executed when
that line is reached.
```julia
using Behavior
using CoffeeMachine
hascoffee(cup::Cup) = cup[:coffee] > 0.0
@given("that there is a cup in the coffee machine") do context
cup = Cup()
machine = Machine()
cupisinthemachine(machine, cup)
context[:cup] = cup
context[:machine] = machine
end
@when("the \"Coffee\" button is pressed") do context
machine = context[:machine]
coffeewaspressed(machine)
end
@then("the cup is filled with coffee") do context
cup = context[:cup]
@expect hascoffee(cup)
end
```
Feature files have extension `.feature`, and are stored in the `features` directory (see
"Current state" for current limitations), and step definitions (the executable code) have the
extension `.jl` and are stored in `features/steps`.
# Example project
The project [CoffeeMachine.jl](https://github.com/erikedin/CoffeeMachine.jl) is an example of how to
use Behavior.jl.
# Running
Run the command line tool `runspec.jl` from the directory containing the `features` directory, or
from the Julia REPL with
```julia
julia> using Behavior
julia> runspec()
``` | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | docs | 20961 | # Tutorial
This is a tutorial style introduction to how Behavior.jl works.
It is not intended as a complete introduction to Behavior Driven Development, but
rather as an introduction to how to start with this package.
This tutorial assumes that you have Julia 1.0 or later installed. It also assumes
you're using Linux, or something similar, but the instructions can be adapted to
Windows.
Here is an overview of the steps we'll take:
1. Create a new package
2. Add some code to test
3. Add Behavior.jl as a dependency
4. Write a Gherkin feature
5. Implement the steps in the feature
6. Test the Gherkin feature
7. Add further scenarios
8. Scenario Outlines
9. Parameters
If you have an existing package you wish to use, skip to step 3, and mentally
replace the package name `CoffeeMachine` with your package name.
## Step 1: Create a new package
Go to a path where you want to create your new package, commonly
`~/.julia/dev`, and start Julia there.
```
$ cd ~/.julia/dev
$ julia
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.6.0 (2021-03-24)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia>
```
To create a new package `CoffeeMachine`, first enter the Pkg mode by pressing
the `]` key.
```julia-repl
julia> ]
# The Julia REPL prompt changes to
(@v1.6) pkg>
```
Create the package by running
```julia-repl
(@v1.6) pkg> generate CoffeeMachine
Generating project CoffeeMachine:
CoffeeMachine/Project.toml
CoffeeMachine/src/CoffeeMachine.jl
(@v1.6) pkg>
```
You now have a brand new package in `~/.julia/dev/CoffeeMachine`.
## Step 2: Add some code
Open the file `~/.julia/dev/CoffeeMachine/src/CoffeeMachine.jl` and add code so that
the `CoffeeMachine` module looks like this (you can remove the default `greet` function):
```julia
module CoffeeMachine
export Machine, Cup, makecoffee!, fillwith!, readdisplay
mutable struct Machine
coffee::Float64
milk::Float64
message::String
Machine(; coffee=0.0, milk=0.0) = new(coffee, milk, "")
end
struct Cup
coffee::Float64
milk::Float64
end
function fillwith!(m::Machine; coffee=0.0, milk=0.0)
m.coffee += coffee
m.milk += milk
end
function makecoffee!(m::Machine; withmilk=false) :: Union{Cup, Nothing}
if m.coffee <= 0.0
display!(m, "Out of coffee")
return nothing
end
if withmilk && m.milk <= 0.0
display!(m, "Out of milk")
return nothing
end
milkincup = if withmilk
1.0
else
0.0
end
m.coffee -= 1.0
m.milk -= milkincup
display!(m, "Enjoy")
Cup(1.0, milkincup)
end
readdisplay(m::Machine) = m.message
display!(m::Machine, msg::String) = m.message = msg
end # module
```
This is a model of a coffee machine, solely for demonstration purposes. It allows you to
make a cup of coffee, optionally with milk. It also has a display that shows messages to
the user.
In later steps, we'll create a Gherkin feature that exercises this code.
## Step 3: Add Behavior as a dependency
NOTE: Behavior is not yet registered as a package, therefore
this tutorial will manually clone the repository from GitHub and add it as a
local development dependency.
In a terminal in `~/.julia/dev`, run
```bash
$ git clone https://github.com/erikedin/Behavior.jl Behavior
```
Note that we're cloning it into a repo without the `.jl` prefix, for consistency with the newly generated package.
Start Julia in `~/.julia/dev` and activate the CoffeeMachine package, by
```
$ julia
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.6.0 (2021-03-24)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia>
```
Go into the package mode and activate CoffeeMachine
```julia-repl
julia> ]
(@v1.6) pkg> activate CoffeeMachine
Activating environment at `~/.julia/dev/CoffeeMachine/Project.toml`
(CoffeeMachine) pkg>
```
To add Behavior as a local development dependency, run
```julia-repl
(CoffeeMachine) pkg> dev Behavior
[ Info: Resolving package identifier `Behavior` as a directory at `~/.julia/dev/Behavior`.
Path `Behavior` exists and looks like the correct package. Using existing path.
Resolving package versions...
Updating `~/.julia/dev/CoffeeMachine/Project.toml`
[7a129280] + Behavior v0.1.0 `../Behavior`
Updating `~/.julia/dev/CoffeeMachine/Manifest.toml`
[7a129280] + Behavior v0.1.0 `../Behavior`
[c27321d9] + Glob v1.3.0
[2a0f44e3] + Base64
[b77e0a4c] + InteractiveUtils
[56ddb016] + Logging
[d6f4376e] + Markdown
[9a3f8284] + Random
[9e88b42a] + Serialization
[8dfed614] + Test
```
We additionally require the standard `Test` module as a dependency, because we'll use the
`@test` macro in the `CoffeeMachine/test/runtests.jl` test file later on.
```julia-repl
(CoffeeMachine) pkg> add Test
[ .. output not shown for brevity .. ]
```
## Step 4: Write a Gherkin feature
Create a directory `CoffeeMachine/features`.
```bash
$ cd ~/.julia/dev/CoffeeMachine
$ mkdir features
```
Add the following Gherkin feature into the file `CoffeeMachine/features/MakingCoffee.feature`:
```Gherkin
Feature: Making Coffee
Scenario: Making a regular coffee
Given a machine filled with coffee beans
When a user makes a cup of coffee
Then the cup contains coffee
```
This file is a simple Gherkin file that contains a requirement that the `CoffeeMachine`
should fulfill.
The file starts by defining a `Feature`, with a short description of what this feature
is about.
A feature file contains one or more `Scenario`s, and each `Scenario` has steps on the form
`Given`, `When`, or `Then`.
In this example, there is one `Scenario`, with three steps.
The above requirement simply states that the machine should dispense coffee into the cup,
under the assumption that there's enough coffee in the machine.
To actually run these requirements as code, we need to add implementations for each step
above.
## Step 5: Implement the steps in the feature
Create a directory `CoffeeMachine/features/steps`.
```bash
$ cd ~/.julia/dev/CoffeeMachine
$ mkdir -p features/steps
```
Add a file `CoffeeMachine/features/steps/makingcoffee.jl`:
```julia
using Behavior
using CoffeeMachine
@given("a machine filled with coffee beans") do context
context[:machine] = Machine(coffee=5.0)
end
@when("a user makes a cup of coffee") do context
m = context[:machine]
cup = makecoffee!(m)
context[:cup] = cup
end
@then("the cup contains coffee") do context
cup = context[:cup]
@expect cup.coffee > 0.0
end
```
This file begins by `using` the `CoffeeMachine` module, which is the thing we wish to
test, and the `Behavior` module, which provides the test functions.
The first step implementation is
```julia
@given("a machine filled with coffee beans") do context
context[:machine] = Machine(coffee=5.0)
end
```
This is a Julia implementation of the `Scenario` step
```Gherkin
Given a machine filled with coffee beans
```
Note that the string provided to the `@given` macro matches that of the `Given` step.
This is how Behavior connects the steps in the Gherkin `.feature` file
with actual code.
The `do context ... end` is the test function that will run for this step.
This snippet of code creates a coffee machine using the `Machine` constructor from the
`CoffeeMachine` module, and provides the coffee machine with 5.0 units of coffee.
It then stores this struct in the `context` dictionary, using the key `:machine`.
The `context` is a dictionary-like object that stores objects between steps. In this
case, the next step will fetch the `Machine` struct from the `context` and perform
operations on it.
The second step implementation is
```julia
@when("a user makes a cup of coffee") do context
m = context[:machine]
cup = makecoffee!(m)
context[:cup] = cup
end
```
This corresponds to the `Scenario` step
```Gherkin
When a user makes a cup of coffee
```
This step retrieves the `Machine` struct from the `context`. The `Machine` struct
was created in the step before this one.
Then we call the `makecoffee!` function, provided by the `CoffeeMachine` module, on this machine.
We store the returned cup in the context, under the key `:cup`.
Note that each step ought to perform a single well-defined action. For instance, this
step does not make any assumption above _what_ the returned cup actually is. In some cases
it will be a `Cup` struct, and in some cases it will be a `Nothing`. This step does not
care about that, but leaves that to later steps.
The third and final step checks that the result is what we expect:
```julia
@then("the cup contains coffee") do context
cup = context[:cup]
@expect cup.coffee > 0.0
end
```
This step retrieves the cup, which was stored in the `context` by the previous step.
We use the `@expect` macro to check that the cup does indeed contain coffee. The
`@expect` macro is provided by `Behavior`, and checks that the
provided expression is true. It is very similar to the `@test` macro in the standard
`Test` module.
If the above expression was false, say that the returned `Cup` struct had `0.0` in its
coffee field, then the `@expect` macro would record a failure, and `Behavior`
would show this step as failed.
## Step 6: Test the Gherkin feature
The above steps have created a Gherkin feature file, and a step implementation file,
but we need to tell `Behavior` to run them.
Julias standard location for tests is in the `test/runtests.jl` file. Add a file
`CoffeeMachine/test/runtests.jl`:
```julia
using Behavior
using CoffeeMachine
using Test
@test runspec(pkgdir(CoffeeMachine))
```
This code calls the `Behavior.runspec` function, which finds all the
feature files and step implementations, and runs all `Scenarios`.
For this example, if will find the `Scenario` "Making a regular coffee", and for each
`Given`/`When`/`Then` step, find the matching step implementation in `CoffeeMachine/features/steps/makingcoffee.jl`, and run it.
The argument `pkgdir(CofeeMachine)` simply passes `runspec` the path to the root of the
`CoffeeMachine` package. From there, it will find the `features` and `features/steps` paths.
Finally, the `@test` macro is used here to ensure that `runspec` returns true, which it does
when all tests pass. If any tests fail, then `runspec` returns false, and the `@test` macro
records a failure, so that Julia knows it failed. Without the `@test` macro here,
`Behavior` will still run all tests, and display them, but the standard
Julia testrunner will not know that any tests failed.
To run the tests, enter the package mode for the `CoffeeMachine` package, and run the `test` command.
```julia
julia> ]
(CoffeeMachine) pkg> test
Testing CoffeeMachine
[ .. some Julia output, ignored for brevity .. ]
Testing Running tests...
Feature: Making Coffee
Scenario: Making a regular coffee
Given a machine filled with coffee beans
When a user makes a cup of coffee
Then the cup contains coffee
| Success | Failure
Feature: Making Coffee | 1 | 0
SUCCESS
Testing CoffeeMachine tests passed
```
`Behavior` will by default print each `Feature`, `Scenario`, and step as they
are being executed, and show a final result of how many scenarios succeeded, and how many
failed as part of each `Feature`. Finally, it says `SUCCESS` to indicate that no errors were
found.
### Optional: Introduce an error to see failures
To see what failures look like, we can intentionally introduce an error into `CoffeeMachine`.
In the file `CoffeeMachine/src/CoffeeMachine.jl`, find the function
```julia
function makecoffee!(m::Machine; withmilk=false) :: Union{Cup, Nothing}
if m.coffee <= 0.0
display!(m, "Out of coffee")
return nothing
end
if withmilk && m.milk <= 0.0
display!(m, "Out of milk")
return nothing
end
milkincup = if withmilk
1.0
else
0.0
end
m.coffee -= 1.0
m.milk -= milkincup
display!(m, "Enjoy")
Cup(1.0, milkincup)
end
```
At the end of this function, change that last line to
```julia
function makecoffee!(m::Machine; withmilk=false) :: Union{Cup, Nothing}
.. keep the rest of the function as is
Cup(0.0, milkincup)
end
```
This ensures that the cup will _not_ contain any coffee.
From the package mode in the `CoffeeMachine` package, run `test` again.
```julia-repl
(CoffeeMachine) pkg> test
Testing CoffeeMachine
[ .. output removed for brevity .. ]
Testing Running tests...
Feature: Making Coffee
Scenario: Making a regular coffee
Given a machine filled with coffee beans
When a user makes a cup of coffee
Then the cup contains coffee
FAILED: cup.coffee > 0.0
| Success | Failure
Feature: Making Coffee | 0 | 1
FAILURE
Test Failed at /home/erik/.julia/dev/CoffeeMachine/test/runtests.jl:5
Expression: runspec(pkgdir(CoffeeMachine))
ERROR: LoadError: There was an error during testing
in expression starting at /home/erik/.julia/dev/CoffeeMachine/test/runtests.jl:5
ERROR: Package CoffeeMachine errored during testing
```
You will see above that while the `Given` and `When` steps were successful, the
`Then` step failed, and it shows the expression that failed `cup.coffee > 0.0`.
Furthermore, the entire feature is marked as failed, and we see that `1` scenario
failed in that feature.
To continue, ensure that you undo the intentional error, so that the tests pass again.
## Step 7: Add further scenarios
Add the following `Scenario` to `CoffeeMachine/features/MakingCoffee.feature`:
```Gherkin
Scenario: Making coffee with milk
Given a machine filled with coffee beans
And that the machine also has milk
When a user makes a cup of coffee with milk
Then the cup contains coffee
And the cup contains milk
```
Note that some of the steps are the same as the previous `Scenario`, while others
are new.
If you run the tests again, you will get a failure saying
```
Scenario: Making coffee with milk
Given a machine filled with coffee beans
Given that the machine also has milk
No match for 'Given that the machine also has milk'
```
This error occurs because we haven't added any step definition for the step
`And that the machine also has milk` yet. The Gherking step type `And` means that
the step type will be whatever came before it, which is a `Given` in this situation.
So, add a step implementation in `CoffeeMachine/features/steps/makingcoffee.jl`:
```julia
@given("that the machine also has milk") do context
m = context[:machine]
fillwith!(m, milk=5.0)
end
```
This expects that a machine has already been constructed, and simply fills
it with milk.
Also add step implementations for the other new steps:
```julia
@when("a user makes a cup of coffee with milk") do context
m = context[:machine]
cup = makecoffee!(m, withmilk=true)
context[:cup] = cup
end
@then("the cup contains milk") do context
cup = context[:cup]
@expect cup.milk > 0.0
end
```
The first one calls `makecoffee!`, but this time with the keyword argument
`withmilk=true`, indicating that we want milk in the coffee.
The second step definition checks that there is milk in the cup.
Runing the tests shows that both scenarios now pass.
```julia-repl
(CoffeeMachine) pkg> test
Testing CoffeeMachine
[ .. removed output for brevity .. ]
Testing Running tests...
Feature: Making Coffee
Scenario: Making a regular coffee
Given a machine filled with coffee beans
When a user makes a cup of coffee
Then the cup contains coffee
Scenario: Making coffee with milk
Given a machine filled with coffee beans
Given that the machine also has milk
When a user makes a cup of coffee with milk
Then the cup contains coffee
Then the cup contains milk
| Success | Failure
Feature: Making Coffee | 2 | 0
SUCCESS
Testing CoffeeMachine tests passed
```
Note that step
```Gherkin
Then the cup contains coffee
```
is reused, as is the initial `Given` that constructs the coffee machine. It is
expected that many, if not most, step definitions will be shared by many scenarios.
## Step 8: Scenario Outlines
`Scenario Outline`s in Gherkin is a way to run one scenario for many similar values.
For instance, say that we want to test the machine's error messages when it is out
of an ingredient. We could write two different scenarios, one for when the machine is
out of coffee, and one for when it is out of milk.
```Gherkin
Feature: Displaying messages
Scenario: Out of coffee
Given a machine without coffee
When a user makes a cup of coffee with milk
Then the machine displays Out of coffee
Scenario: Out of milk
Given a machine without milk
When a user makes a cup of coffee with milk
Then the machine displays Out of milk
```
However, note that the two scenarios above are nearly identical, only differing
in specific values. The sequence of steps are the same, and the type of situation
tested is the same. The only differences are which ingredient is missing and which
error message we expect. This is a situation where you can use a single `Scenario Outline` to
express more than one `Scenario`.
Create a new feature file `CoffeeMachine/features/Display.feature`:
```Gherkin
Feature: Displaying messages
Scenario Outline: Errors
Given a machine without <ingredient>
When a user makes a cup of coffee with milk
Then the machine displays <message>
Examples:
| ingredient | message |
| coffee | Out of coffee |
| milk | Out of milk |
```
The above `Scenario Outline` looks like the above `Scenario`s, but introduces two placeholders,
`<ingredient>` and `<message>` instead of specific values. In the `Examples:` section we
have a table that lists which error message we expect for a given missing ingredient.
The first line in the table has the two placeholders `ingredient` and `message` as
column headers.
This `Scenario Outline` is exactly equivalent to the two `Scenario`s above. To run it,
create a new step definition file `CoffeeMachine/features/steps/display.jl`:
```julia
using Behavior
using CoffeeMachine
@given("a machine without coffee") do context
context[:machine] = Machine(coffee=0.0, milk=5.0)
end
@given("a machine without milk") do context
context[:machine] = Machine(coffee=5.0, milk=0.0)
end
@then("the machine displays Out of coffee") do context
m = context[:machine]
@expect readdisplay(m) == "Out of coffee"
end
@then("the machine displays Out of milk") do context
m = context[:machine]
@expect readdisplay(m) == "Out of milk"
end
```
You can run the tests to ensure that they pass.
## Step 9: Parameters
In the above step, we saw how `Scenario Outline`s can be utilized to reduce otherwise
repetitive `Scenario`s, and improve readability. In the step definition file above, also
note that we have two repetitive steps
```julia
@then("the machine displays Out of coffee") do context
m = context[:machine]
@expect readdisplay(m) == "Out of coffee"
end
@then("the machine displays Out of milk") do context
m = context[:machine]
@expect readdisplay(m) == "Out of milk"
end
```
These two steps check the same aspect of the coffee machine, but for two different values.
While `Scenario Outline`s can be used to reduce repetition in `.feature` files,
parameters can be used to reduce repetition in the step definition `.jl` files.
Both steps above can be reduced to a single step
```julia
@then("the machine displays {String}") do context, message
m = context[:machine]
@expect readdisplay(m) == message
end
```
There are two differences here:
1. The step string has a parameter `{String}` which matches any text.
2. The do-block function now takes two parameters, `context` and `message`.
The value of the `message` argument to the do-block is whatever text is matched by `{String}`.
So, for the first example above
```
Then the machine displays Out of coffee
```
this step will match the step definition `"the machine displays {String}"`, and the variable `message` will take on the value `Out of coffee`.
In this way, we can write a single step definition to match many `Scenario` steps.
Note that while the above example uses a `Scenario Outline` to demonstrate parameters in the
step definition `.jl` file, these are two separate concepts. A step definition with a parameter
like `{String}` can be used for `Scenario`s as well. | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 6836146e6131a4cdc48722eb9700e19603278875 | docs | 15708 | # Usage
## Package layout
A Julia package commonly has the following files and directories:
```
ExamplePackage/
โโโ Manifest.toml
โโโ Project.toml
โโโ src
โย ย โโโ ExamplePackage.jl
โโโ test
โโโ runtests.jl
```
To use Behavior.jl, add inside the package
- a directory `features`
This directory will contain the Gherkin feature files.
- a directory `features/steps`
This directory will contain the code that runs the actual
test steps.
```
ExamplePackage/
โโโ features
โย ย โโโ Example.feature
โย ย โโโ steps
โย ย โโโ ExampleSteps.jl
โโโ Manifest.toml
โโโ Project.toml
โโโ src
โย ย โโโ ExamplePackage.jl
โโโ test
โโโ runtests.jl
```
Above you will see a single Gherkin feature file `features/Example.feature` and a single
step definition file `features/steps/ExampleSteps.jl`.
### Test organization
Behavior searches for both feature files and step files recursively. You may
place them in any subdirectory structure that you like. For instance,
```
ExamplePackage/
โโโ features
โย ย โโโ steps
โย ย โย ย โโโ ExampleSteps.jl
โย ย โย ย โโโ substeps
โย ย โย ย โโโ MoreSteps.jl
โย ย โโโ subfeature1
โย ย โย ย โโโ Example.feature
โย ย โโโ subfeature2
โย ย โโโ Other.feature
โโโ Manifest.toml
โโโ Project.toml
โโโ src
โย ย โโโ ExamplePackage.jl
โโโ test
โโโ runtests.jl
```
## Making assertions
There are currently two ways of making assertions in a step:
- `@expect <expression>`
Checks that some boolean expression is true, and fails if it is not.
- `@fail <string>`
Unconditionally fails a step, with an explanatory string.
Both these macros are exported from the `Behavior` module.
The `@expect` macro should be the primary method used for testing the actual
vs. expected values of your code. The `@fail` macro can be used when the
`@expect` macro is not appropriate, or for checking preconditions in the tests.
Examples:
```julia
using Behavior
@then("one plus one equals two") do context
@expect 1+1 == 2
end
```
```julia
using Behavior
@given("some precondition") do context
if !someprecondition()
# This may not be part of the test, but a precondition to performing the
# actual test you want.
@fail "The tests required this particular precondition to be fulfilled"
end
end
```
## Parameters
NOTE: This is a work in progress, and will see improvement.
A step in Gherkin is matched against step definitions in Julia code. These step definitions
may have parameters, which match against many values. For instance, the Gherkin
```Gherkin
Feature: Demonstrating parameters
Scenario: Value forty-two
Given some value 42
Scenario: Value seventeen
Given some value 17
```
we have two steps. Both of these steps will match the step definition
```julia
using Behavior
@given("value {String}") do context, value
@expect value in ["42", "17"]
end
```
The step definition above has a parameter `{String}`, which matches any string following the
text `value `. The additional argument `value` in the do-block will have the value `"42"` in the
first scenario above, and `"17"` in the second.
In the parameter above we specify the type `String`. One can also use an empty
parameter `{}` which is an alias for `{String}`. The type of the argument `value` will naturally
be `String`.
One can have several parameters in the step definition. For instance, the step definition
```julia
using Behavior
@given("{} {}") do context, key, value
@expect key == "value"
@expect value in ["42", "17"]
end
```
This step definition will also match the above `Given` step, and the first argument `key` will
have the value `"value"` in both the scenarios.
Future work: In the near future, other types will be supported, such as `Int` and `Float`.
### Obsolete
Earlier, parameters were accessible in an object `args` that was provided to all step
implementations, like so
```julia
@given("value {foo}") do context
@expect args[:foo] in ["42", "17"]
end
```
This is no longer supported, and the `args` variable is no longer present.
## Data tables
Gherkin supports tabular data associated with each step. For instance, the scenario
```Gherkin
Feature: Demonstrating data tables
Scenario: Has a table
Given some users
| user id | name |
| 17 | Henry Case |
| 42 | Ainsley Lowbeer |
| 59 | Chevette Washington |
When a search for "Henry Case" is made
Then user id 17 is found
```
The `Given` step above has a data table associated with it. To access the data table
in a step definition, use the `datatable` field on the `context` object:
```julia
using Behavior
@given("some users") do context
users = context.datatable
println(users[1]) # Prints ["user id", "name"]
println(users[2]) # Prints ["17", "Henry Case"]
println(users[3]) # Prints ["42", "Ainsley Lowbeer"]
println(users[4]) # Prints ["59", "Chevette Washington"]
end
```
## Strictness of Gherkin syntax
There are some ways to configure how strict we wish the Gherkin parser to be,
when reading a feature file. For instance, Behavior by default
requires you to only have steps in the order `Given-When-Then`. It fails if it finds,
for instance, a `Given` step after a `When` step in a Scenario. This reflects the
intended use of these steps, but may not be to everyones liking. Therefore, we can
control the strictness of the parser and allow such steps.
```Gherkin
Feature: Demonstrating step order
Scenario: This scenario requires a more lenient parser
Given some precondition
When some action
Then some postcondition
Given some other precondition
When some other action
Then some other postcondition
```
The above feature file will by default fail, as the steps are not strictly in the
order `Given-When-Then`. The error message will look something like
```
ERROR: ./features/DemonstratingStepOrder.feature:7
Line: Given some other precondition
Reason: bad_step_order
Expected: NotGiven
Actual: Given
```
To allow this, create a `Behavior.Gherkin.ParseOptions`
struct, with the keyword `allow_any_step_order = true`.
```julia-repl
julia> using Behavior
julia> using Behavior.Gherkin
julia> p = ParseOptions(allow_any_step_order = true)
julia> runspec(parseoptions=p)
```
Note that at the time of writing, the step order is the only option available for
configuration Gherkin parsing.
## Step implementation suggestions
Behavior can find scenario steps that do not have a corresponding
step implementation, and suggest one. For instance, if you have the feature
```Gherkin
# features/SomeFeature.feature
Feature: Suggestions example
Scenario: Some scenario
Given an existing step
When a step is missing
```
and the step implementation
```julia
# features/steps/somesteps.jl
using Behavior
@given("an existing step") do context
# Some test
end
```
then we can see that the step `When a step is missing` does not have a corresponding
step implementation, like the `Given` step does. To get a suggestion for missing step
implementations in a given feature file, you can run
```julia-repl
julia> using Behavior
julia> suggestmissingsteps("features/SomeFeature.feature", "features/steps")
using Behavior
@when("a step is missing") do context
@fail "Implement me"
end
```
In the code above, we provide `suggestmissingsteps` with a feature file path, and the path
where the step implementations are found. It will find that then `When` step above is missing
and provide you with a sample step implementation. The sample will always initially fail, using
the `@fail` macro, so that it is not accidentally left unimplemented.
Note that `suggestmissingsteps` can also take a `Behavior.Gherkin.ParseOptions` as an optional argument,
which allows you to configure how strict or lenient the parser should be when reading the feature file.
```julia-repl
julia> using Behavior
julia> using Behavior.Gherkin
julia> suggestmissingsteps("features/SomeFeature.feature", "features/steps",
parseoptions=ParseOptions(allow_any_step_order = true))
using Behavior
@when("a step is missing") do context
@fail "Implement me"
end
```
Also note that currently, `suggestmissingsteps` takes only a single feature file. It would of course
be possible to have `suggestmissingsteps` find _all_ feature files in the project, but this could
potentially list too many missing steps to be of use.
### Known limitations
The suggestion method above does not currently generate any step implementations with variables.
This is because the variables are undergoing changes at the time of writing, so generating such
implementations would not be stable for the user.
### Caution
While it's tempting to use this as a means of automatically generating all missing step implementations,
it's important to note that Behavior cannot know how to organize the step implementations.
Oftentimes, many feature files will share common step implementations, so there will not be a
one-to-one correspondence between feature files and the step implementation files. Furthermore,
step implementations with variables will often match many steps for different values of the variables,
but the suggestion method will not be able to determine which steps you want to use variables for.
As an example, in the below feature file, it's quite obvious to a user that a variable step implementation
can be used to match all `Given some value {Int}`, but the suggestion method will not be able to detect this.
```Gherkin
Feature: Demonstrate suggestion limitations
Scenario: Some scenario
Given some value 17
Scenario: Other scenario
Given some value 42
```
## Selecting scenarios by tags
WARNING: At the time of writing the only supported way of selecting tags is a single tag or a
comma-separated list of tags, with an optional "not" expression:
- `@tag`,
- `@tag,@othertag,@thirdtag` matches any of the tags
- `not @tag`
- `not @tag,@othertag` will not match either `@tag` or `@othertag`
The tag selection is a work in progress.
You can select which scenarios to run using the tags specified in the Gherkin files. For example, a feature file can look like this
```Gherkin
@foo
Feature: Describing tags
@bar @baz
Scenario: Some scenario
Given some step
@ignore
Scenario: Ignore this scenario
Given some step
```
Here we have applied the tag `@foo` to the entire feature file. That is, the `@foo` tag is inherited by all scenarios in the feature file.
One scenario has the `@bar` and `@baz` tags, and another has the tag `@ignore`.
You can select to run only the scenarios marked with `@foo` by running
```julia-repl
julia> using Behavior
julia> runspec(tags = "@foo")
```
This will run both scenarios above, as they both inherit the `@foo` tag from the feature level.
You can run only the scenario marked with `@bar` by running
```julia-repl
julia> using Behavior
julia> runspec(tags = "@bar")
```
This will run only the first scenario `Scenario: Some scenario` above, as the second scenario does not have the `@bar` tag.
You can also choose to run scenarios that _do not_ have a given tag, such as `@ignore`.
```julia-repl
julia> using Behavior
julia> runspec(tags = "not @ignore")
```
This will also run only the first scenario, as it does not have the `@ignore` tag, but not the second.
If a feature does not have any matching scenarios, then that feature will be excluded from the results, as it had no bearing
on the result.
### Tag selection syntax
NOTE: The tag selection syntax is a work in progress.
- `@tag`
Select scenarios with the tag `@tag`.
- `not @tag`
Select scenarios that _do not_ have the tag `@tag`.
- `@tag,@othertag,@thirdtag`
Select scenarios that have one or several of the tags `@tag`, `@othertag`, `@thirdtag`.
- `not @tag,@othertag,@thirdtag`
Select scenarios that _do not_ have any of the tags `@tag`, `@othertag`, `@thirdtag`.
### Future syntax
In the future, you will be able to write a more complex expression using `and`, `or`, and parentheses, like
```
@foo and (not @ignore)
```
which will run all scenarios with the `@foo` tag that do not also have the `@ignore` tag.
## Before/after hooks
You can create hooks that execute before and after each scenario, for set up and tear down of test resources.
These must be placed in a file `features/environment.jl` (or some custom features directory you specify).
Note that this is _not_ the `features/steps` directory, where all step definitions are found, but in the
`features` directory.
These are the available hooks:
- `@beforescenario` and `@afterscenario`
- `@beforefeature` and `@afterfeature`
- `@beforeall` and `@afterall`
### Scenario
The `@beforescenario` and `@afterscenario` definitions run before and after each scenario.
```julia
@beforescenario() do context, scenario
# Some code here
end
@afterscenario() do context, scenario
# Some code here
end
```
The intention is that one can place test resources in the `context` object. This is the same object that
the scenario steps will receive as their `context` parameter, so any modifications to it will be
visible in the scenario steps.
The `scenario` parameter allows one to see which scenario is being executed, so test resources can
customized for each scenario.
### Feature
The `@beforefeature` and `@afterfeature` definitions run before and after each feature, respectively.
```julia
@beforefeature() do feature
# Some code here
end
@afterfeature() do feature
# Some code here
end
```
Note that these definitions to not take a `context` parameter. This is because the context is
specific to each scenario. Outside of a scenario, there is no context object defined.
The `feature` parameter contains the feature that is about to/was just executed. One can look
at which tags are available on it, for instance.
Note that today there are no publicly defined methods on the `Behavior.Gherkin.Feature` type. To
determine what can be done with it, you have to consult the source code. This can obviously
be improved.
### All
The `@beforeall` and `@afterall` runs before the first feature, and after the last feature, respectively.
```julia
@beforeall() do
# Some code here
end
@afterall() do
# Some code here
end
```
The hooks take no arguments. As of today, these hooks can only create global resources, as no context
or feature object is available.
### Module scope
The above hooks are evaluated in the `Main` module scope. If you define some function `myfunction`
in the `environment.jl` file, then you can access it by explicitly using `Main.myfunction`.
Here is an `environment.jl` file that stores some data used by the all-hooks.
```julia
using Behavior
myfeatures = []
@beforefeature() do feature
push!(myfeatures, feature)
end
```
In a step definition, you can access the `myfeatures` list by using the `Main` module
```julia
using Behavior
@then("some description") do
latestfeature = Main.myfeatures[end]
@expect latestfeature.header.description == "Some feature description"
end
```
## Breaking on failure, or keep going
During development of a package, it may be advantageous to break on the first failure,
if the execution of all features takes a long time. This can be achieved by running
the `runspec` call with the keyword option `keepgoing=false`.
This means that the execution of features will stop at the first failure. No more scenarios
or features will execute.
For example,
```julia
@test runspec(pkgdir(MyPackage); keepgoing=false)
```
The `keepgoing` flag defaults to true, meaning that all features are executed. | Behavior | https://github.com/erikedin/Behavior.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 11597 |
##############################################################################
##
## Type GLFixedEffectModel
##
##############################################################################
struct GLFixedEffectModel <: RegressionModel
coef::Vector{Float64} # Vector of coefficients
vcov::Matrix{Float64} # Covariance matrix
vcov_type::CovarianceEstimator
nclusters::Union{NamedTuple, Nothing}
iterations::Int64
converged::Bool
esample::BitVector # Is the row of the original dataframe part of the estimation sample?
augmentdf::DataFrame
fekeys::Vector{Symbol}
loglikelihood::Float64
nullloglikelihood::Float64
distribution::Distribution
link::GLM.Link
coefnames::Vector # Name of coefficients
yname::Union{String, Symbol} # Name of dependent variable
formula::FormulaTerm # Original formula
formula_schema
nobs::Int64 # Number of observations
dof::Int64 # Degrees of freedom: nparams (including fixed effects)
dof_residual::Int64 # nobs - degrees of freedoms, adjusted for clustering
deviance::Float64 # Deviance of the fitted model
nulldeviance::Float64 # Null deviance, i.e. deviance of model with constant only
gradient::Matrix{Float64} # concentrated gradient
hessian::Matrix{Float64} # concentrated hessian
end
FixedEffectModels.has_fe(x::GLFixedEffectModel) = FixedEffectModels.has_fe(x.formula)
# Check API at https://github.com/JuliaStats/StatsBase.jl/blob/65351de819ca64941cb81c047e4b77157446f7c5/src/statmodels.jl
# fields
StatsAPI.coef(x::GLFixedEffectModel) = x.coef
StatsAPI.coefnames(x::GLFixedEffectModel) = x.coefnames
StatsAPI.responsename(x::GLFixedEffectModel) = string(x.yname)
StatsAPI.vcov(x::GLFixedEffectModel) = x.vcov
StatsAPI.nobs(x::GLFixedEffectModel) = x.nobs
StatsAPI.dof(x::GLFixedEffectModel) = x.dof
StatsAPI.dof_residual(x::GLFixedEffectModel) = x.dof_residual
StatsAPI.islinear(x::GLFixedEffectModel) = (x.link == IdentityLink() ? true : false)
StatsAPI.deviance(x::GLFixedEffectModel) = x.deviance
StatsAPI.nulldeviance(x::GLFixedEffectModel) = x.nulldeviance
pseudo_r2(x::GLFixedEffectModel) = r2(x, :McFadden)
pseudo_adjr2(x::GLFixedEffectModel) = adjr2(x, :McFadden)
function StatsAPI.confint(x::GLFixedEffectModel, level::Real = 0.95)
scale = quantile(Normal(), 1. - (1. - level)/2.)
se = stderror(x)
hcat(x.coef - scale * se, x.coef + scale * se)
end
StatsAPI.loglikelihood(m::GLFixedEffectModel) = m.loglikelihood
StatsAPI.nullloglikelihood(m::GLFixedEffectModel) = m.nullloglikelihood
glfe_loglik_obs(dist, y, ฮผ, wt, ฯ) = GLM.loglik_obs(dist, y, ฮผ, wt, ฯ)
# GLM loglik_obs for Binomial tries to convert y * wt to an Int, but in this package it is often
# the case that y is not an Int or like an Int
glfe_loglik_obs(::Binomial, y, ฮผ, wt, ฯ) = logpdf(Binomial(Int(wt), ฮผ), y * wt)
# TODO: check whether this is equal to x.gradient
StatsAPI.score(x::GLFixedEffectModel) = error("score is not yet implemented for $(typeof(x)).")
function StatsAPI.predict(x::GLFixedEffectModel)
("mu" โ names(x.augmentdf)) && error("Predicted response `mu` has not been saved. Run nlreg with :mu included in the keyword vector `save`.")
x.augmentdf.mu
end
function StatsAPI.predict(x::GLFixedEffectModel, df::AbstractDataFrame)
error("predict is not yet implemented for $(typeof(x)).")
end
function StatsAPI.residuals(x::GLFixedEffectModel)
("residuals" โ names(x.augmentdf)) && error("Residuals have not been saved. Run nlreg with :fe included in the keyword vector `save`.")
x.augmentdf.residuals
end
# predict, residuals, modelresponse
# function StatsBase.predict(x::FixedEffectModel, df::AbstractDataFrame)
# has_fe(x) && throw("predict is not defined for fixed effect models. To access the fixed effects, run `reg` with the option save = true, and access fixed effects with `fe()`")
# cols, nonmissings = StatsModels.missing_omit(StatsModels.columntable(df), MatrixTerm(x.formula_schema.rhs))
# new_x = modelmatrix(x.formula_schema, cols)
# if all(nonmissings)
# out = new_x * x.coef
# else
# out = Vector{Union{Float64, Missing}}(missing, size(df, 1))
# out[nonmissings] = new_x * x.coef
# end
# return out
# end
# function StatsBase.residuals(x::FixedEffectModel, df::AbstractDataFrame)
# if !has_fe(x)
# cols, nonmissings = StatsModels.missing_omit(StatsModels.columntable(df), x.formula_schema)
# new_x = modelmatrix(x.formula_schema, cols)
# y = response(x.formula_schema, df)
# if all(nonmissings)
# out = y - new_x * x.coef
# else
# out = Vector{Union{Float64, Missing}}(missing, size(df, 1))
# out[nonmissings] = y - new_x * x.coef
# end
# return out
# else
# size(x.augmentdf, 2) == 0 && throw("To access residuals in a fixed effect regression, run `reg` with the option save = true, and then access residuals with `residuals()`")
# residuals(x)
# end
# end
# function StatsBase.residuals(x::FixedEffectModel)
# !has_fe(x) && throw("To access residuals, use residuals(x, df::AbstractDataFrame")
# x.augmentdf.residuals
# end
function FixedEffectModels.fe(x::GLFixedEffectModel)
!has_fe(x) && throw("fe() is not defined for fixed effect models without fixed effects")
x.augmentdf[!, Symbol.( "fe_" .* String.(x.fekeys))]
end
function title(x::GLFixedEffectModel)
return "Generalized Linear Fixed Effect Model"
end
function top(x::GLFixedEffectModel)
# make distribution and link a bit nicer
dist = string(typeof(x.distribution))
m = match(r"\w*",dist)
if !isnothing(m)
dist = m.match
end
link = string(typeof(x.link))
m = match(r"\w*",link)
if !isnothing(m)
link = m.match
end
out = [
"Distribution" sprint(show, dist, context = :compact => true);
"Link" sprint(show, link, context = :compact => true);
"Number of obs" sprint(show, nobs(x), context = :compact => true);
"Degrees of freedom" sprint(show, nobs(x) - dof_residual(x), context = :compact => true);
"Deviance" format_scientific(deviance(x));
"Pseudo-R2" format_scientific(pseudo_r2(x));
"Pseudo-Adj. R2" format_scientific(pseudo_adjr2(x));
]
if has_fe(x)
out = vcat(out,
["Iterations" sprint(show, x.iterations, context = :compact => true);
"Converged" sprint(show, x.converged, context = :compact => true);
])
end
return out
end
function Base.show(io::IO, x::GLFixedEffectModel)
show(io, coeftable(x))
end
function StatsAPI.coeftable(x::GLFixedEffectModel)
ctitle = title(x)
ctop = top(x)
cc = coef(x)
se = stderror(x)
coefnms = coefnames(x)
conf_int = confint(x)
# put (intercept) last
if !isempty(coefnms) && ((coefnms[1] == Symbol("(Intercept)")) || (coefnms[1] == "(Intercept)"))
newindex = vcat(2:length(cc), 1)
cc = cc[newindex]
se = se[newindex]
conf_int = conf_int[newindex, :]
coefnms = coefnms[newindex]
end
tt = cc ./ se
CoefTable2(
hcat(cc, se, tt, ccdf.(Ref(FDist(1, dof_residual(x))), abs2.(tt)), conf_int[:, 1:2]),
["Estimate","Std.Error","t value", "Pr(>|t|)", "Lower 95%", "Upper 95%" ],
["$(coefnms[i])" for i = 1:length(cc)], 4, ctitle, ctop)
end
##############################################################################
##
## Display Result
##
##############################################################################
## Coeftable2 is a modified Coeftable allowing for a top String matrix displayed before the coefficients.
## Pull request: https://github.com/JuliaStats/StatsBase.jl/pull/119
struct CoefTable2
mat::Matrix
colnms::Vector
rownms::Vector
pvalcol::Integer
title::AbstractString
top::Matrix{AbstractString}
function CoefTable2(mat::Matrix,colnms::Vector,rownms::Vector,pvalcol::Int=0,
title::AbstractString = "", top::Matrix = Any[])
nr,nc = size(mat)
0 <= pvalcol <= nc || throw("pvalcol = $pvalcol should be in 0,...,$nc]")
length(colnms) in [0,nc] || throw("colnms should have length 0 or $nc")
length(rownms) in [0,nr] || throw("rownms should have length 0 or $nr")
length(top) == 0 || (size(top, 2) == 2 || throw("top should have 2 columns"))
new(mat,colnms,rownms,pvalcol, title, top)
end
end
## format numbers in the p-value column
function format_scientific(pv::Number)
return @sprintf("%.3f", pv)
end
function Base.show(io::IO, ct::CoefTable2)
mat = ct.mat; nr,nc = size(mat); rownms = ct.rownms; colnms = ct.colnms;
pvc = ct.pvalcol; title = ct.title; top = ct.top
if length(rownms) == 0
rownms = AbstractString[lpad("[$i]",floor(Integer, log10(nr))+3) for i in 1:nr]
end
if length(rownms) > 0
rnwidth = max(4,maximum([length(nm) for nm in rownms]) + 1)
else
# if only intercept, rownms is empty collection, so previous would return error
rnwidth = 4
end
rownms = [rpad(nm,rnwidth) for nm in rownms]
widths = [length(cn)::Int for cn in colnms]
str = [sprint(show, mat[i,j]; context=:compact => true) for i in 1:nr, j in 1:nc]
if pvc != 0 # format the p-values column
for i in 1:nr
str[i, pvc] = format_scientific(mat[i, pvc])
end
end
for j in 1:nc
for i in 1:nr
lij = length(str[i, j])
if lij > widths[j]
widths[j] = lij
end
end
end
widths .+= 1
totalwidth = sum(widths) + rnwidth
if length(title) > 0
halfwidth = div(totalwidth - length(title), 2)
println(io, " " ^ halfwidth * string(title) * " " ^ halfwidth)
end
if length(top) > 0
for i in 1:size(top, 1)
top[i, 1] = top[i, 1] * ":"
end
println(io, "=" ^totalwidth)
halfwidth = div(totalwidth, 2) - 1
interwidth = 2 + mod(totalwidth, 2)
for i in 1:(div(size(top, 1) - 1, 2)+1)
print(io, top[2*i-1, 1])
print(io, lpad(top[2*i-1, 2], halfwidth - length(top[2*i-1, 1])))
print(io, " " ^interwidth)
if size(top, 1) >= 2*i
print(io, top[2*i, 1])
print(io, lpad(top[2*i, 2], halfwidth - length(top[2*i, 1])))
end
println(io)
end
end
println(io,"=" ^totalwidth)
println(io," " ^ rnwidth *
join([lpad(string(colnms[i]), widths[i]) for i = 1:nc], ""))
println(io,"-" ^totalwidth)
for i in 1:nr
print(io, rownms[i])
for j in 1:nc
print(io, lpad(str[i,j],widths[j]))
end
println(io)
end
println(io,"=" ^totalwidth)
end
##############################################################################
##
## Schema
##
##############################################################################
function StatsModels.apply_schema(t::FormulaTerm, schema::StatsModels.Schema, Mod::Type{GLFixedEffectModel}, has_fe_intercept)
schema = StatsModels.FullRank(schema)
if has_fe_intercept
push!(schema.already, InterceptTerm{true}())
end
FormulaTerm(apply_schema(t.lhs, schema.schema, StatisticalModel),
StatsModels.collect_matrix_terms(apply_schema(t.rhs, schema, StatisticalModel)))
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 1410 |
module GLFixedEffectModels
##############################################################################
##
## Dependencies
##
##############################################################################
using LinearAlgebra
using Statistics
using Printf
using FillArrays
using DataFrames
using Distributions
using Reexport
using FixedEffects
using LoopVectorization
using Vcov
using StatsBase
using StatsModels
using StatsAPI
@reexport using GLM
@reexport using FixedEffectModels
# not necessary to reexport StatsModels since it is reexported by FixedEffectModels
##############################################################################
##
## Exported methods and types
##
##############################################################################
export nlreg,
fe,
GLFixedEffectModel,
has_fe,
Vcov,
VcovData,
responsename,
bias_correction,
pseudo_r2,
pseudo_adjr2
##############################################################################
##
## Load files
##
##############################################################################
include("GLFixedEffectModel.jl")
include("utils/vcov.jl")
include("utils/fixedeffects.jl")
include("utils/basecol.jl")
include("utils/biascorr.jl")
include("fit.jl")
include("presolve.jl")
# precompile script
df = DataFrame(y = rand(10), x = 1:10, id = repeat([1, 2], 5))
nlreg(df, @formula(y ~ x + fe(id)), Binomial(), GLM.LogitLink())
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 26683 | """
Estimate a generalized linear model with high dimensional categorical variables
### Arguments
* `df`: a Table
* `FormulaTerm`: A formula created using [`@formula`](@ref)
* `distribution`: A `Distribution`. See the documentation of [GLM.jl](https://juliastats.org/GLM.jl/stable/manual/#Fitting-GLM-models-1) for valid distributions.
* `link`: A `Link` function. See the documentation of [GLM.jl](https://juliastats.org/GLM.jl/stable/manual/#Fitting-GLM-models-1) for valid link functions.
* `CovarianceEstimator`: A method to compute the variance-covariance matrix
* `save::Vector{Symbol} = Symbol[]`: Should residuals/predictions/eta's/estimated fixed effects be saved in the dataframe `augmentdf`? Can contain any subset of `[:residuals,:eta,:mu,:fe]`.
* `method::Symbol`: A symbol for the method. Default is :cpu. Alternatively, :gpu requires `CuArrays`. In this case, use the option `double_precision = false` to use `Float32`.
* `contrasts::Dict = Dict()` An optional Dict of contrast codings for each categorical variable in the `formula`. Any unspecified variables will have `DummyCoding`.
* `maxiter::Integer = 1000`: Maximum number of iterations
* `maxiter_center::Integer = 10000`: Maximum number of iterations for centering procedure.
* `double_precision::Bool`: Should the demeaning operation use Float64 rather than Float32? Default to true.
* `dev_tol::Real` : Tolerance level for the first stopping condition of the maximization routine.
* `rho_tol::Real` : Tolerance level for the stephalving in the maximization routine.
* `step_tol::Real` : Tolerance level that accounts for rounding errors inside the stephalving routine
* `center_tol::Real` : Tolerance level for the stopping condition of the centering algorithm. Default to 1e-8 if `double_precision = true`, 1e-6 otherwise.
* `separation::Symbol = :none` : method to detect/deal with separation. Currently supported values are `:none`, `:ignore` and `:mu`. See readme for details.
* `separation_mu_lbound::Real = -Inf` : Lower bound for the Clarkson-Jennrich separation detection heuristic.
* `separation_mu_ubound::Real = Inf` : Upper bound for the Clarkson-Jennrich separation detection heuristic.
* `separation_ReLU_tol::Real = 1e-4` : Tolerance level for the ReLU algorithm.
* `separation_ReLU_maxiter::Integer = 1000` : Maximal number of iterations for the ReLU algorithm.
### Examples
```julia
using GLM, RDatasets, Distributions, Random, GLFixedEffectModels
rng = MersenneTwister(1234)
df = dataset("datasets", "iris")
df.binary = 0.0
df[df.SepalLength .> 5.0,:binary] .= 1.0
df.SpeciesDummy = categorical(df.Species)
m = @formula binary ~ SepalWidth + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), GLM.LogitLink() , start = [0.2] )
```
"""
function nlreg(@nospecialize(df),
@nospecialize(formula::FormulaTerm),
distribution::Distribution,
link::GLM.Link,
@nospecialize(vcov::CovarianceEstimator = Vcov.simple());
@nospecialize(weights::Union{Symbol, Nothing} = nothing),
@nospecialize(subset::Union{AbstractVector, Nothing} = nothing),
@nospecialize(start::Union{AbstractVector, Nothing} = nothing),
maxiter_center::Integer = 10000, # maximum number of iterations in pseudo-demeaning
maxiter::Integer = 1000, # maximum number of iterations
contrasts::Dict = Dict{Symbol, Any}(),
dof_add::Integer = 0,
save::Vector{Symbol} = Symbol[],
method::Symbol = :cpu,
nthreads::Integer = method == :cpu ? Threads.nthreads() : 256,
drop_singletons = true,
double_precision::Bool = true,
dev_tol::Real = 1.0e-8, # tolerance level for the first stopping condition of the maximization routine.
rho_tol::Real = 1.0e-8, # tolerance level for the stephalving in the maximization routine.
step_tol::Real = 1.0e-8, # tolerance level that accounts for rounding errors inside the stephalving routine
center_tol::Real = double_precision ? 1e-8 : 1e-6, # tolerance level for the stopping condition of the centering algorithm.
separation::Vector{Symbol} = Symbol[], # method to detect and/or deal with separation
separation_mu_lbound::Real = -Inf,
separation_mu_ubound::Real = Inf,
separation_ReLU_tol::Real = 1e-4,
separation_ReLU_maxiter::Integer = 100,
@nospecialize(vcovformula::Union{Symbol, Expr, Nothing} = nothing),
@nospecialize(subsetformula::Union{Symbol, Expr, Nothing} = nothing),
verbose::Bool = false # Print output on each iteration.
)
df = DataFrame(df; copycols = false)
# to deprecate
if vcovformula != nothing
if (vcovformula == :simple) | (vcovformula == :(simple()))
vcov = Vcov.Simple()
elseif (vcovformula == :robust) | (vcovformula == :(robust()))
vcov = Vcov.Robust()
else
vcov = Vcov.cluster(StatsModels.termvars(@eval(@formula(0 ~ $(vcovformula.args[2]))))...)
end
end
if subsetformula != nothing
subset = eval(evaluate_subset(df, subsetformula))
end
if method == :cpu && nthreads > Threads.nthreads()
@warn "Keyword argument nthreads = $(nthreads) is ignored (Julia was started with only $(Threads.nthreads()) threads)."
nthreads = Threads.nthreads()
end
##############################################################################
##
## Parse formula
##
##############################################################################
formula_origin = formula
if !StatsModels.omitsintercept(formula) & !StatsModels.hasintercept(formula)
formula = StatsModels.FormulaTerm(formula.lhs, StatsModels.InterceptTerm{true}() + formula.rhs)
end
formula, formula_endo, formula_iv = FixedEffectModels.parse_iv(formula)
has_iv = formula_iv != StatsModels.FormulaTerm(ConstantTerm(0), ConstantTerm(0))
has_weights = weights !== nothing
if has_iv
error("Instrumental variables are not allowed.")
end
if has_weights
@warn "Weights are not implemented yet, will be ignored."
end
##############################################################################
##
## Save keyword argument
##
##############################################################################
save_residuals = (:residuals โ save)
##############################################################################
##
## Construct new dataframe after removing missing values
##
##############################################################################
# create a dataframe without missing values & negative weights
vars = StatsModels.termvars(formula)
all_vars = unique(vars)
# TODO speedup: this takes 4.8k
esample = completecases(df, all_vars)
# if has_weights
# esample .&= BitArray(!ismissing(x) & (x > 0) for x in df[!, weights])
# end
if subset != nothing
if length(subset) != size(df, 1)
throw("df has $(size(df, 1)) rows but the subset vector has $(length(subset)) elements")
end
esample .&= BitArray(!ismissing(x) && x for x in subset)
end
esample .&= Vcov.completecases(df, vcov)
formula, formula_fes = FixedEffectModels.parse_fe(formula)
has_fes = formula_fes != FormulaTerm(ConstantTerm(0), ConstantTerm(0))
fes, ids, fekeys = FixedEffectModels.parse_fixedeffect(df, formula_fes)
has_fe_intercept = any(fe.interaction isa UnitWeights for fe in fes)
# remove intercept if absorbed by fixed effects
if has_fe_intercept
formula = FormulaTerm(formula.lhs, tuple(InterceptTerm{false}(), (term for term in FixedEffectModels.eachterm(formula.rhs) if !isa(term, Union{ConstantTerm,InterceptTerm}))...))
end
has_intercept = hasintercept(formula)
if has_fes
if drop_singletons
before_n = sum(esample)
for fe in fes
FixedEffectModels.drop_singletons!(esample, fe)
end
after_n = sum(esample)
dropped_n = before_n - after_n
if dropped_n > 0
@info "$(dropped_n) observations detected as singletons. Dropping them ..."
end
end
else
error("No fixed effect specified. Use GLM.jl for the estimation of generalized linear models without fixed effects.")
end
save_fe = (:fe โ save) & has_fes
##############################################################################
##
## Dataframe --> Matrix
##
##############################################################################
exo_vars = unique(StatsModels.termvars(formula))
subdf = Tables.columntable((; (x => disallowmissing(view(df[!, x], esample)) for x in exo_vars)...))
formula_schema = apply_schema(formula, schema(formula, subdf, contrasts), GLFixedEffectModel, has_fe_intercept)
# Obtain y
# for a Vector{Float64}, conver(Vector{Float64}, y) aliases y
y = convert(Vector{Float64}, response(formula_schema, subdf))
oldy = deepcopy(response(formula_schema, df))
#y = y[esample]
all(isfinite, y) || throw("Some observations for the dependent variable are infinite")
# Obtain X
Xexo = convert(Matrix{Float64}, modelmatrix(formula_schema, subdf))
oldX = deepcopy(Xexo)
#Xexo = Xexo[esample,:]
all(isfinite, Xexo) || throw("Some observations for the exogeneous variables are infinite")
basecoef = trues(size(Xexo,2)) # basecoef contains the information of the dropping of the regressors.
# while esample contains the information of the dropping of the observations.
response_name, coef_names = coefnames(formula_schema)
if !(coef_names isa Vector)
coef_names = typeof(coef_names)[coef_names]
end
# Weights are currently not implemented
# if has_weights
# weights = Weights(convert(Vector{Float64}, view(df, esample, weights)))
# else
# weights = Weights(Ones{Float64}(sum(esample)))
# end
# all(isfinite, values(weights)) || throw("Weights are not finite")
########################################################################
##
## Presolve:
## Step 1. Check Multicollinearity among X.
## Step 2. Check Separation
##
########################################################################
# construct fixed effects object and solver
fes = FixedEffect[_subset(fe, esample) for fe in fes]
# pre separation detection check for collinearity
Xexo, basecoef = detect_linear_dependency_among_X!(Xexo, basecoef; coefnames=coef_names)
#####################################################################
##
## checking separation is basically looking for
#####################################################################
if :simplex โ separation
@warn "simplex not implemented, will ignore."
end
if link isa LogLink
if :fe โ separation
esample, y, Xexo, fes = detect_sep_fe!(esample, y, Xexo, fes; sep_at = 0)
end
if :ReLU โ separation
esample, y, Xexo, fes = detect_sep_relu!(esample, y, Xexo, fes;
double_precision = double_precision,
dtol = center_tol, dmaxiter = maxiter,
rtol = separation_ReLU_tol, rmaxiter = separation_ReLU_maxiter,
method = method, verbose = verbose,
)
end
elseif link isa Union{ProbitLink, LogitLink}
@assert all(0 .<= y .<= 1) "Dependent variable is not in the domain of the link function."
if :fe โ separation
esample, y, Xexo, fes = detect_sep_fe!(esample, y, Xexo, fes; sep_at = 0)
esample, y, Xexo, fes = detect_sep_fe!(esample, y, Xexo, fes; sep_at = 1)
end
if :ReLU โ separation
@warn "ReLU separation detection for ProbitLink/LogitLink is expermental, please interpret with caution."
esample, y, Xexo, fes = detect_sep_relu!(esample, y, Xexo, fes;
double_precision = double_precision,
dtol = center_tol, dmaxiter = maxiter,
rtol = separation_ReLU_tol, rmaxiter = separation_ReLU_maxiter,
method = method, verbose = verbose,
)
esample, y, Xexo, fes = detect_sep_relu!(esample, 1 .- y[:], Xexo, fes;
double_precision = double_precision,
dtol = center_tol, dmaxiter = maxiter,
rtol = separation_ReLU_tol, rmaxiter = separation_ReLU_maxiter,
method = method, verbose = verbose,
)
y = 1 .- y
end
else
@warn "Link function type $(typeof(link)) not support for ReLU separation detection. Skip separation detection."
end
# post separation detection check for collinearity
Xexo, basecoef = detect_linear_dependency_among_X!(Xexo, basecoef; coefnames=coef_names)
weights = Weights(Ones{Float64}(sum(esample)))
feM = AbstractFixedEffectSolver{double_precision ? Float64 : Float32}(fes, weights, Val{method}, nthreads)
# make one copy after deleting NAs + dropping singletons + detecting separations (fe + relu)
nobs = sum(esample)
(nobs > 0) || throw("sample is empty")
# compute tss now before potentially demeaning y
tss_total = FixedEffectModels.tss(y, has_intercept | has_fe_intercept, weights)
# Compute data for std errors
vcov_method = Vcov.materialize(view(df, esample, :), vcov) # is earlier in fixedeffectmodels
# mark this as the start of a rerun when collinearity among X and fe is detected, rerun from here.
@label rerun
coeflength = sum(basecoef)
if start !== nothing
(length(start) == coeflength) || error("Invalid length of `start` argument.")
beta = start
else
# beta = zeros(double_precision ? Float64 : Float32, coeflength)
beta = 0.1 .* ones(double_precision ? Float64 : Float32, coeflength)
end
#Xexo = oldX[esample,:]
Xexo = GLFixedEffectModels.getcols(Xexo, basecoef) # get Xexo from oldX and basecoef and esample
eta = Xexo * beta
mu = GLM.linkinv.(Ref(link),eta)
wt = ones(double_precision ? Float64 : Float32, nobs, 1)
dev = sum(devresid.(Ref(distribution), y, mu))
nulldev = sum(devresid.(Ref(distribution), mean(y), mu))
Xhat = Xexo
crossx = Matrix{double_precision ? Float64 : Float32}(undef, nobs, 0)
residuals = y[:] # just for initialization
# Stuff that we need in outside scope
emp = Array{double_precision ? Float64 : Float32,2}(undef,2,2)
score = hessian = emp
outer_iterations = 0
outer_converged = false
for i = 1:maxiter
verbose && println("Iteration $(i)")
# Compute IWLS weights and dependent variable
mymueta = GLM.mueta.(Ref(link),eta)
# Check for separation
# use the bounds to detect
min_mueta = minimum(mymueta)
max_mueta = maximum(mymueta)
min_mu = minimum(mu)
max_mu = maximum(mu)
if (min_mueta < separation_mu_lbound) | (max_mueta > separation_mu_ubound) | (min_mu < separation_mu_lbound) | (max_mu > separation_mu_ubound)
problematic = ((mymueta .< separation_mu_lbound) .| (mymueta .> separation_mu_ubound) .| (mu .< separation_mu_lbound) .| (mu .> separation_mu_ubound))
@warn "$(sum(problematic)) observation(s) exceed the lower or upper bounds. Likely reason is statistical separation."
# deal with it
if :mu โ separation
mymueta[mymueta .< separation_mu_lbound] .= separation_mu_lbound
mymueta[mymueta .> separation_mu_ubound] .= separation_mu_ubound
mu[mu .< separation_mu_lbound] .= separation_mu_lbound
mu[mu .> separation_mu_ubound] .= separation_mu_ubound
end
# The following would remove the observations that are outside of the bounds, and restarts the estimation.
# Inefficient.
# if separation == :restart
# df_new = df[setdiff(1:size(df,1),indices),:]
# println("Separation detected. Restarting...")
# return nlreg(df_new,formula_origin,distribution,link,vcov,
# weights=nothing,subset=subset,start=beta,maxiter_center=maxiter_center, maxiter=maxiter,
# contrasts=contrasts,dof_add=dof_add,save=save,
# method=method,drop_singletons=drop_singletons,double_precision=double_precision,
# dev_tol=dev_tol, rho_tol=rho_tol, step_tol=step_tol, center_tol=center_tol,
# vcovformula=vcovformula,subsetformula=subsetformula,verbose=verbose)
# end
end
wtildesq = mymueta.*mymueta ./ GLM.glmvar.(Ref(distribution),mu)
nu = (y - mu) ./ mymueta
# make a copy of nu because it's being changed by solve_residuals!
nu_orig = deepcopy(nu)
# Update weights and FixedEffectSolver object
weights = Weights(wtildesq)
all(isfinite, weights) || throw("IWLS Weights are not finite. Possible reason is separation.")
sqrtw = sqrt.(weights)
FixedEffects.update_weights!(feM, weights)
# # Pseudo-demean variables
iterations = Int[]
convergeds = Bool[]
nudemean, b, c = FixedEffects.solve_residuals!(nu, feM; maxiter = maxiter_center, tol = center_tol)
append!(iterations, b)
append!(convergeds, c)
Xdemean, b, c = FixedEffects.solve_residuals!(Xexo, feM; maxiter = maxiter_center, tol = center_tol)
append!(iterations, b)
append!(convergeds, c)
# to obtain the correct projections, we need to weigh the demeaned nu and X
nudemean = sqrtw .* nudemean
Xdemean = Diagonal(sqrtw) * Xdemean
iterations = maximum(iterations)
converged = all(convergeds)
if converged == false
@warn "Convergence of annihilation procedure not achieved in $(iterations) iterations; try increasing maxiter_center or decreasing center_tol."
end
basecolXexo = GLFixedEffectModels.basecol(Xdemean)
if all(basecolXexo)
else
remaining_cols = findall(basecoef)
regressor_ind_to_be_dropped = remaining_cols[.~basecolXexo]
basecoef[regressor_ind_to_be_dropped] .= 0 # update basecoef
# throw info
@info "Multicollinearity detected among columns of X and FixedEffects. Dropping regressors: $(join(coef_names[regressor_ind_to_be_dropped]," "))"
@goto rerun
end
Xexo2 = GLFixedEffectModels.getcols(Xdemean, basecolXexo)
Xhat = Xexo2
crossx = cholesky!(Symmetric(Xhat' * Xhat))
beta_update = crossx \ (Xhat' * nudemean)
# # Update \eta
eta_update = nu_orig - (nudemean - Xdemean * beta_update) ./ sqrtw
verbose && println("Old dev: $dev")
devold = dev
rho = 1.0
while true
mu = GLM.linkinv.(Ref(link),eta + rho .* eta_update)
dev = sum(GLM.devresid.(Ref(distribution), y, mu))
verbose && println("dev = $(dev)")
if !isinf(dev) && dev <= devold
eta = eta + rho .* eta_update
beta = beta + rho .* beta_update
verbose && println("beta = $(beta)")
residuals = y - mu
break
end
rho = rho / 2.0
if rho < rho_tol
error("Backtracking failed.")
end
end
if ((devold - dev)/dev < dev_tol)
verbose && println("Iter $i : converged (deviance)")
outer_converged = true
end
if (norm(beta_update) < step_tol )
verbose && println("Iter $i : converged (step size)")
outer_converged = true
end
if outer_converged
# # Compute concentrated Score and Hessian
score = Xdemean .* nudemean
hessian = Symmetric(Xdemean' * Xdemean)
outer_iterations = i
if verbose
println("Xdemean")
display(Xdemean .* nudemean)
display(Xhat .* nu)
end
# if dev > nulldev
# @warn "Final deviance exceeds null deviance. Possibly running into a local maximum. Try restarting with a different starting guess."
# end
break
else
verbose && println("Iter $i : not converged. ฮdev = $((devold - dev)/dev), ||ฮฮฒ|| = $(norm(beta_update))")
verbose && println("---------------------------------")
end
if i == maxiter
@warn "Convergence not achieved in $(i) iterations; try increasing maxiter or dev_tol."
outer_iterations = maxiter
end
end
coef = beta
##############################################################################
##
## Optionally save objects in a new dataframe
##
##############################################################################
augmentdf = DataFrame()
if save_residuals
if nobs < length(esample)
augmentdf.residuals = Vector{Union{double_precision ? Float64 : Float32, Missing}}(missing, length(esample))
augmentdf[esample, :residuals] = residuals
else
augmentdf[!, :residuals] = residuals
end
end
if save_fe
oldX = oldX[esample,:]
oldX = getcols(oldX, basecoef)
# update FixedEffectSolver
weights = Weights(Ones{double_precision ? Float64 : Float32}(sum(esample)))
feM = AbstractFixedEffectSolver{double_precision ? Float64 : Float32}(fes, weights, Val{method})
newfes, b, c = solve_coefficients!(eta - oldX * coef, feM; tol = center_tol, maxiter = maxiter_center)
for fekey in fekeys
augmentdf[!, fekey] = df[:, fekey]
end
for j in 1:length(fes)
if nobs < length(esample)
augmentdf[!, ids[j]] = Vector{Union{double_precision ? Float64 : Float32, Missing}}(missing, length(esample))
augmentdf[esample, ids[j]] = newfes[j]
else
augmentdf[!, ids[j]] = newfes[j]
end
end
end
if :mu โ save
if nobs < length(esample)
augmentdf.mu = Vector{Union{double_precision ? Float64 : Float32, Missing}}(missing, length(esample))
augmentdf[esample, :mu] = mu
else
augmentdf[!, :mu] = mu
end
end
if :eta โ save
if nobs < length(esample)
augmentdf.eta = Vector{Union{double_precision ? Float64 : Float32, Missing}}(missing, length(esample))
augmentdf[esample, :eta] = eta
else
augmentdf[!, :eta] = eta
end
end
##############################################################################
##
## Test Statistics
##
##############################################################################
# Compute degrees of freedom
dof_absorb = 0
dof_coef_and_fe = sum(basecoef) + dof_add - 1 # -1 for the constant
if has_fes
for fe in fes
# adjust degree of freedom only if fe is not fully nested in a cluster variable:
if (vcov isa Vcov.ClusterCovariance) && any(FixedEffectModels.isnested(fe, v.groups) for v in values(vcov_method.clusters))
dof_absorb += 1 # if fe is nested you still lose 1 degree of freedom
else
#only count groups that exists
dof_absorb += FixedEffectModels.nunique(fe)
end
dof_coef_and_fe = dof_coef_and_fe + FixedEffectModels.nunique(fe)
end
end
_n_coefs = sum(basecoef) + dof_absorb + dof_add
dof_residual_ = max(1, nobs - _n_coefs)
nclusters = nothing
if vcov isa Vcov.ClusterCovariance
nclusters = Vcov.nclusters(vcov_method)
end
resid_vcov = if size(score, 2) >= 1
score[:, 1] ./ Xhat[:, 1]
else
residuals
end
vcov_data = VcovDataGLM(Xhat, crossx, inv(crossx), resid_vcov, dof_residual_)#, hessian)
# hessian is unnecessary since in all cases vcov takes the inv(cholesky(hessian)) which is the same as inv(crossx)
"""
This option works if purely using Vcov.jl:
if vcov isa Vcov.ClusterCovariance
vcov_data = Vcov.VcovData(Xhat, crossx, score[:, 1] ./ Xhat[:, 1], dof_residual_)
elseif vcov isa Vcov.RobustCovariance
vcov_data = Vcov.VcovData(Xhat, crossx, score[:, 1] ./ Xhat[:, 1], nobs)
else
vcov_data = Vcov.VcovData(Xhat, crossx, ones(dof_residual_), dof_residual_)
end
"""
# Compute standard error
matrix_vcov = StatsAPI.vcov(vcov_data, vcov_method)
oldy = oldy[esample]
# would need to change if weights are added
ฯ_ll = dev/length(oldy)
ll = sum(glfe_loglik_obs.(Ref(distribution), oldy, mu, 1, ฯ_ll))
ฯ_nll = nulldev/length(oldy)
mu_nll = has_intercept || has_fes ? mean(oldy) : linkinv(link, zero(eltype(oldy))/1)
null_ll = sum(glfe_loglik_obs.(Ref(distribution), oldy, mu_nll, 1, ฯ_nll))
##############################################################################
##
## Return regression result
##
##############################################################################
# add omitted variables
if !all(basecoef)
newcoef = zeros(length(basecoef))
newmatrix_vcov = fill(NaN, (length(basecoef), length(basecoef)))
newindex = [searchsortedfirst(cumsum(basecoef), i) for i in 1:length(coef)]
for i in eachindex(newindex)
newcoef[newindex[i]] = coef[i]
for j in eachindex(newindex)
newmatrix_vcov[newindex[i], newindex[j]] = matrix_vcov[i, j]
end
end
coef = newcoef
matrix_vcov = newmatrix_vcov
end
return GLFixedEffectModel(coef, # Vector of coefficients
matrix_vcov, # Covariance matrix
vcov, # CovarianceEstimator
nclusters,
outer_iterations,
outer_converged,
esample,
augmentdf,
fekeys,
ll,
null_ll,
distribution,
link,
coef_names,
response_name,
formula_origin, # Original formula
formula_schema,
nobs, # Number of observations
dof_coef_and_fe, # Number of coefficients
dof_residual_, # nobs - degrees of freedoms
dev, # Deviance of the fitted model
nulldev, # null deviance
score, # concentrated gradient
hessian # concentrated hessian
)
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 5571 | function detect_sep_fe!(esample::BitVector, y::Vector{<: Real}, Xexo::Matrix{<: Real}, fes::AbstractVector{<: FixedEffect}; sep_at::Real=0)
# update esample, y, Xexo and fes
before_n = sum(esample)
sub_esample = trues(before_n)
@assert before_n == length(y) "esample and y have different length."
for fe in fes
cache = trues(fe.n)
level_visited = falses(fe.n)
@assert before_n == length(fe.refs) "esample and fe have different length."
for i in 1:before_n
# for cache = true finally, all y[i] must equal to sep_at
if y[i] == sep_at
cache[fe.refs[i]] *= true
else
cache[fe.refs[i]] *= false
end
level_visited[fe.refs[i]] = true
end
for i in 1:before_n
# if all y in this ref are equal to sep_at, exclude these ys
if cache[fe.refs[i]] & level_visited[fe.refs[i]]
sub_esample[i] = false
end
end
end
after_n = sum(sub_esample)
dropped_n = before_n - after_n
if dropped_n > 0
@info "$(dropped_n) observations detected as separated using the FE method. Dropping them ..."
end
# drop them in esample
remaining = findall(esample)
esample[remaining[.~sub_esample]] .= false
# drop them in fe
fes = FixedEffect[_subset(fe, sub_esample) for fe in fes]
return esample, y[sub_esample], Xexo[sub_esample, :], fes
end
function detect_sep_simplex(y::Vector{<: Real}, Xexo::Matrix{<: Real}, fes::AbstractVector{<: FixedEffect})
end
function detect_sep_relu!(esample::BitVector, y::Vector{<: Real}, Xexo::Matrix{<: Real}, fes::AbstractVector{<: FixedEffect};
double_precision::Bool = true,
dtol::Real = sqrt(eps(double_precision ? Float64 : Float32)), # tol for solve_coefficients and solve_residuals
dmaxiter::Integer = 100_000, # maxiter for solve_coefficients and solve_residuals
rtol::Real = 1e-3, # tol for ReLU
rmaxiter::Integer = 1000, # maxiter for ReLU
method::Symbol = :cpu,
verbose::Bool = false
)
verbose && @info "identifying separations using ReLU."
@assert all(GLFixedEffectModels.basecol(Xexo)) "There are Multicollinearity in the data, this should be done with before running ReLU."
before_n = sum(esample)
@assert before_n == length(y)
Xexo_copy = deepcopy(Float64.(Xexo))
u = (y.==0)
K = sum(u) / rtol^2
w = (y.>0) .* K + .~(y.>0)
weights = Weights(w)
feM = AbstractFixedEffectSolver{double_precision ? Float64 : Float32}(fes, weights, Val{method})
outer_converged = false
for iter in 1:rmaxiter
iterations = Int[]
convergeds = Bool[]
verbose && println("* iter $(iter)")
Xexo = deepcopy(Xexo_copy)
Xexo, b, c = solve_residuals!(Xexo, feM; tol = dtol, maxiter = dmaxiter)
Xexo = sqrt.(w) .* Xexo
u = sqrt.(w) .* u
append!(iterations, b)
append!(convergeds, c)
crossx = cholesky!(Symmetric(Xexo' * Xexo))
beta = crossx \ (Xexo' * u)
xb = Xexo_copy * beta
# something is wrong
newfes, b, c = solve_coefficients!(u - xb, feM; tol = dtol, maxiter = dmaxiter)
append!(iterations, b)
append!(convergeds, c)
iterations = maximum(iterations)
converged = all(convergeds)
if converged == false
@warn "Convergence of annihilation procedure not achieved in $(iterations) iterations; try increasing dmaxiter or decreasing dtol."
@warn "Cannot identify separated obs because lsmr wasn't solved properly. Skipping ..."
return esample, y, Xexo_copy, fes
end
xbd = sum(newfes) + xb
xbd[abs.(xbd) .< rtol] .= 0
if all(xbd.>=0)
outer_converged = true
is_sep = xbd .> 0
@info "$(sum(is_sep)) observations detected as separated using the ReLU method. Dropping them ..."
sub_esample = .~is_sep
# drop them in esample
remaining = findall(esample)
esample[remaining[is_sep]] .= false
# drop them in fe
fes = FixedEffect[GLFixedEffectModels._subset(fe, sub_esample) for fe in fes]
# drop them in y
y = y[sub_esample]
# drop them in Xexo_copy
Xexo_copy = Xexo_copy[sub_esample,:]
return esample, y, Xexo_copy, fes
else
verbose && println("negative xbd: $(sum(xbd.<0))")
end
u = xbd
u[u .< 0] .= 0
end
if ~outer_converged
@warn "cannot identify separated obs. Maximal iteration reached. Skipping ..."
return esample, y, Xexo_copy, fes
end
end
function detect_linear_dependency_among_X!(X::Matrix{<: Real}, basecoef::BitVector; coefnames::Vector)
# assert this is a model without IV or weights, this code is written at [email protected]
before_n = sum(basecoef)
@assert before_n == size(X,2) "Dimension of basecoef and X doesn't match"
base = GLFixedEffectModels.basecol(X)
if all(base)
return X, basecoef
else
X = GLFixedEffectModels.getcols(X, base)
remaining_cols = findall(basecoef)
regressor_ind_to_be_dropped = remaining_cols[.~base]
basecoef[regressor_ind_to_be_dropped] .= 0 # update basecoef
@info "Multicollinearity detected among columns of X. Dropping regressors: $(join(coefnames[regressor_ind_to_be_dropped]," "))"
return X, basecoef
end
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 1611 |
##############################################################################
##
## Crossprod computes [A B C ...]' [A B C ...] without forming it
##
##############################################################################
crossprod(A::AbstractMatrix) = A'A
function crossprod(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix)
u11, u12, u13 = A'A, A'B, A'C
u22, u23 = B'B, B'C
u33 = C'C
hvcat(3, u11, u12, u13,
u12', u22, u23,
u13', u23', u33)
end
##############################################################################
##
## Returns base of [A B C ...]
##
## TODO: You could protect against roundoff error by using a controlled sum algorithm (similar to sum_kbn) to compute elements of X'X, then converting to BigFloat before factoring.
##
##
##############################################################################
# rank(A) == rank(A'A)
function basecol(X::AbstractMatrix...; factorization = :Cholesky)
@static if VERSION >= v"1.7"
cholm = cholesky!(Symmetric(crossprod(X...)), RowMaximum(); tol = -1, check = false)
else
cholm = cholesky!(Symmetric(crossprod(X...)), Val(true); tol = -1, check = false)
end
r = 0
if size(cholm, 1) > 0
r = sum(diag(cholm.factors) .> size(X[1],1)^2 * eps())
# used to be r = rank(cholm) but does not work wiht very high regressors at the same time as intercept
end
invpermute!(1:size(cholm, 1) .<= r, cholm.piv)
end
function getcols(X::AbstractMatrix, basecolX::AbstractVector)
sum(basecolX) == size(X, 2) ? X : X[:, basecolX]
end | GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 30696 | ##############################################
# model types #
##############################################
# (Binomial) all possible FEs in a ijt-structured data set:
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# panel_structure | ?-way | FE notation | need correction ? | is supported ? | corresponding literature
# -----------------+---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 2 | i + t | YES | YES | Fernรกndez-Val and Weidner (2016)
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# classic | 2 | i + j (pseudo panel) | YES (?) | ? |
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 3 | i + j + t | NO | NO | Fernรกndez-Val and Weidner (2018)
# -----------------+---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 2 | it + jt | YES (?) | YES | Hinz, Stammann and Wanner (2020) & Fernรกndez-Val and Weidner (2016)
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# network | 2 | it + ij, jt + ij | ? | ? | ?
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 2 | it + jt + ij | YES | YES | Hinz, Stammann and Wanner (2020)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#
# (Poisson) all possible FEs in a ijt-structured data set:
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# panel_structure | ?-way | FE notation | need correction ? | is supported ? | corresponding literature
# -----------------+---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 2 | i + t, j + t | NO | NO | Fernรกndez-Val and Weidner (2016)
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# classic | 2 | i + j (pseudo panel) | ? | ? |
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 3 | i + j + t | NO | NO | Fernรกndez-Val and Weidner (2018)
# -----------------+---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 2 | it + jt | YES (on standard error)| | Weidner and Zylkin (2020)
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# network | 2 | it + ij, jt + ij | ? | ? | ?
# +---------+--------------------------+------------------------+----------------------+-------------------------------------------------------------------
# | 2 | it + jt + ij | YES | | Weidner and Zylkin (2020)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
###################################################################
# Main Function bias_correction() #
###################################################################
"""
bias_correction(model::GLFixedEffectModel,df::DataFrame;i_symb::Union{Symbol,Nothing}=nothing,j_symb::Union{Symbol,Nothing}=nothing,t_symb::Union{Symbol,Nothing}=nothing,L::Int64=0,panel_structure::Symbol=:classic)
Asymptotic bias correction after fitting binary choice models with a two-/three-way error.
# Arguments
## Required Arguments
- `model::Integer`: a `GLFixedEffectModel` object which can be obtained by using `nlreg()`.
- `df::DataFrame`: the Data Frame on which you just run `nlreg()`.
## Optional Arguments
- `L:Int64`: choice of binwidth, see Hahn and Kuersteiner (2011). The default value is 0.
- `panel_structure`: choose from "classic" or "network". The default value is "classic".
- `i_symb`: the variable name for i index in the data frame `df`
- `j_symb`: the variable name for j index in the data frame `df`
- `t_symb`: the variable name for t index in the data frame `df`
# Available Model
We only support the following models:
- Binomial regression, Logit link, Two-way, Classic
- Binomial regression, Probit link, Two-way, Classic
- Binomial regression, Logit link, Two-way, Network
- Binomial regression, Probit link, Two-way, Network
- Binomial regression, Logit link, Three-way, Network
- Binomial regression, Probit link, Three-way, Network
- Poisson regression, Log link, Three-way, Network
- Poisson regression, Log link, Two-way, Network
"""
function bias_correction(model::GLFixedEffectModel,df::DataFrame;i_symb::Union{Symbol,Nothing}=nothing,j_symb::Union{Symbol,Nothing}=nothing,t_symb::Union{Symbol,Nothing}=nothing,L::Int64=0,panel_structure::Symbol=:classic)
@assert :mu in propertynames(model.augmentdf) && :eta in propertynames(model.augmentdf) "please save :eta and :mu before bias correction"
@assert panel_structure in [:classic, :network] "you can only choose :classic or :network for panel_structure"
@assert typeof(model.distribution) <: Union{Binomial,Poisson} "currently only support binomial regression and poisson regression"
@assert typeof(model.link) <: Union{GLM.LogitLink,GLM.ProbitLink,GLM.LogLink} "currently only support probit and logit link (binomial regression), and log link (poisson regression)"
######################## Parse FEs ########################
# fes_in_formula is the fe symbols that appears in the formula after the "|" which might contain FE interaction
# cleaned_fe_symb is an array of all the single FE that constructs fes_in_formula
fes_in_formula, cleaned_fe_symb = parse_formula_get_FEs(model,panel_structure,df)
@assert length(fes_in_formula) in [2,3] "We only support two-way and three-way FE at the moment"
@assert all([symb โ [i_symb,j_symb,t_symb] for symb in cleaned_fe_symb]) "not all FEs in the formula is mentioned in i_symb, j_symb and t_symb"
@assert all([(symb โ cleaned_fe_symb || symb === nothing) for symb in [i_symb,j_symb,t_symb]]) "redundant FEs in i_symb, j_symb or t_symb (not mentioned in the formula)"
if panel_structure == :classic
fe_dict = Dict(:i => i_symb,:j => j_symb,:t => t_symb)
elseif panel_structure == :network
fe_dict = Dict{Symbol,Union{Array{Symbol,1},Nothing}}()
if Symbol("fe_",i_symb,"&fe_",j_symb) โ fes_in_formula
push!(fe_dict, :ij => [i_symb,j_symb])
else
push!(fe_dict, :ij => nothing)
end
if Symbol("fe_",i_symb,"&fe_",t_symb) โ fes_in_formula
push!(fe_dict, :it => [i_symb,t_symb])
else
push!(fe_dict, :it => nothing)
end
if Symbol("fe_",j_symb,"&fe_",t_symb) โ fes_in_formula
push!(fe_dict, :jt => [j_symb,t_symb])
else
push!(fe_dict, :jt => nothing)
end
end
##########################################################
########## sort df ############
df.old_ind = rownumber.(eachrow(df))
df = sort(df, [t_symb,j_symb,i_symb][[t_symb,j_symb,i_symb] .!== nothing])
###############################
# check if we currently support the combination of the distribution, the link, num of FEs and the panel_structure
available_types = [
(LogitLink, Binomial, 2, :classic), # Fernรกndez-Val and Weidner (2016, 2018)
(ProbitLink, Binomial, 2, :classic), # Fernรกndez-Val and Weidner (2016, 2018)
(LogitLink, Binomial, 2, :network), # Hinz, Stammann and Wanner (2020) & Fernรกndez-Val and Weidner (2016)
(ProbitLink, Binomial, 2, :network), # Hinz, Stammann and Wanner (2020) & Fernรกndez-Val and Weidner (2016)
(LogitLink, Binomial, 3, :network), # Hinz, Stammann and Wanner (2020)
(ProbitLink, Binomial, 3, :network), # Hinz, Stammann and Wanner (2020)
(LogLink, Poisson, 2, :network), # Weidner and Zylkin (2021), JIE
(LogLink, Poisson, 3, :network) # Weidner and Zylkin (2021), JIE
]
this_model_type = (model.link,model.distribution,length(fes_in_formula),panel_structure)
@assert model_type_checker(this_model_type, available_types) "We currently don't support this combination of the distribution, the link, num of FEs and panel_structure"
if model.link isa GLM.LogitLink && model.distribution isa Binomial
return biascorr_logit(model,df,fe_dict,L,panel_structure)
elseif model.link isa GLM.ProbitLink && model.distribution isa Binomial
return biascorr_probit(model,df,fe_dict,L,panel_structure)
elseif model.link isa GLM.LogLink && model.distribution isa Poisson
return biascorr_poisson(model,df,fe_dict,L,panel_structure)
end
end
############################################################
# Bias Correction In Different Models #
############################################################
function biascorr_probit(model::GLFixedEffectModel,df::DataFrame,fes::Dict,L::Int64,panel_structure::Symbol)
link = model.link
y = df[model.esample[df.old_ind],model.yname]
ฮท = model.augmentdf.eta[df.old_ind]
ฮผ = model.augmentdf.mu[df.old_ind]
ฮผฮท = GLM.mueta.(Ref(link),ฮท)
score = model.gradient[df.old_ind,:]
hessian = model.hessian
w = ฮผฮท ./ (ฮผ.*(1.0 .- ฮผ))
v = w .* (y - ฮผ)
w = w .* ฮผฮท
z = - ฮท .* w
if panel_structure == :classic
b = classic_b_binomial(score,v,z,w,L,fes,df)
elseif panel_structure == :network
b = network_b_binomial(score,v,z,w,L,fes,df)
end
ฮฒ = model.coef + hessian \ b
return GLFixedEffectModel(
ฮฒ,
model.vcov,
model.vcov_type,
model.nclusters,
model.iterations,
model.converged,
model.esample,
model.augmentdf,
model.fekeys,
model.loglikelihood,
model.nullloglikelihood,
model.distribution,
model.link,
model.coefnames,
model.yname,
model.formula,
model.formula_schema,
model.nobs,
model.dof,
model.dof_residual,
model.deviance,
model.nulldeviance,
model.gradient,
model.hessian
)
end
function biascorr_logit(model::GLFixedEffectModel,df::DataFrame,fes::Dict,L::Int64,panel_structure::Symbol)
link = model.link
y = df[model.esample[df.old_ind],model.yname]
ฮท = model.augmentdf.eta[df.old_ind]
ฮผ = model.augmentdf.mu[df.old_ind]
ฮผฮท = GLM.mueta.(Ref(link),ฮท)
score = model.gradient[df.old_ind,:]
hessian = model.hessian
v = y .- ฮผ
w = ฮผฮท
z = w .* (1.0 .- 2.0 .* ฮผ)
if panel_structure == :classic
b = classic_b_binomial(score,v,z,w,L,fes,df)
elseif panel_structure == :network
b = network_b_binomial(score,v,z,w,L,fes,df)
end
ฮฒ = model.coef + hessian \ b
return GLFixedEffectModel(
ฮฒ,
model.vcov,
model.vcov_type,
model.nclusters,
model.iterations,
model.converged,
model.esample,
model.augmentdf,
model.fekeys,
model.loglikelihood,
model.nullloglikelihood,
model.distribution,
model.link,
model.coefnames,
model.yname,
model.formula,
model.formula_schema,
model.nobs,
model.dof,
model.dof_residual,
model.deviance,
model.nulldeviance,
model.gradient,
model.hessian
)
end
function biascorr_poisson(model::GLFixedEffectModel,df::DataFrame,fes::Dict,L::Int64,panel_structure::Symbol)
if L > 0
printstyled("bandwidth not allowed in poisson regression bias correction. Treating L as 0...",color=:yellow)
end
# @assert panel_structure == :network
y = df[model.esample[df.old_ind],model.yname] # Do we need to subset y with model.esample? TO-DO: test with cases with esample is not all ones.
ฮป = model.augmentdf.mu[df.old_ind]
@assert all([fe_key==:ij || fe_symb !== nothing for (fe_key,fe_symb) in fes]) "You need either a three-way FE model or a two-way FE model with i#j being left out"
# print("pre-demeaning")
# @time begin
# type: it + jt + ij, Need bias correction and standard error correction
i_ind = df[model.esample[df.old_ind],fes[:it][1]]
j_ind = df[model.esample[df.old_ind],fes[:jt][1]]
t_ind = df[model.esample[df.old_ind],fes[:it][2]]
i_levels = levels(i_ind)
j_levels = levels(j_ind)
t_levels = levels(t_ind)
I = length(i_levels)
J = length(j_levels)
# @assert I==J "number of exporters is different from number of importers"
T = length(t_levels)
# assume balanced panel
y_sum_by_ij = similar(y)
ฮป_sum_by_ij = similar(ฮป)
for groupSeg in get_group_seg(i_ind, j_ind)
y_sum_by_ij[groupSeg] .= sum(y[groupSeg])
ฮป_sum_by_ij[groupSeg] .= sum(ฮป[groupSeg])
end
ฯ = ฮป ./ ฮป_sum_by_ij
y_sum_ij = reshape(y_sum_by_ij,(I,J,T))[:,:,1]
ฮป_sum_ij = reshape(ฮป_sum_by_ij,(I,J,T))[:,:,1]
ฯ_ijt = reshape(ฯ,(I,J,T))
ฮป_ijt = reshape(ฮป,(I,J,T))
# Construct S
# S = y - ฯ .* y_sum_by_ij
S = y - ฮป
S_ijt = reshape(S,(I,J,T))
# println(H)
# Construct G
# Construct xฬ (demeaned x)
# See footnote 33 of Weidner and Zylkin (2020)
X = df[model.esample[df.old_ind], model.coefnames] |> Array{Float64,2}
weights = FixedEffects.Weights(ฮป)
all(isfinite, weights) || throw("Weights are not finite")
fes_fixedeffectarray = Array{FixedEffect,1}()
for (fe_key,fe_symb) in fes
if fe_symb !== nothing
fe_fixedeffectobject = FixedEffect(df[model.esample[df.old_ind], fe_symb[1]], df[model.esample[df.old_ind], fe_symb[2]])
push!(fes_fixedeffectarray,fe_fixedeffectobject)
end
end
feM = AbstractFixedEffectSolver{Float64}(fes_fixedeffectarray, weights, Val{:cpu}) # CPU/GPU??? might need more attention in the future when implement GPU
Xdemean, b, converged = FixedEffects.solve_residuals!(X, feM)
if !all(converged)
@warn "Convergence of annihilation procedure not achieved in default iterations; try increasing maxiter_center or decreasing center_tol."
end
K = size(Xdemean,2)
Xdemean_ijtk = reshape(Xdemean,(I,J,T,K))
if all([fe_symb !== nothing for (fe_key,fe_symb) in fes]) # it + jt + ij
###############################
# Point Estimate Correction #
###############################
# Construct Bฬ, Dฬ, and Wฬ
N = I = J
Bฬ = zeros(K)
##newW## Wฬ = zeros(K,K)
B!(Bฬ,K,I,J,T,ฯ_ijt,ฮป_sum_ij,y_sum_ij,Xdemean_ijtk,S_ijt)
Bฬ = Bฬ ./ (N - 1)
##newW## Wฬ = Wฬ ./ (N*(N-1))
Dฬ = zeros(K)
D!(Dฬ,K,I,J,T,ฯ_ijt,ฮป_sum_ij,y_sum_ij,Xdemean_ijtk,S_ijt)
Dฬ = Dฬ ./ (N - 1)
Wฬ = model.hessian./(N*(N-1))
ฮฒ = model.coef - Wฬ \ (Bฬ + Dฬ) ./ (N-1)
###############################
# Standard Error Correction #
###############################
new_vcov = get_new_vcov(I,J,T,K,Xdemean_ijtk,model.hessian,ฯ_ijt,ฮป_sum_ij,S_ijt)
else # it + jt
ฮฒ = model.coef
new_vcov = get_new_vcov_twoway(I,J,T,K,Xdemean_ijtk,model.hessian,ฮป_ijt,S_ijt)
end
return GLFixedEffectModel(
ฮฒ,
new_vcov,
model.vcov_type,
model.nclusters,
model.iterations,
model.converged,
model.esample,
model.augmentdf,
model.fekeys,
model.loglikelihood,
model.nullloglikelihood,
model.distribution,
model.link,
model.coefnames,
model.yname,
model.formula,
model.formula_schema,
model.nobs,
model.dof,
model.dof_residual,
model.deviance,
model.nulldeviance,
model.gradient,
model.hessian
)
end
###########################################
# Internal Functions #
###########################################
function group_sums(M::Array{Float64,2},w::Array{Float64,1},group_seg::Vector{Vector{Int64}})
P = size(M,2) # number of regressos P
b_temp = zeros(P)
for seg_index in group_seg
numerator = zeros(P)
for p in 1:P
numerator[p] += sum(M[seg_index,p])
end
denominator = sum(w[seg_index])
b_temp += numerator./denominator
end
return b_temp
end
function get_group_seg(fe::Array{T,1} where T <: Any)
p = sortperm(fe)
q = fe[p]
res = Vector{Vector{Int64}}()
grp = Vector{Int64}()
is_first = true
last = fe[end]
for (i,v) in enumerate(q)
if !is_first && last != v
push!(res,grp)
grp = [p[i]]
else
push!(grp, p[i])
end
last = v
is_first = false
end
push!(res,grp)
return res
end
function get_group_seg(fe1::Array{T,1},fe2::Array{T,1}) where T <: Any
fe = collect(zip(fe1,fe2))
p = sortperm(fe)
q = fe[p]
res = Vector{Vector{Int64}}()
grp = Vector{Int64}()
is_first = true
last = q[1]
for (i,v) in enumerate(q)
if !is_first && last != v
push!(res,grp)
grp = [p[i]]
else
push!(grp, p[i])
end
last = v
is_first = false
end
push!(res,grp)
return res
end
function group_sums_spectral(M::Array{Float64,2}, v::Array{Float64,1}, w::Array{Float64,1}, L::Int64, group_seg::Vector{Vector{Int64}})
# TO-DO: Need to make sure the slice M[seg_index,p], v[seg_index] are sorted from early to late observations
P = size(M)[2] # number of regressos P
b_temp = zeros(P)
for seg_index in group_seg
T = length(seg_index)
numerator = zeros(P)
for p in 1:P
for l in 1:L
for t in (l+1):T
numerator[p] += M[seg_index,p][t] * v[seg_index][t-l] * T / (T-l)
end
end
end
denominator = sum(w[seg_index])
b_temp += numerator./denominator
end
return b_temp
end
function model_type_checker(x::Tuple,avail_list::Array{T} where T <: Any)
# x is a 4-d tuple:
# x[1] is the model's link
# x[2] is the model's distribution
# x[3] is the ttl number of FEs
# x[4] is the panel_structure that takes [:classic,:network]
for avail_type in avail_list
if x[1] isa avail_type[1] && x[2] isa avail_type[2] && x[3] == avail_type[3] && x[4] == avail_type[4]
return true
end
end
return false
end
function parse_formula_get_FEs(model,panel_structure::Symbol,df::DataFrame)
# This function returns the set of FE symbols according to the formula
# If the panel_structure is network, it returns the interaction terms only
# If the panel_structure is classic, it checks to see if there is interaction term in the formula. If true, it throws error. It returns all the fe symbols otherwise
vars = StatsModels.termvars(model.formula) # all the vars excluding interactions
vars_name_to_be_set_diff = Symbol.("fe_",vars) # all the vars symbols excluding interactions (add "fe_" in the front)
fes, ids, formula = FixedEffectModels.parse_fixedeffect(df, model.formula) # id: include fe_i, fe_j, fe_t, and possible interactions if panel_structure = :network
if panel_structure == :network
network_fes = setdiff(ids,vars_name_to_be_set_diff)
if length(network_fes) != 0
no_interaction_fe = setdiff(ids,network_fes)
return network_fes, Symbol.(s[4:end] for s in String.(no_interaction_fe))
else
throw("no network fe is found")
end
elseif panel_structure == :classic
classic_fes = intersect(ids,vars_name_to_be_set_diff)
if issetequal(classic_fes, ids)
return ids, Symbol.(s[4:end] for s in String.(ids))
else
throw("no fe interaction is allowed in the classic model")
end
end
end
function classic_b_binomial(score::Array{Float64,2},v::Array{Float64,1},z::Array{Float64,1},w::Array{Float64,1},L::Int64,fes::Dict,df::DataFrame)
P = size(score)[2]
b = zeros(P)
if fes[:t] === nothing
pseudo_panel = true
if L > 0
printstyled("bandwidth not allowed in classic ij-pseudo panel. Treating L as 0...")
end
end
for (fe_key,fe_symb) in fes
if fe_symb !== nothing
b += group_sums(score ./ v .* z, w, get_group_seg(df[!,fe_symb])) ./ 2.0
end
if fe_key != :t && L > 0 && !pseudo_panel
b += group_sums_spectral(score ./ v .* w, v, w, L,get_group_seg(df[!,fe_key]))
end
end
return b
end
function network_b_binomial(score::Array{Float64,2},v::Array{Float64,1},z::Array{Float64,1},w::Array{Float64,1},L::Int64,fes::Dict,df::DataFrame)
P = size(score)[2]
b = zeros(P)
for (fe_key,fe_symb) in fes
if fe_symb !== nothing
b += group_sums(score ./ v .* z, w, get_group_seg(df[!,fe_symb[1]], df[!,fe_symb[2]])) ./ 2.0
end
end
if L > 0 && fes[:ij] !== nothing
b += group_sums_spectral(score ./ v .* w, v, w, L,get_group_seg(df[!,fes[:ij][1]], df[!,fes[:ij][2]]))
end
return b
end
function G_ijtsr(i,j,t,s,r,ฯ_ijt,y_sum_ij)
if r!=s && s!=t && t!=r
return - 2 * ฯ_ijt[i,j,r] * ฯ_ijt[i,j,s] * ฯ_ijt[i,j,t] * y_sum_ij[i,j]
end
if r==t && t!=s
return ฯ_ijt[i,j,t] * (1 - 2*ฯ_ijt[i,j,t]) * ฯ_ijt[i,j,s] * y_sum_ij[i,j]
end
if t==s && s!=r
return ฯ_ijt[i,j,s] * (1 - 2*ฯ_ijt[i,j,s]) * ฯ_ijt[i,j,r] * y_sum_ij[i,j]
end
if s==r && r!=t
return ฯ_ijt[i,j,s] * (1 - 2*ฯ_ijt[i,j,s]) * ฯ_ijt[i,j,t] * y_sum_ij[i,j]
end
if t==s && t==r
return - ฯ_ijt[i,j,t] * (1 - ฯ_ijt[i,j,t]) * (1 - 2*ฯ_ijt[i,j,t]) * y_sum_ij[i,j]
end
end
function H_ij(i,j,ฯ_ijt,y_sum_ij)
# H[i,j,:,:]
return (- ฯ_ijt[i,j,:] * ฯ_ijt[i,j,:]' + Diagonal(ฯ_ijt[i,j,:])) .* y_sum_ij[i,j]
end
function Hฬ_ij(i,j,ฯ_ijt,ฮป_sum_ij)
# Hฬ[i,j,:,:]
return (- ฯ_ijt[i,j,:] * ฯ_ijt[i,j,:]' + Diagonal(ฯ_ijt[i,j,:])) .* ฮป_sum_ij[i,j]
end
function ฮ_ij(i,j,ฮป_ijt)
# Hฬ[i,j,:,:]
return Diagonal(ฮป_ijt[i,j,:])
end
function G_ij_times_x_ijk(i,j,k,Xdemean_ijtk,T,ฯ_ijt,y_sum_ij)
# [Gij * xij,k]_st
result = zeros(T,T)
for s โ 1:T
for t โ 1:T
for r โ 1:T
result[s,t] += G_ijtsr(i,j,r,s,t,ฯ_ijt,y_sum_ij) * Xdemean_ijtk[i,j,r,k]
end
end
end
return result
end
function B!(Bฬ,K,I,J,T,ฯ_ijt,ฮป_sum_ij,y_sum_ij,Xdemean_ijtk,S_ijt)
for k โ 1:K
for i โ 1:I
# Construct: Hฬ_pseudo_inv
# HxฬS'
# Gxฬ
# SS'
# xฬHxฬ
Hฬ_sum_along_j_fix_i = zeros(T,T)
HxฬS_fix_i = zeros(T,T)
Gxฬ_fix_i = zeros(T,T)
SS_fix_i = zeros(T,T)
@turbo for j โ 1:J
#if i != j # uncomment to not include terms where i==j
Hฬ_sum_along_j_fix_i += Hฬ_ij(i,j,ฯ_ijt,ฮป_sum_ij)
HxฬS_fix_i += H_ij(i,j,ฯ_ijt,y_sum_ij) * Xdemean_ijtk[i,j,:,k] * transpose(S_ijt[i,j,:])
Gxฬ_fix_i += G_ij_times_x_ijk(i,j,k,Xdemean_ijtk,T,ฯ_ijt,y_sum_ij)
SS_fix_i += S_ijt[i,j,:] * transpose(S_ijt[i,j,:])
##newW## Wฬ += Xdemean_ijtk[i,j,:,:]' * Hฬ[i,j,:,:] * Xdemean_ijtk[i,j,:,:]
#end
end
Hฬ_pseudo_inv = pinv(Hฬ_sum_along_j_fix_i)
term1 = - Hฬ_pseudo_inv * HxฬS_fix_i
term2 = Gxฬ_fix_i * Hฬ_pseudo_inv * SS_fix_i * Hฬ_pseudo_inv ./ 2.0
Bฬ[k] += tr(term1 + term2)
end
end
end
function D!(Dฬ,K,I,J,T,ฯ_ijt,ฮป_sum_ij,y_sum_ij,Xdemean_ijtk,S_ijt)
for k โ 1:K
for j โ 1:J
# Construct: Hฬ_pseudo_inv
# HxฬS'
# Gxฬ
# SS'
Hฬ_sum_along_i_fix_j = zeros(T,T)
HxฬS_fix_j = zeros(T,T)
Gxฬ_fix_j = zeros(T,T)
SS_fix_j = zeros(T,T)
@turbo for i โ 1:I
#if i != j # uncomment to not include terms where i==j
Hฬ_sum_along_i_fix_j += Hฬ_ij(i,j,ฯ_ijt,ฮป_sum_ij)
HxฬS_fix_j += H_ij(i,j,ฯ_ijt,y_sum_ij) * Xdemean_ijtk[i,j,:,k] * transpose(S_ijt[i,j,:])
Gxฬ_fix_j += G_ij_times_x_ijk(i,j,k,Xdemean_ijtk,T,ฯ_ijt,y_sum_ij)
SS_fix_j += S_ijt[i,j,:] * transpose(S_ijt[i,j,:])
#end
end
Hฬ_pseudo_inv = pinv(Hฬ_sum_along_i_fix_j)
term1 = - Hฬ_pseudo_inv * HxฬS_fix_j
term2 = Gxฬ_fix_j * Hฬ_pseudo_inv * SS_fix_j * Hฬ_pseudo_inv ./ 2.0
Dฬ[k] += tr(term1 + term2)
end
end
end
function ฮฉ(I,J,T,K,Xdemean_ijtk,Wฬ,ฯ_ijt,ฮป_sum_ij,S_ijt)
corrected_ฮฉ = zeros(K,K)
HH = inv_W_ฯ_constructor(I,J,T,ฯ_ijt,ฮป_sum_ij)
for i in 1:I
for j in 1:J
# construct Var(S_ij|x_ij)
# Xdemean_ijtk[i,j,:,:] is T-by-K
bias1 = Xdemean_ijtk[i,j,:,:] * inv(Wฬ) * transpose(Xdemean_ijtk[i,j,:,:])
bias2 = d_ij_constructor(i,j,I,J,T) * HH * transpose(d_ij_constructor(i,j,I,J,T))
lev_correction = LinearAlgebra.I(T) - ( Hฬ_ij(i,j,ฯ_ijt,ฮป_sum_ij) * (bias1+bias2) )
ESฬSฬ = S_ijt[i,j,:] * transpose(S_ijt[i,j,:])
ESS = lev_correction \ ESฬSฬ
corrected_ฮฉ += transpose(Xdemean_ijtk[i,j,:,:]) * ESS * Xdemean_ijtk[i,j,:,:]
end
end
return corrected_ฮฉ ./ (I*J)
end
function ฮฉ_twoway(I,J,T,K,Xdemean_ijtk,Wฬ,ฮป_ijt,S_ijt)
corrected_ฮฉ = zeros(K,K)
HH = inv_W_ฯ_constructor_twoway(I,J,T,ฮป_ijt)
for i in 1:I
for j in 1:J
# construct Var(S_ij|x_ij)
# Xdemean_ijtk[i,j,:,:] is T-by-K
bias1 = Xdemean_ijtk[i,j,:,:] * inv(Wฬ) * transpose(Xdemean_ijtk[i,j,:,:])
bias2 = d_ij_constructor(i,j,I,J,T) * HH * transpose(d_ij_constructor(i,j,I,J,T))
lev_correction = LinearAlgebra.I(T) - (ฮ_ij(i,j,ฮป_ijt) * (bias1+bias2) )
ESฬSฬ = S_ijt[i,j,:] * transpose(S_ijt[i,j,:])
ESS = lev_correction \ ESฬSฬ
corrected_ฮฉ += transpose(Xdemean_ijtk[i,j,:,:]) * ESS * Xdemean_ijtk[i,j,:,:]
end
end
return corrected_ฮฉ ./ (I*J)
end
function inv_W_ฯ_constructor(I,J,T,ฯ_ijt,ฮป_sum_ij)
H_ij_full = Array{Array{Float64,2},2}(undef,I,J)
for i in 1:I
for j in 1:J
H_ij_full[i,j] = Hฬ_ij(i,j,ฯ_ijt,ฮป_sum_ij)
end
end
Hฮฑฮฑ = zeros(I*T,I*T)
Hฮณฮณ = zeros(J*T,J*T)
Hฬ_sum_along_i_fix_j = sum(H_ij_full,dims=1)
Hฬ_sum_along_j_fix_i = sum(H_ij_full,dims=2)
for i in 1:I
Hฮฑฮฑ[((i-1)*T+1) : i*T, ((i-1)*T+1) : i*T] = Hฬ_sum_along_j_fix_i[i]
end
for j in 1:J
Hฮณฮณ[((j-1)*T+1) : j*T, ((j-1)*T+1) : j*T] = Hฬ_sum_along_i_fix_j[j]
end
Hฮฑฮณ = cell2full(H_ij_full)
Hฯฯ = [Hฮฑฮฑ Hฮฑฮณ;transpose(Hฮฑฮณ) Hฮณฮณ]
return pinv(Hฯฯ)
end
function inv_W_ฯ_constructor_twoway(I,J,T,ฮป_ijt)
H_ij_full = Array{Array{Float64,2},2}(undef,I,J)
for i in 1:I
for j in 1:J
H_ij_full[i,j] = ฮ_ij(i,j,ฮป_ijt)
end
end
Hฮฑฮฑ = zeros(I*T,I*T)
Hฮณฮณ = zeros(J*T,J*T)
Hฬ_sum_along_i_fix_j = sum(H_ij_full,dims=1)
Hฬ_sum_along_j_fix_i = sum(H_ij_full,dims=2)
for i in 1:I
Hฮฑฮฑ[((i-1)*T+1) : i*T, ((i-1)*T+1) : i*T] = Hฬ_sum_along_j_fix_i[i]
end
for j in 1:J
Hฮณฮณ[((j-1)*T+1) : j*T, ((j-1)*T+1) : j*T] = Hฬ_sum_along_i_fix_j[j]
end
Hฮฑฮณ = cell2full(H_ij_full)
Hฯฯ = [Hฮฑฮฑ Hฮฑฮณ;transpose(Hฮฑฮณ) Hฮณฮณ]
return pinv(Hฯฯ)
end
function d_ij_constructor(i,j,I,J,T)
i_indicator = falses(1,I)
j_indicator = falses(1,J)
i_indicator[i] = true
j_indicator[j] = true
idenTT = LinearAlgebra.I(T)
return LinearAlgebra.kron(hcat(i_indicator,j_indicator),idenTT)
end
function get_new_vcov(I,J,T,K,Xdemean_ijtk,Wฬ,ฯ_ijt,ฮป_sum_ij,S_ijt)
corrected_ฮฉ = ฮฉ(I,J,T,K,Xdemean_ijtk,Wฬ,ฯ_ijt,ฮป_sum_ij,S_ijt)
inv_Wฬ = inv(Wฬ)
new_vcov = inv_Wฬ * corrected_ฮฉ * inv_Wฬ
return new_vcov .* ((I*J) * (I*J)/(I*J-1))
end
function get_new_vcov_twoway(I,J,T,K,Xdemean_ijtk,Wฬ,ฮป_ijt,S_ijt)
corrected_ฮฉ = ฮฉ_twoway(I,J,T,K,Xdemean_ijtk,Wฬ,ฮป_ijt,S_ijt)
inv_Wฬ = inv(Wฬ)
new_vcov = inv_Wฬ * corrected_ฮฉ * inv_Wฬ
return new_vcov .* ((I*J) * (I*J)/(I*J-1))
end
function cell2full(cell::Matrix{Matrix{Float64}})
sentinel = cell[1,1]
I,J = size(cell)
T = size(sentinel)[1]
full = zeros(I*T,J*T)
for i in 1:I
for j in 1:J
full[ ((i-1)*T+1):(i*T), ((j-1)*T+1):(j*T) ] += cell[i,j]
end
end
return full
end | GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 533 |
##############################################################################
##
## Subet and Make sure Interaction if Vector{Float64} (instead of missing)
##
##############################################################################
# index and convert interaction Vector{Float64,, Missing} to Vector{Missing}
function _subset(fe::FixedEffect, esample)
interaction = convert(AbstractVector{Float64}, fe.interaction[esample])
FixedEffect{typeof(fe.refs), typeof(interaction)}(fe.refs[esample], interaction, fe.n)
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 1183 | struct VcovDataGLM{T, Tu, N} <: RegressionModel
modelmatrix::Matrix{Float64} # X
crossmodelmatrix::T # X'X in the simplest case. Can be Matrix but preferably Factorization
invcrossmodelmatrix::Tu
residuals::Array{Float64, N} # vector or matrix of residuals (matrix in the case of IV, residuals of Xendo on (Z, Xexo))
dof_residual::Int
end
StatsAPI.modelmatrix(x::VcovDataGLM) = x.modelmatrix
StatsAPI.crossmodelmatrix(x::VcovDataGLM) = x.crossmodelmatrix
invcrossmodelmatrix(x::VcovDataGLM) = x.invcrossmodelmatrix
StatsAPI.residuals(x::VcovDataGLM) = x.residuals
StatsAPI.dof_residual(x::VcovDataGLM) = x.dof_residual
# with clusters, the standard StatsBase.vcov works
function StatsAPI.vcov(x::VcovDataGLM, ::Vcov.RobustCovariance)
A = invcrossmodelmatrix(x)
C = modelmatrix(x) .* residuals(x)
B = C' * C
return Symmetric(A * B * A)
end
function StatsAPI.vcov(x::VcovDataGLM, ::Vcov.SimpleCovariance)
return Symmetric(invcrossmodelmatrix(x))
end
function StatsAPI.vcov(x::VcovDataGLM, v::Vcov.ClusterCovariance)
xtx = invcrossmodelmatrix(x)
Vcov.pinvertible(Symmetric(xtx * Vcov.S_hat(x, v) * xtx))
end | GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 3386 |
#include("../src/GLFixedEffectModels.jl")
# Benchmark on Intel i7-6700 with 4 cores @ 3.40 GHz, 64GB RAM
# running Windows 10 Enterprise, Julia 1.3.1, CUDA 10.2, R 3.6.2
# fixest 0.2.1
using DataFrames, GLM, Random
using GLFixedEffectModels
using BenchmarkTools
BenchmarkTools.DEFAULT_PARAMETERS.samples = 10
BenchmarkTools.DEFAULT_PARAMETERS.seconds = 60
rng = MersenneTwister(1234)
N = 2_000_000
K = 100
id1 = rand(rng, 1:(N/K), N)
id2 = rand(rng, 1:K, N)
x1 = randn(rng, N) ./ 10.0
x2 = randn(rng, N) ./ 10.0
y= exp.(3.0 .* x1 .+ 2.0 .* x2 .+ sin.(id1) .+ cos.(id2).^2 .+ randn(rng, N))
df = DataFrame(id1 = categorical(string.(id1)), id1_noncat = id1, id2 = categorical(string.(id2)), x1 = x1, x2 = x2, y = y)
m = @formula y ~ x1 + x2 + GLFixedEffectModels.fe(id1) + GLFixedEffectModels.fe(id2)
@profile GLFixedEffectModels.nlreg(df, m, Poisson(), GLM.LogLink() , start = [0.2;0.2] )
Profile.print()
Profile.clear()
@time GLFixedEffectModels.nlreg(df, m, Poisson(), GLM.LogLink() , start = [0.2;0.2] )
@profile (for i = 1:5; GLFixedEffectModels.nlreg(df, m, Poisson(), GLM.LogLink() , start = [0.2;0.2] ); end)
# Benchmark GLFixedEffectModels
# Two FE
m = @formula y ~ x1 + x2 + fe(id1) + fe(id2)
@benchmark x = nlreg(df, m, Poisson(), GLM.LogLink() , start = [0.2;0.2] )
# BenchmarkTools.Trial:
# memory estimate: 1.29 GiB
# allocs estimate: 6144
# --------------
# minimum time: 1.953 s (6.00% GC)
# median time: 2.255 s (7.11% GC)
# mean time: 2.225 s (10.03% GC)
# maximum time: 2.529 s (15.10% GC)
# --------------
# samples: 10
# evals/sample: 1
# Two FE
m = GLFixedEffectModels.@formula y ~ x1 + x2 + GLFixedEffectModels.fe(id1) + GLFixedEffectModels.fe(id2)
@benchmark x = GLFixedEffectModels.nlreg(df, m, Poisson(), GLM.LogLink() , start = [0.2;0.2] )
# Set up R
using RCall
R"library(alpaca)"
R"library(fixest)"
df_r = deepcopy(df)
@rput df_r
# Benchmark Alpaca
function runme()
R"res <- alpaca::feglm(y ~ x1 + x2 | id1 , df_r, poisson(), beta.start = c(0.2,0.2))"
end
@benchmark runme()
# BenchmarkTools.Trial:
# memory estimate: 1.09 KiB
# allocs estimate: 32
# --------------
# minimum time: 5.695 s (0.00% GC)
# median time: 6.327 s (0.00% GC)
# mean time: 6.552 s (0.00% GC)
# maximum time: 8.360 s (0.00% GC)
# --------------
# samples: 10
# evals/sample: 1
# BENCHMARK FIXEST
# One thread
function runme()
R"res <- fixest::feglm(y ~ x1 + x2 | id1, df_r, family = \"poisson\", nthreads = 1, start = c(0.2,0.2))"
end
function runme()
R"res <- fixest::feglm(y ~ x1 + x2 | id1 + id2, df_r, family = \"poisson\", nthreads = 1, start = c(0.2,0.2))"
end
# Two FE's
# BenchmarkTools.Trial:
# memory estimate: 1.09 KiB
# allocs estimate: 32
# --------------
# minimum time: 3.201 s (0.00% GC)
# median time: 3.393 s (0.00% GC)
# mean time: 3.392 s (0.00% GC)
# maximum time: 3.725 s (0.00% GC)
# --------------
# samples: 10
# evals/sample: 1
@benchmark runme()
# As many threads as you like
function runme()
R"res <- fixest::feglm(y ~ x1 + x2 | id1, df_r, family = \"poisson\", start = c(0.2,0.2))"
end
function runme()
R"res <- fixest::feglm(y ~ x1 + x2 | id1 + id2, df_r, family = \"poisson\", start = c(0.2,0.2))"
end
@benchmark runme()
R"rm(list = ls())"
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 5872 | using GLFixedEffectModels
using Distributions, CategoricalArrays
using RDatasets, Test, Random
using StableRNGs
using GLM: LogitLink, ProbitLink, LogLink
# using RCall
rng = StableRNG(1234)
df = dataset("datasets", "iris")
df.binary = zeros(Float64, size(df,1))
df[df.SepalLength .> 5.0,:binary] .= 1.0
df.SpeciesDummy = string.(df.Species)
idx = rand(rng,1:3,size(df,1),1)
a = ["A","B","C"]
df.Random = vec([a[i] for i in idx])
df.RandomCategorical = df.Random
# Test 1: Two-way Logit
# See if the coefficients after bias correction match with the results obtained from R package alpaca
#= R"""
res1 <- alpaca::feglm(binary ~ SepalWidth | SpeciesDummy + RandomCategorical , df_r, binomial(), beta.start = c(0.2))
res_bc1 <- alpaca::biasCorr(res1)
coef1 <- res_bc1[["coefficients"]]
"""
@rget coef1
###############################
coef1 = 7.214197357443702
###############################
=#
m = GLFixedEffectModels.@formula binary ~ SepalWidth + GLFixedEffectModels.fe(SpeciesDummy) + GLFixedEffectModels.fe(RandomCategorical)
x = GLFixedEffectModels.nlreg(df, m, Binomial(), LogitLink(), start = [0.2], save = [:mu,:eta])
x_afterbc = GLFixedEffectModels.bias_correction(x, df; i_symb = :SpeciesDummy, j_symb = :RandomCategorical)
@test x_afterbc.coef โ [7.214197357443702] atol = 1e-4
# Test 2: Two-way Probit
#=R"""
control <- do.call(feglmControl,list())
control[["dev.tol"]] <- 1e-10
res2 <- alpaca::feglm(binary ~ SepalWidth | SpeciesDummy + RandomCategorical , df_r, binomial(link = "probit"), beta.start = c(0.2), control = control)
res_bc2 <- alpaca::biasCorr(res2)
coef2_afterbc <- res_bc2[["coefficients"]]
"""
@rget coef2_afterbc
###############################
coef2_afterbc = 4.1962783532153605
###############################
=#
x = GLFixedEffectModels.nlreg(df, m, Binomial(), ProbitLink(), start = [0.2], save=[:mu,:eta])
x_afterbc = GLFixedEffectModels.bias_correction(x, df; i_symb = :SpeciesDummy, j_symb = :RandomCategorical)
@test x_afterbc.coef โ [4.1962783532153605] atol = 1e-4
# Test 3: Three-way Logit (I = 5, J = 6, T = 7) Network
I, J, T = 5, 6, 7
i_index = repeat(1:I,inner = J*T)
j_index = repeat(1:J,outer = I, inner = T)
t_index = repeat(1:T,outer = I*J)
# Reset rng
rng = StableRNG(1234)
data = DataFrame(i = i_index, j = j_index, t = t_index, x = rand(rng, I*J*T), y = rand(rng,Bernoulli(), I*J*T))
#= @rput data
R"""
data <- data %>%
mutate(it = as.factor(paste(i,t))) %>%
mutate(jt = as.factor(paste(j,t))) %>%
mutate(ij = as.factor(paste(i,j)))
res3 <- alpaca::feglm(y ~ x | it + jt + ij, data)
res3.bc_L0 <- alpaca::biasCorr(res3,panel.structure = 'network',L = 0)
res3.bc_L3 <- alpaca::biasCorr(res3,panel.structure = 'network',L = 3)
coef_L0 <- res3.bc_L0[["coefficients"]]
coef_L3 <- res3.bc_L3[["coefficients"]]
"""
@rget coef_L0 coef_L3
###############################
coef_L0 = -0.5478161609879237
coef_L3 = -0.6077559017896819
###############################
=#
m = GLFixedEffectModels.@formula y ~ x + GLFixedEffectModels.fe(i)*GLFixedEffectModels.fe(t) + GLFixedEffectModels.fe(j)*GLFixedEffectModels.fe(t) + GLFixedEffectModels.fe(i)*GLFixedEffectModels.fe(j)
x = GLFixedEffectModels.nlreg(data, m, Binomial(), LogitLink(), start = [0.2], save=[:mu,:eta])
x_bc_L0 = GLFixedEffectModels.bias_correction(x, data; i_symb = :i, j_symb = :j, t_symb = :t, panel_structure = :network, L = 0)
x_bc_L3 = GLFixedEffectModels.bias_correction(x, data; i_symb = :i, j_symb = :j, t_symb = :t, panel_structure = :network, L = 3)
@test x_bc_L0.coef โ [-0.5478161609879237] atol = 1e-4
@test x_bc_L3.coef โ [-0.5950043813922534] atol = 1e-4
# we didn't use -0.6077559017896819 because in this network specification, alpaca will perform bandwidth on a refined dataset that deletes observation(s) due to perfect classification.
# we perform the bandwidth adjustment on the original dataset.
# Test 4: Three-way Poisson, Network structure
I, J, T = 9, 9, 6
i_index = repeat(1:I,inner = J*T)
j_index = repeat(1:J,outer = I, inner = T)
t_index = repeat(1:T,outer = I*J)
# Reset rng
rng = StableRNG(1234)
data = DataFrame(i = i_index, j = j_index, t = t_index, x1 = rand(rng, I*J*T), x2= rand(rng,I*J*T))
data.y = exp.(data.x1 + data.x2) .* rand(rng, Poisson(), I*J*T)
# data = sort(data, [:t,:j,:i])
# using CSV
# CSV.write("test4.csv",data)
m = GLFixedEffectModels.@formula y ~ x1 + x2 + GLFixedEffectModels.fe(i)*GLFixedEffectModels.fe(t) + GLFixedEffectModels.fe(j)*GLFixedEffectModels.fe(t) + GLFixedEffectModels.fe(i)*GLFixedEffectModels.fe(j)
x = GLFixedEffectModels.nlreg(data, m, Poisson(), LogLink(), start = [0.2,0.2], save=[:mu,:eta])
x_afterbc = GLFixedEffectModels.bias_correction(x, data; i_symb = :i, j_symb = :j, t_symb = :t, panel_structure = :network)
@test x_afterbc.coef โ [1.0005518;0.8922288] atol = 1e-4
@test sqrt.([x_afterbc.vcov[1,1], x_afterbc.vcov[2,2]]) โ [0.2221267;0.2214389] atol = 1e-4
# Test 5: Two-way poisson, Network structure
m = GLFixedEffectModels.@formula y ~ x1 + x2 + GLFixedEffectModels.fe(i)*GLFixedEffectModels.fe(t) + GLFixedEffectModels.fe(j)*GLFixedEffectModels.fe(t)
x = GLFixedEffectModels.nlreg(data, m, Poisson(), LogLink(), start = [0.2,0.2], save=[:mu,:eta])
x_afterbc = GLFixedEffectModels.bias_correction(x, data; i_symb = :i, j_symb = :j, t_symb = :t, panel_structure = :network)
@test sqrt.([x_afterbc.vcov[1,1], x_afterbc.vcov[2,2]]) โ [0.1930318;0.1652417] atol = 1e-4
#= Test 6, example from ppml_fe_bias
using CSV
df_pois = CSV.read("PPMLFEBIAS_EXAMPLE_DATA.csv",DataFrame)
m = GLFixedEffectModels.@formula trade ~ fta + GLFixedEffectModels.fe(imp) * GLFixedEffectModels.fe(year) + GLFixedEffectModels.fe(exp) * GLFixedEffectModels.fe(year) + GLFixedEffectModels.fe(imp) * GLFixedEffectModels.fe(exp)
x = GLFixedEffectModels.nlreg(df_pois, m, Poisson(), LogLink(), start = [0.2], save=true, GLFixedEffectModels.Vcov.cluster(:imp,:exp); rho_tol = 1e-8)
=#
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 6968 | using GLFixedEffectModels
using Distributions, CategoricalArrays
using RDatasets, Test, Random
using StableRNGs
using GLM
# using Alpaca
rng = StableRNG(1234)
df = dataset("datasets", "iris")
df.binary = zeros(Float64, size(df,1))
df[df.SepalLength .> 5.0,:binary] .= 1.0
df.SpeciesDummy = string.(df.Species)
idx = rand(rng,1:3,size(df,1),1)
a = ["A","B","C"]
df.Random = vec([a[i] for i in idx])
df.RandomCategorical = df.Random
# result = Alpaca.feglm(df, Alpaca.@formula(binary ~ SepalWidth),
# Binomial(),
# fe = :SpeciesDummy
# )
# @test StatsBase.coef(result) โ [-0.221486] atol = 1e-4
#
# result = Alpaca.feglm(df, Alpaca.@formula(binary ~ SepalWidth),
# Binomial(),
# fe = :SpeciesDummy,
# start = [0.2], trace = 2
# )
# # glm
# gm1 = fit(GeneralizedLinearModel, @formula(binary ~ SepalWidth),
# df, Poisson())
# PROBIT ------------------------------------------------------------------
# One FE, Probit
m = @formula binary ~ SepalWidth + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), GLM.ProbitLink(), start = [0.2], save = [:fe])
@test x.coef โ [4.7793003788996895] atol = 1e-4
# Two FE, Probit
m = @formula binary ~ SepalWidth + fe(SpeciesDummy) + fe(RandomCategorical)
x = nlreg(df, m, Binomial(), ProbitLink(), start = [0.2] )
@test x.coef โ [4.734428099238226] atol = 1e-4
# test target value obtained from alpaca::feglm with `dev.tol <- 1e-10`
# LOGIT ------------------------------------------------------------------
# One FE, Logit
m = @formula binary ~ SepalWidth + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), LogitLink(), start = [0.2] )
@test coef(x) โ [8.144352] atol = 1e-4
# Two FE, Logit
m = @formula binary ~ SepalWidth + fe(SpeciesDummy) + fe(RandomCategorical)
x = nlreg(df, m, Binomial(), LogitLink(), start = [0.2] )
@test coef(x) โ [8.05208] atol = 1e-4
# make sure that showing works
@show x
# VCov
m = @formula binary ~ SepalWidth + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), LogitLink(), Vcov.simple() , start = [0.2] )
# result = Alpaca.feglm(df, Alpaca.@formula(binary ~ SepalWidth),
# Binomial(),
# fe = :SpeciesDummy,
# start = [0.2], trace = 2)
@test vcov(x) โ [3.585929] atol = 1e-4
m = @formula binary ~ SepalWidth + PetalLength + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), LogitLink(), Vcov.robust() , start = [0.2, 0.2] )
@test vcov(x) โ [ 2.28545 0.35542; 0.35542 3.65724] atol = 1e-4
m = @formula binary ~ SepalWidth + PetalLength + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), LogitLink(), Vcov.cluster(:SpeciesDummy) , start = [0.2, 0.2] )
@test vcov(x) โ [ 1.48889 0.464914; 0.464914 3.07176 ] atol = 1e-4
m = @formula binary ~ SepalWidth + PetalLength + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), LogitLink(), Vcov.cluster(:SpeciesDummy,:RandomCategorical) , start = [0.2, 0.2] )
@test vcov(x) โ [0.43876 0.315690; 0.315690 1.59676] atol = 1e-4
# Save fe
m = @formula binary ~ SepalWidth + fe(SpeciesDummy)
x = nlreg(df, m, Binomial(), LogitLink(), start = [0.2] , save = [:fe] )
fes = Float64[]
for c in levels(df.SpeciesDummy)
push!(fes, x.augmentdf[df.SpeciesDummy .== c, :fe_SpeciesDummy][1])
end
@test fes[1] โ -28.3176042490 atol = 1e-4
@test fes[2] โ -17.507252832 atol = 1e-4
@test fes[3] โ -17.851658274 atol = 1e-4
# loglikelihood and psuedo r2
m = @formula binary ~ SepalWidth + fe(Species)
x = nlreg(df, m, Binomial(), LogitLink(), Vcov.cluster(:Species) , start = [0.2] )
@test loglikelihood(x) โ -27.58750 atol = 1e-4
@test pseudo_r2(x) โ 0.64518 atol = 1e-4
@test pseudo_adjr2(x) โ 0.60660 atol = 1e-4
m = @formula binary ~ SepalWidth + PetalLength + fe(Species)
x = nlreg(df, m, Binomial(), LogitLink(), Vcov.cluster(:Species) , start = [0.2, 0.2] )
@test loglikelihood(x) โ -21.35416 atol = 1e-4
@test pseudo_r2(x) โ 0.72535 atol = 1e-4
@test pseudo_adjr2(x) โ 0.67391 atol = 1e-4
m = @formula binary ~ SepalWidth + fe(Species)
x = nlreg(df, m, Poisson(), LogLink(), Vcov.cluster(:Species) , start = [0.2] )
@test loglikelihood(x) โ -137.19554 atol = 1e-4
@test pseudo_r2(x) โ 0.06232 atol = 1e-4
@test pseudo_adjr2(x) โ 0.04182 atol = 1e-4
m = @formula binary ~ SepalWidth + PetalLength + fe(Species)
x = nlreg(df, m, Poisson(), LogLink(), Vcov.cluster(:Species) , start = [0.2, 0.2] )
@test loglikelihood(x) โ -137.19365 atol = 1e-4
@test pseudo_r2(x) โ 0.06234 atol = 1e-4
@test pseudo_adjr2(x) โ 0.03500 atol = 1e-4
# For comparison with Alpaca.jl
# result = Alpaca.feglm(df, Alpaca.@formula(binary ~ SepalWidth + PetalLength),
# Binomial(),
# fe = :SpeciesDummy,
# start = [0.2, 0.2], trace = 2, vcov = :robust)
# result = Alpaca.feglm(df, Alpaca.@formula(binary ~ SepalWidth + PetalLength),
# Binomial(),
# fe = :SpeciesDummy,
# start = [0.2, 0.2], trace = 2, vcov = :(cluster(SpeciesDummy)))
# result = Alpaca.feglm(df, Alpaca.@formula(binary ~ SepalWidth + PetalLength),
# Binomial(),
# fe = :SpeciesDummy,
# start = [0.2, 0.2], trace = 2, vcov = :(cluster(SpeciesDummy + RandomCategorical)))
# POISSON ----------------------------------------------------------------
rng = StableRNG(1234)
N = 1_000_000
K = 100
id1 = rand(rng, 1:(round(Int64,N/K)), N)
id2 = rand(rng, 1:K, N)
x1 = randn(rng, N) ./ 10.0
x2 = randn(rng, N) ./ 10.0
y= exp.(3.0 .* x1 .+ 2.0 .* x2 .+ sin.(id1) .+ cos.(id2).^2 .+ randn(rng, N))
df = DataFrame(id1_noncat = id1, id2_noncat = id2, x1 = x1, x2 = x2, y = y)
df.id1 = id1
df.id2 = id2
# One FE, Poisson
m = @formula y ~ x1 + x2 + fe(id1)
x = nlreg(df, m, Poisson(), LogLink() , start = [0.2;0.2] )
# result = Alpaca.feglm(df, Alpaca.@formula(y ~ x1 + x2),
# Poisson(),
# fe =:(id1)
# )
@test coef(x) โ [2.9912251435680237; 2.002088081633829] atol = 1e-4
# Two FE, Poisson
m = @formula y ~ x1 + x2 + fe(id1) + fe(id2)
x = nlreg(df, m, Poisson(), LogLink() , start = [0.2;0.2] )
# result = Alpaca.feglm(df, Alpaca.@formula(y ~ x1 + x2),
# Poisson(),
# fe =:(id1 + id2)
# )
@test coef(x) โ [ 2.987722385633501; 2.0056217356569155] atol = 1e-4
# Separation: based on Sergio Correia's example (https://github.com/sergiocorreia/ppmlhdfe/blob/master/guides/separation_primer.md) but with logit (easier to generate)
rng = StableRNG(1234)
df_sep = DataFrame(y = [[0.0, 0.0, 0.0, 1.0, 1.0, 1.0];rand(rng,[0.0,1.0],500)], x1 = [[1, 1, 0, 0, 0, 0];zeros(Float64,500)], x = collect(1.0:(6.0+500.0)))
m = @formula y ~ x + fe(x1)
try
# this should fail
local x = nlreg(df_sep, m, Binomial(), LogitLink() , start = [0.1], separation = Symbol[], separation_mu_lbound=1e-10, separation_mu_ubound=1.0-1e-10, verbose=true, rho_tol=1e-12 )
catch ex
@test !isnothing(ex)
end
# with cutoff on mu, it converges
try
# this should pass
local x = nlreg(df_sep, m, Binomial(), LogitLink() , start = [0.1], separation = [:mu], separation_mu_lbound=1e-10, separation_mu_ubound=1.0-1e-10, verbose=true, rho_tol=1e-12 )
@test x.coef โ [-0.0005504145168443688] atol = 1e-4
catch ex
@test isnothing(ex)
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 337 | using GLFixedEffectModels
tests = ["nlreg.jl",
"biascorr_test.jl",
"separation.jl"]
println("Running tests:")
for test in tests
try
include(test)
println("\t\033[1m\033[32mPASSED\033[0m: $(test)")
catch e
println("\t\033[1m\033[31mFAILED\033[0m: $(test)")
showerror(stdout, e, backtrace())
rethrow(e)
end
end
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | code | 9151 | using DataFrames
# include("../src/GLFixedEffectModels.jl")
using GLFixedEffectModels
using Test
using Downloads, CSV, Random
Random.seed!(1234)
# test1 for collinearity
df = DataFrame(y = rand(6), x1 = [1;0.5;0.8;1;0;0], x2 = [0;0.5;0.2;0;0;0], id = [1;1;1;1;2;2])
# y ~ x1 + x2 + fe(id), will drop x2
res1 = nlreg(df, @formula(y ~ x1 + x2 + fe(id)), Poisson(), LogLink())
@test 0 โ res1.coef
#@show res1
# y ~ x1 + fe(id)
res2 = nlreg(df, @formula(y ~ x2 + fe(id)), Poisson(), LogLink())
@test 0 โ res2.coef
#@show res2
# ---------------------------------------------------------------------------------------------------------------- #
# test2 for ReLU separation
df = DataFrame(y = [0.0;1;0;0;5], x = [0;0;0;0;0], id1 = [1;1;2;2;2], id2 = [1;1;1;2;2])
res1 = nlreg(df, @formula(y ~ x + fe(id1) + fe(id2)), Poisson(), LogLink() ; separation = [:ReLU])
@test res1.nobs == 4
# test3 for FE separation
df = DataFrame(y = [0.0;0;0;1;2;3], id = [1;1;2;2;3;3])
res1 = nlreg(df, @formula(y ~ fe(id)), Poisson(), LogLink() ; separation = [:fe])
@test res1.nobs == 4
# ---------------------------------------------------------------------------------------------------------------- #
# benchmark from sergio correia's ppmlhdfe repo
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/guides/csv/example1.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
# add one fixed effect that is basically a intercept, because nlreg won't run without fe
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + x4 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/guides/csv/example2.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
# add one fixed effect that is basically a intercept, because nlreg won't run without fe
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + x4 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/guides/csv/fe1.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ x1 + x2 + fe(i) + fe(j)), Poisson(), LogLink() ; separation = [:ReLU])
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/guides/csv/fe2.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ x1 + fe(i) + fe(j)), Poisson(), LogLink() ; separation = [:ReLU])
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/guides/csv/fe3.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ x1 + fe(i) + fe(j)), Poisson(), LogLink() ; separation = [:ReLU])
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/01.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ x1 + x2 + fe(id1) + fe(id2)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/02.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ fe(id1) + fe(id2)), Poisson(), LogLink() ; drop_singletons = false, separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/03.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ fe(id1) + fe(id2) + fe(id3)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/04.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ fe(id1) + fe(id2)), Poisson(), LogLink() ; separation = [:ReLU])
# don't test on the last ob because it was a singleton instead of a separation
@test all(df.separated[1:end-1] .== .~res1.esample[1:end-1])
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/05.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
# add one fixed effect that is basically a intercept, because nlreg won't run without fe
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + x4 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/06.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
# add one fixed effect that is basically a intercept, because nlreg won't run without fe
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + x4 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
#= something wrong with this one
i think the reason is that ppml calls :simplex before :ReLU.
ppmlhdfe's output is:
(simplex method dropped 4 separated observations)
(dropped 1 singleton observations)
something was dropped before calling ReLU.
when setting ppmlhdfe sep as sep(ir), the output is:
(ReLU method dropped 1 separated observation in 2 iterations)
and the output gives ill results too.
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/07.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ x1 + x2 + fe(id1) + fe(id2)), Poisson(), LogLink() ; drop_singletons = false, separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
=#
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/08.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ x1 + x2 + fe(id1) + fe(id2)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/09.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
# add one fixed effect that is basically a intercept, because nlreg won't run without fe
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/10.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/11.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/12.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ fe(id1) + fe(id2)), Poisson(), LogLink() ; drop_singletons = false , separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/13.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
res1 = nlreg(df, @formula(y ~ fe(id1) + fe(id2)), Poisson(), LogLink() ; drop_singletons = false , separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/14.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/15.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/16.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/17.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample)
url = "https://raw.githubusercontent.com/sergiocorreia/ppmlhdfe/master/test/separation_datasets/18.csv"
df = DataFrame(CSV.File(Downloads.download(url)))
df.id = ones(size(df,1))
res1 = nlreg(df, @formula(y ~ x1 + x2 + x3 + fe(id)), Poisson(), LogLink() ; separation = [:ReLU])
@test all(df.separated .== .~res1.esample) | GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 0.5.4 | 0ed56184e260d9037708eaad246727bab0169332 | docs | 8633 | # GLFixedEffectModels.jl
<!--




 -->
 [](http://codecov.io/github/jmboehm/GLFixedEffectModels.jl?branch=master) [](https://zenodo.org/badge/latestdoi/164128032)
This package estimates generalized linear models with high dimensional categorical variables. It builds on Matthieu Gomez's [FixedEffects.jl](https://github.com/FixedEffects/FixedEffects.jl), Amrei Stammann's [Alpaca](https://github.com/amrei-stammann/alpaca), and Sergio Correia's [ppmlhdfe](https://github.com/sergiocorreia/ppmlhdfe).
## Installation
```
] add GLFixedEffectModels
```
## Example use
```julia
using GLFixedEffectModels, GLM, Distributions
using RDatasets
df = dataset("datasets", "iris")
df.binary = zeros(Float64, size(df,1))
df[df.SepalLength .> 5.0,:binary] .= 1.0
df.SpeciesStr = string.(df.Species)
idx = rand(1:3,size(df,1),1)
a = ["A","B","C"]
df.Random = vec([a[i] for i in idx])
m = @formula binary ~ SepalWidth + fe(Species)
x = nlreg(df, m, Binomial(), LogitLink(), start = [0.2] )
m = @formula binary ~ SepalWidth + PetalLength + fe(Species)
nlreg(df, m, Binomial(), LogitLink(), Vcov.cluster(:SpeciesStr,:Random) , start = [0.2, 0.2] )
```
## Documentation
The main function is `nlreg()`, which returns a `GLFixedEffectModel <: RegressionModel`.
```julia
nlreg(df, formula::FormulaTerm,
distribution::Distribution,
link::GLM.Link,
vcov::CovarianceEstimator; ...)
```
The required arguments are:
* `df`: a Table
* `formula`: A formula created using `@formula`.
* `distribution`: A `Distribution`. See the documentation of [GLM.jl](https://juliastats.org/GLM.jl/stable/manual/#Fitting-GLM-models-1) for valid distributions.
* `link`: A `GLM.Link` function. See the documentation of [GLM.jl](https://juliastats.org/GLM.jl/stable/manual/#Fitting-GLM-models-1) for valid link functions.
* `vcov`: A `CovarianceEstimator` to compute the variance-covariance matrix.
The optional arguments are:
* `save::Union{Bool, Symbol} = false`: Should residuals and eventual estimated fixed effects saved in a dataframe? Use `save = :residuals` to only save residuals. Use `save = :fe` to only save fixed effects.
* `method::Symbol`: A symbol for the method. Default is `:cpu`. Alternatively, `:gpu` requires `CuArrays`. In this case, use the option `double_precision = false` to use `Float32`. This option is the same as for the [FixedEffectModels.jl](https://github.com/FixedEffects/FixedEffectModels.jl) package.
* `double_precision::Bool = true`: Uses 64-bit floats if `true`, otherwise 32-bit.
* `drop_singletons = true` : drop observations that are perfectly classified.
* `contrasts::Dict = Dict()` An optional Dict of contrast codings for each categorical variable in the `formula`. Any unspecified variables will have `DummyCoding`.
* `maxiter::Integer = 1000`: Maximum number of iterations in the Newton-Raphson routine.
* `maxiter_center::Integer = 10000`: Maximum number of iterations for centering procedure.
* `double_precision::Bool`: Should the demeaning operation use Float64 rather than Float32? Default to true.
* `dev_tol::Real` : Tolerance level for the first stopping condition of the maximization routine.
* `rho_tol::Real` : Tolerance level for the stephalving in the maximization routine.
* `step_tol::Real` : Tolerance level that accounts for rounding errors inside the stephalving routine
* `center_tol::Real` : Tolerance level for the stopping condition of the centering algorithm. Default to 1e-8 if `double_precision = true`, 1e-6 otherwise.
* `separation::Vector{Symbol} = Symbol[]` : Method to detect/deal with [separation](https://github.com/sergiocorreia/ppmlhdfe/blob/master/guides/separation_primer.md). Supported elements are `:mu`, `:fe`, `:ReLU`, and in the future, `:simplex`. `:mu` truncates mu at `separation_mu_lbound` or `separation_mu_ubound`. `:fe` finds categories of the fixed effects that only exist when y is at the separation point. `ReLU` detects separation using ReLU, with the maxiter being `separation_ReLU_maxiter` and tolerance being `separation_ReLU_tol`.
* `separation_mu_lbound::Real = -Inf` : Lower bound for the separation detection/correction heuristic (on mu). What a reasonable value would be depends on the model that you're trying to fit.
* `separation_mu_ubound::Real = Inf` : Upper bound for the separation detection/correction heuristic.
* `separation_ReLU_tol::Real = 1e-4` : Tolerance level for the ReLU algorithm.
* `separation_ReLU_maxiter::Integer = 1000` : Maximal number of iterations for the ReLU algorithm.
* `verbose::Bool = false` : If `true`, prints output on each iteration.
The function returns a `GLFixedEffectModel` object which supports the `StatsBase.RegressionModel` abstraction. It can be displayed in table form by using [RegressionTables.jl](https://github.com/jmboehm/RegressionTables.jl).
## Bias correction methods
The package experimentally supports bias correction methods for the following models:
- Binomial regression, Logit link, Two-way, Classic (Fernรกndez-Val and Weidner (2016, 2018))
- Binomial regression, Probit link, Two-way, Classic (Fernรกndez-Val and Weidner (2016, 2018))
- Binomial regression, Logit link, Two-way, Network (Hinz, Stammann and Wanner (2020) & Fernรกndez-Val and Weidner (2016))
- Binomial regression, Probit link, Two-way, Network (Hinz, Stammann and Wanner (2020) & Fernรกndez-Val and Weidner (2016))
- Binomial regression, Logit link, Three-way, Network (Hinz, Stammann and Wanner (2020))
- Binomial regression, Probit link, Three-way, Network (Hinz, Stammann and Wanner (2020))
- Poisson regression, Log link, Three-way, Network (Weidner and Zylkin (2021))
- Poisson regression, Log link, Two-way, Network (Weidner and Zylkin (2021))
## Things that still need to be implemented
- Better default starting values
- Weights
- Better StatsBase interface & prediction
- Better benchmarking
## Related Julia packages
- [FixedEffectModels.jl](https://github.com/FixedEffects/FixedEffectModels.jl) estimates linear models with high dimensional categorical variables (and with or without endogeneous regressors).
- [FixedEffects.jl](https://github.com/FixedEffects/FixedEffects.jl) is a package for fast pseudo-demeaning operations using LSMR. Both this package and [FixedEffectModels.jl](https://github.com/FixedEffects/FixedEffectModels.jl) build on this.
- [Alpaca.jl](https://github.com/jmboehm/Alpaca.jl) is a wrapper to the [Alpaca R package](https://github.com/amrei-stammann/alpaca), which solves the same tasks as this package.
- [GLM.jl](https://github.com/JuliaStats/GLM.jl) estimates generalized linear models, but without explicit support for categorical regressors.
- [Econometrics.jl](https://github.com/Nosferican/Econometrics.jl) provides routines to estimate multinomial logit and other models.
- [RegressionTables.jl](https://github.com/jmboehm/RegressionTables.jl) supports pretty printing of results from this package.
## References
Correia, S. and Guimarรฃes, P, and Zylkin, T., 2019. Verifying the existence of maximum likelihood estimates for generalized linear models. Working paper, https://arxiv.org/abs/1903.01633
Fernรกndez-Val, I. and Weidner, M., 2016. Individual and time effects in nonlinear panel models with large N, T. Journal of Econometrics, 192(1), pp.291-312.
Fernรกndez-Val, I. and Weidner, M., 2018. Fixed effects estimation of large-T panel data models. Annual Review of Economics, 10, pp.109-138.
Fong, DC. and Saunders, M. (2011) *LSMR: An Iterative Algorithm for Sparse Least-Squares Problems*. SIAM Journal on Scientific Computing
Hinz, J., Stammann, A. and Wanner, J., 2021. State dependence and unobserved heterogeneity in the extensive margin of trade.
Stammann, A. (2018) *Fast and Feasible Estimation of Generalized Linear Models with High-Dimensional k-way Fixed Effects*. Mimeo, Heinrich-Heine University Dรผsseldorf
Weidner, M. and Zylkin, T., 2021. Bias and consistency in three-way gravity models. Journal of International Economics, 132, p.103513.
| GLFixedEffectModels | https://github.com/jmboehm/GLFixedEffectModels.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | code | 687 | using ManifestUtilities
using Documenter
DocMeta.setdocmeta!(ManifestUtilities, :DocTestSetup, :(using ManifestUtilities); recursive=true)
makedocs(;
modules=[ManifestUtilities],
authors="Dilum Aluthge and contributors",
repo="https://github.com/bcbi/ManifestUtilities.jl/blob/{commit}{path}#{line}",
sitename="ManifestUtilities.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://bcbi.github.io/ManifestUtilities.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
strict=true,
)
deploydocs(;
repo="github.com/bcbi/ManifestUtilities.jl",
devbranch="main",
)
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | code | 193 | module ManifestUtilities
import LightGraphs
import MetaGraphs
import Pkg
import TOML
export prune_manifest
include("types.jl")
include("pkg.jl")
include("prune_manifest.jl")
end # module
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | code | 471 | @static if Base.VERSION >= v"1.7-"
const write_manifest = Pkg.Types.write_manifest
else
function write_manifest(io::IO, manifest::Dict)
print(io, "# This file is machine-generated - editing it directly is not advised\n\n")
TOML.print(io, manifest, sorted=true) do x
x isa UUID || x isa SHA1 || x isa VersionNumber || pkgerror("unhandled type `$(typeof(x))`")
return string(x)
end
return nothing
end
end
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | code | 5779 | const _prune_manifest_kwargs_docstring = """
## Required Keyword Arguments
You must specify one (and exactly one) of `project` and `project_filename`.
Similarly, you must specify one (and exactly one) of `manifest` and
`manifest_filename`.
- `project::Union{AbstractString, IO}`: the contents of the input `Project.toml` file
- `project_filename::AbstractString`: the filename of the input `Project.toml` file
- `manifest::Union{AbstractString, IO}`: the contents of the input `Manifest.toml` file
- `manifest_filename::AbstractString`: the filename of the input `Manifest.toml` file
"""
"""
prune_manifest(; kwargs...) --> new_manifest::AbstractString
Parse the given project and manifest, and generate a new manifest that only
includes packages that are direct or indirect (recursive) dependencies of the
given project. The new manifest is returned as an `AbstractString`.
$(_prune_manifest_kwargs_docstring)
"""
function prune_manifest(; kwargs...)
return sprint(io -> prune_manifest(io; kwargs...))
end
"""
prune_manifest(io::IO; kwargs...)
Parse the given project and manifest, and generate a new manifest that only
includes packages that are direct or indirect (recursive) dependencies of the
given project. The new manifest is printed to the given `IO`.
$(_prune_manifest_kwargs_docstring)
"""
function prune_manifest(io::IO;
project::Union{AbstractString, IO, Nothing} = nothing,
project_filename::Union{AbstractString, Nothing} = nothing,
manifest::Union{AbstractString, IO, Nothing} = nothing,
manifest_filename::Union{AbstractString, Nothing} = nothing)
if (project !== nothing) && (project_filename !== nothing)
throw(ArgumentError("You may not specify both `project` or `project_filename`; you must only specify one"))
elseif (project !== nothing) && (project_filename === nothing)
project_dict = TOML.parse(project)
elseif (project === nothing) && (project_filename !== nothing)
project_dict = TOML.parsefile(project_filename)
else
throw(ArgumentError("You must specify either `project` or `project_filename`"))
end
if (manifest !== nothing) && (manifest_filename !== nothing)
throw(ArgumentError("You may not specify both `manifest` or `manifest_filename`; you must only specify one"))
elseif (manifest !== nothing) && (manifest_filename === nothing)
manifest_dict = TOML.parse(manifest)
elseif (manifest === nothing) && (manifest_filename !== nothing)
manifest_dict = TOML.parsefile(manifest_filename)
else
throw(ArgumentError("You must specify either `manifest` or `manifest_filename`"))
end
project_struct = ProjectTOMLDict(project_dict)
manifest_struct = ManifestTOMLDict(manifest_dict)
return prune_manifest(io, project_struct, manifest_struct)
end
function prune_manifest(io::IO,
project::ProjectTOMLDict,
manifest::ManifestTOMLDict)
name_to_uuid = Dict{String, Base.UUID}()
for (name, infos_vec) in pairs(manifest.manifest)
info = only(infos_vec)
uuid_string = info["uuid"]
uuid = Base.UUID(uuid_string)
condition = !haskey(name_to_uuid, name)
msg = "duplicate package with name `$(name)`"
condition || throw(ErrorException(msg))
name_to_uuid[name] = uuid
end
for (name, uuid_string_from_project) in pairs(project.project["deps"])
uuid_from_project = Base.UUID(uuid_string_from_project)
condition_1 = haskey(name_to_uuid, name)
msg_1 = "Manifest does not have a dep with name $(name)"
condition_1 || throw(ErrorException(msg_1))
uuid_from_manifest = name_to_uuid[name]
condition_2 = uuid_from_project == uuid_from_manifest
msg_2 = "For dep $(name), project UUID $(uuid_from_project) does not match manifest UUID $(uuid_from_manifest)"
condition_2 || throw(ErrorException(msg_2))
end
graph = MetaGraphs.MetaDiGraph()
MetaGraphs.set_indexing_prop!(graph, :uuid)
for uuid in values(name_to_uuid)
MetaGraphs.add_vertex!(graph, :uuid, uuid)
end
for (name, infos_vec) in pairs(manifest.manifest)
info = only(infos_vec)
uuid_string = info["uuid"]
uuid = Base.UUID(uuid_string)
deps_names = get(info, "deps", String[])
for dep_name in deps_names
dep_uuid = name_to_uuid[dep_name]
a = graph[uuid, :uuid]
b = graph[dep_uuid, :uuid]
MetaGraphs.add_edge!(
graph,
graph[uuid, :uuid],
graph[dep_uuid, :uuid],
)
end
end
project_recursive_dependencies_uuids = Set{Base.UUID}()
for (name, uuid_string_from_project) in pairs(project.project["deps"])
uuid_from_project = Base.UUID(uuid_string_from_project)
out_pars_indices = MetaGraphs.bfs_parents(graph, graph[uuid_from_project, :uuid]; dir = :out)
nonzero_out_pars_indices = findall(x -> x != 0, out_pars_indices)
nonzero_out_pars_uuids = getindex.(Ref(graph), nonzero_out_pars_indices, :uuid)
for nonzero_out_par_uuid in nonzero_out_pars_uuids
push!(project_recursive_dependencies_uuids, nonzero_out_par_uuid)
end
end
output_manifest_dict = Dict{String, Any}()
for (name, infos_vec) in pairs(manifest.manifest)
info = only(infos_vec)
uuid_string = info["uuid"]
uuid = Base.UUID(uuid_string)
if uuid in project_recursive_dependencies_uuids
output_manifest_dict[name] = infos_vec
end
end
write_manifest(io, output_manifest_dict)
return nothing
end
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | code | 174 | struct ProjectTOMLDict{T <: AbstractDict}
project::T
end
struct ManifestTOMLDict{T <: AbstractDict}
manifest::T
end
@enum ManifestFormatVersion manifest_format_1_0
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | code | 2177 | using ManifestUtilities
using Test
@testset "manifest_format_1_0" begin
format_dir = joinpath(@__DIR__, "manifest_format_1_0")
@testset "test_1" begin
test_dir = joinpath(format_dir, "test_1")
project_filename = joinpath(test_dir, "Project_in.toml")
manifest_filename = joinpath(test_dir, "Manifest_in.toml")
project = read(project_filename, String)
manifest = read(manifest_filename, String)
kwargs_list = [
(;
project,
manifest,
),
(;
project = IOBuffer(project),
manifest = IOBuffer(manifest),
),
(;
project_filename,
manifest_filename,
),
]
for kwargs in kwargs_list
x_1_a = prune_manifest(; deepcopy(kwargs)...)
x_1_b = sprint(io -> prune_manifest(io; deepcopy(kwargs)...))
@test strip(x_1_a) == strip(x_1_b)
x_2 = read(joinpath(test_dir, "Manifest_out_correct.toml"), String)
x_3 = read(joinpath(test_dir, "Manifest_out_incorrect.toml"), String)
for x_1 in [x_1_a, x_1_b]
@test strip(x_1) == strip(x_2)
@test strip(x_2) != strip(x_3)
end
end
end
end
@testset "errors" begin
format_dir = joinpath(@__DIR__, "manifest_format_1_0")
test_dir = joinpath(format_dir, "test_1")
project_filename = joinpath(test_dir, "Project_in.toml")
manifest_filename = joinpath(test_dir, "Manifest_in.toml")
project = read(project_filename, String)
manifest = read(manifest_filename, String)
kwargs_and_exceptions = [
(;) => ArgumentError,
(;
project,
) => ArgumentError,
(;
project,
project_filename,
) => ArgumentError,
(;
manifest,
manifest_filename,
) => ArgumentError,
]
for (kwargs, ex) in kwargs_and_exceptions
@test_throws ex prune_manifest(; deepcopy(kwargs)...)
@test_throws ex sprint(io -> prune_manifest(io; deepcopy(kwargs)...))
end
end
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | docs | 525 | # ManifestUtilities
[](https://bcbi.github.io/ManifestUtilities.jl/stable)
[](https://bcbi.github.io/ManifestUtilities.jl/dev)
[](https://github.com/bcbi/ManifestUtilities.jl/actions)
[](https://codecov.io/gh/bcbi/ManifestUtilities.jl)
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 1.1.0 | 5f5ca27f3ca25e4f7f9d6f7d1052961e923ad61a | docs | 247 | ```@meta
CurrentModule = ManifestUtilities
```
# ManifestUtilities
Documentation for [ManifestUtilities](https://github.com/bcbi/ManifestUtilities.jl).
```@index
```
```@autodocs
Modules = [ManifestUtilities]
Public = true
Private = false
```
| ManifestUtilities | https://github.com/bcbi/ManifestUtilities.jl.git |
|
[
"MIT"
] | 0.1.1 | 6189369b2b3870d762c714f02a62b7097f2adcb3 | code | 970 | module MajoranaReps_MathLink
if !@isdefined(TabLevel)
TabLevel = ""
end
println(TabLevel*"Open MajoranaReps_MathLink.jl")
TabLevel=TabLevel*" "
using MathLink
using MajoranaReps
import Base.*
*(x::MathLink.WTypes,y::MajoranaReps.MajTypes) = OpScale(x,y)
*(x::MajoranaReps.MajTypes,y::MathLink.WTypes) = OpScale(y,x)
function ScaleToStr(scale::MathLink.WTypes,tex::Bool,KetState::Bool,NumOps::Int,FirstTerm::Bool)
if FirstTerm
return "$(scale)*"
else
return "+ $(scale)*"
end
end
import Base.isreal
isreal(::MathLink.WTypes) = false ##Do not assume real
import Base.length
length(::MathLink.WTypes) = 1 ##We treat them as scalars
import Base.iterate
iterate(x::MathLink.WTypes) = (x, nothing)
function iterate(x::MathLink.WTypes, state)
if state == nothing
return nothing
else
error("state = $state")
end
end
TabLevel=TabLevel[1:end-4]
println(TabLevel*"Close MajoranaReps_MathLink.jl")
end
| MajoranaReps_MathLink | https://github.com/fremling/MajoranaReps_MathLink.jl.git |
|
[
"MIT"
] | 0.1.1 | 6189369b2b3870d762c714f02a62b7097f2adcb3 | code | 988 | #include("../src/MajoranaReps.jl")
using MajoranaReps
using MathLink
using MajoranaReps_MathLink
using Test
MathLink.set_GreedyEval(true)
@testset "Majorana Operators and MathLink Expressions" begin
@test W"a" * Ket() == Ket() * W"a"
@test W"a" * c(1) == c(1) * W"a"
@test (W"a" * c(1) + W"b" * c(2)) == ( W"b" * c(2) + W"a" * c(1))
@test W"a"*bx(1) == W"a"*bx(1)
@test W"a"*W"b"*bx(1) == W`a b`*bx(1)
@test W"a"*bx(1)*Ket() == W"a"*bx(1)*Ket()
@test W"a"*W"b"*bx(1)*Ket() == W`a b`*bx(1)*Ket()
@test "$(W"a"*Ket())" == "(a) |0>"
@test "$((W"a"+W"a")*Ket())" == "(W\"Times\"(2, W\"a\")) |0>"
@test "$((W"a"+W"b")*Ket())" == "(W\"Plus\"(W\"a\", W\"b\")) |0>"
@test length(W"a") == 1
@test length(W"a"+W"b") == 1
@test collect(W"a") == Any[W"a"]
@test collect(W"a"+W"b") == Any[W"a"+W"b"]
@test W"a" .* fill(1) == [W"a"]
@test W"a" .* fill(1,1) == [W"a"]
@test W"a" .* fill(1,2) == [W"a",W"a"]
end
| MajoranaReps_MathLink | https://github.com/fremling/MajoranaReps_MathLink.jl.git |
|
[
"MIT"
] | 0.1.1 | 6189369b2b3870d762c714f02a62b7097f2adcb3 | docs | 1576 | This README gives a brief introduction to the MajoranaReps package.
## The Majoranas ##
For the time being there are only 4 types of majoranas `bx`, `by`, `bz` and `c`,
since the package was developed with the Kitaev Honeycomb Model in mind.
The vacuums is thus organised such that `by(j)|0> == -i*bx(j)|0>`, `bz(j)|0> == -i*c(j)|0>`. There transforamtions are applied automatically.
This both of these lines will evaluate to true
im*bx(1)*Ket() == -by(1)*Ket()
im*c(1)*Ket() == -bz(1)*Ket()
## Inner products ##
The package supports inner products through the constructions
IP = OpInnerProd(State1,State2)
IP = OpInnerProd(State1,Operator,State2)
The inner product automatically handles vector quantities. For instance, if one defines two different bases
Basis1 = [ bx(1)*Ket(), bx(2)*Ket(), bx(3)*Ket(),bx(1)*bx(2)*bx(3)*Ket()]
Basis2 = [ by(1)*Ket(), by(2)*Ket(), by(3)*Ket(),by(1)*by(2)*by(3)*Ket()]
and then apply `OpInnerProd(Basis1,Basis2)` one will as a result get the 4x4 array
4ร4 Array{Complex{Int64},2}:
0-1im 0+0im 0+0im 0+0im
0+0im 0-1im 0+0im 0+0im
0+0im 0+0im 0-1im 0+0im
0+0im 0+0im 0+0im 0+1im
## MathLink ##
The package also supports the use of [MathLink](https://github.com/JuliaInterop/MathLink.jl) and [MathLinkExtras](https://github.com/fremling/MathLinkExtras.jl) syntax for algebraic prefactors.
This allows for constructions like `W"a"*bx(1)` or ```W`-I b`*by(1)```
NB: `MathLink` needs to be loaded before invoking `MajoranaReps` the first time.
# MajoranaReps_MathLink.jl
| MajoranaReps_MathLink | https://github.com/fremling/MajoranaReps_MathLink.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 1130 | using Documenter, MultivariateAnomalies
makedocs(
modules = [MultivariateAnomalies],
clean = false,
format = :html,
sitename = "MultivariateAnomalies.jl",
pages = Any[ # Compat: `Any` for 0.4 compat
"Home" => "index.md",
"Manual" => Any[
"High Level Functions" => "man/HighLevelFunctions.md",
"Anomaly Detection Algorithms" => "man/DetectionAlgorithms.md",
"Distance and Densities" => "man/DistancesDensity.md",
"Postprocessing" => "man/Postprocessing.md",
"Preprocessing" => "man/Preprocessing.md",
"AUC" => "man/AUC.md",
"OnlineAlgorithms" => "man/OnlineAlgorithms.md"
]
]
)
deploydocs(
repo = "github.com/milanflach/MultivariateAnomalies.jl.git",
#julia = "0.7",
#deps = nothing,
#make = nothing,
target = "build"
)
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 1512 | #using Base.Cartesian
"""
auc(scores, events, increasing = true)
compute the Area Under the receiver operator Curve (AUC), given some output `scores` array and some ground truth (`events`).
By default, it is assumed, that the `scores` are ordered increasingly (`increasing = true`), i.e. high scores represent events.
AUC is computed according to Fawcett, T. (2006). An introduction to ROC analysis. Pattern Recognition Letters, 27(8), 861โ874. http://doi.org/10.1016/j.patrec.2005.10.010
# Examples
```
julia> scores = rand(10, 2)
julia> events = rand(0:1, 10, 2)
julia> auc(scores, events)
julia> auc(scores, boolevents(events))
```
"""
function auc(scores,events; increasing::Bool = true)
s = sortperm(reshape(scores,length(scores)),rev=increasing);
length(scores) == length(events) || error("Scores and events must have same number of elements")
f=scores[s]
L=events[s]
fp=0
tp=0
fpprev=0
tpprev=0
A=0.0
fprev=-Inf
P=sum(L)
N=length(L)-P
for i=1:length(L)
if f[i]!=fprev
A+=trap_area(fp,fpprev,tp,tpprev)
@inbounds fprev=f[i]
fpprev=fp
tpprev=tp
end
if isextreme(L[i])
tp+=1
else
fp+=1
end
end
A+=trap_area(N,fpprev,P,tpprev)
A=A/(P*N)
end
function trap_area(x1,x2,y1,y2)
b=abs(x1-x2)
h=0.5*(y1+y2)
return b*h
end
isextreme(l::Bool)=l
isextreme(l::Integer)=l>0
###################################
# end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 20574 | using LIBSVM
# function input D is a distance matrix, K is a Kernel matrix
# count number of recurrences per time point i
"""
REC(D::AbstractArray, rec_threshold::Float64, temp_excl::Int = 0)
Count the number of observations (recurrences) which fall into a radius `rec_threshold` of a distance matrix `D`. Exclude steps which are closer than `temp_excl` to be count as recurrences (default: `temp_excl = 5`)
Marwan, N., Carmen Romano, M., Thiel, M., & Kurths, J. (2007). Recurrence plots for the analysis of complex systems. Physics Reports, 438(5-6), 237โ329. http://doi.org/10.1016/j.physrep.2006.11.001
"""
function REC(D::AbstractArray, rec_threshold::Float64, temp_excl::Int = 0)
rec_out = init_REC(D)
REC!(rec_out, D, rec_threshold, temp_excl)
return(rec_out)
end
"""
init_REC(D::Array{Float64, 2})
init_REC(T::Int)
get object for memory efficient `REC!()` versions. Input can be a distance matrix `D` or the number of timesteps (observations) `T`.
"""
function init_REC(T::Int)
rec_out = zeros(Float64, T)
end
function init_REC(D::Array{Float64, 2})
rec_out = zeros(Float64, size(D, 1))
end
"""
REC!(rec_out::AbstractArray, D::AbstractArray, rec_threshold::Float64, temp_excl::Int = 0)
Memory efficient version of `REC()` for use within a loop. `rec_out` is preallocated output, should be initialised with `init_REC()`.
"""
function REC!(rec_out::AbstractArray, D::AbstractArray, rec_threshold::Float64, temp_excl::Int = 0)
N = size(D, 1)
@assert temp_excl < N - 1
@inbounds for i = 1:N
rec_out[i] = 0.0
if(i-temp_excl-1 >= 1)
for j = 1:(i-temp_excl-1)
if D[i, j] < rec_threshold && i != j
rec_out[i] = rec_out[i] + 1
end
end
end
if(i+temp_excl+1 <= N)
for j = (i+temp_excl+1):N
if D[i, j] < rec_threshold && i != j
rec_out[i] = rec_out[i] + 1
end
end
end
end
broadcast!(/, rec_out, rec_out, N)
broadcast!(-, rec_out, 1.0, rec_out)
return(rec_out)
end
"""
KDE(K)
Compute a Kernel Density Estimation (the Parzen sum), given a Kernel matrix `K`.
Parzen, E. (1962). On Estimation of a Probability Density Function and Mode. The Annals of Mathematical Statistics, 33, 1โ1065โ1076.
"""
function KDE(K::AbstractArray)
KDEout = init_KDE(size(K, 1))
KDE!(KDEout, K)
return(KDEout)
end
"""
init_KDE(K::Array{Float64, 2})
init_KDE(T::Int)
Returns `KDE_out` object for usage in `KDE!()`. Use either a Kernel matrix `K` or the number of time steps/observations `T` as argument.
"""
function init_KDE(T::Int)
KDE_out = zeros(Float64, T)
end
function init_KDE(K::Array{Float64, 2})
KDE_out = zeros(Float64, size(K, 1))
end
"""
KDE!(KDE_out, K)
Memory efficient version of `KDE()`. Additionally uses preallocated `KDE_out` object for writing the results. Initialize `KDE_out` with `init_KDE()`.
"""
function KDE!(KDE_out, K::AbstractArray)
@assert size(K, 1) == size(KDE_out, 1)
mean!(KDE_out, K)
broadcast!(-, KDE_out, 1.0, KDE_out)
return(KDE_out)
end
"""
init_T2(VAR::Int, T::Int)
init_T2{tp}(data::AbstractArray{tp,2})
initialize `t2_out` object for `T2!` either with number of variables `VAR` and observations/time steps `T` or with a two dimensional `data` matrix (time * variables)
"""
function init_T2(VAR::Int, T::Int)
diagS = zeros(Float64, VAR, VAR);
Qinv = zeros(Float64, VAR, VAR);
data_norm = zeros(Float64, T, VAR);
cdata = zeros(Float64, T, VAR);
maha = zeros(Float64, T);
t2_out = (maha, diagS, cdata, Qinv, data_norm)
return(t2_out)
end
function init_T2(data::AbstractArray{tp,N}) where {tp, N}
VAR = size(data, N)
T = size(data, 1)
diagS = zeros(tp, VAR, VAR);
Qinv = zeros(tp, VAR, VAR);
data_norm = zeros(tp, T, VAR);
cdata = zeros(tp, T, VAR);
maha = zeros(tp, T);
t2_out = (maha, diagS, cdata, Qinv, data_norm)
return(t2_out)
end
"""
T2!(t2_out, data, Q[, mv])
Memory efficient version of `T2()`, for usage within a loop etc. Initialize the `t2_out` object with `init_T2()`.
`t2_out[1]` contains the squred Mahalanobis distance after computation.
"""
function T2!(t2_out::Tuple{Array{tp,1},Array{tp,2},Array{tp,2},Array{tp,2},Array{tp,2}}
, data::AbstractArray{tp,2}, Q::AbstractArray{tp,2}, mv = 0) where {tp}
(maha, diagS, cdata, Qinv, data_norm) = t2_out
if(mv == 0)
copyto!(cdata, data)
elseif(size(mv, 1) == 1)
copyto!(cdata, data .- mv)
elseif(size(mv, 1) != 1)
copyto!(cdata, data .- mv')
end
USVt = svd(Q)
copyto!(view(diagS, diagind(diagS)), (USVt.S .+ 1e-10) .^ (-0.5))
transpose!(Qinv, USVt.U * diagS * USVt.Vt)
copyto!(data_norm, cdata * Qinv)
broadcast!(*, data_norm, data_norm, data_norm)
sum!(maha, data_norm)
return(t2_out[1])
end
# Hotelling's T^2 (Mahalanobis distance to the data mean)
# input is time * var data matrix
"""
T2{tp}(data::AbstractArray{tp,2}, Q::AbstractArray[, mv])
Compute Hotelling's T2 control chart (the squared Mahalanobis distance to the data's mean vector (`mv`), given the covariance matrix `Q`).
Input data is a two dimensional data matrix (observations * variables).
Lowry, C. A., & Woodall, W. H. (1992). A Multivariate Exponentially Weighted Moving Average Control Chart. Technometrics, 34, 46โ53.
"""
function T2(data::AbstractArray{tp,2}, Q::AbstractArray{tp, 2}, mv = 0) where {tp}
t2_out = init_T2(data)
T2!(t2_out, data, Q, mv)
return(t2_out[1])
end
# mean of k nearest neighbor distances
"""
KNN_Gamma(knn_dists_out)
This function computes the mean distance of the K nearest neighbors given a `knn_dists_out` object from `knn_dists()` as input argument.
Harmeling, S., Dornhege, G., Tax, D., Meinecke, F., & Mรผller, K.-R. (2006). From outliers to prototypes: Ordering data. Neurocomputing, 69(13-15), 1608โ1618. http://doi.org/10.1016/j.neucom.2005.05.015
"""
function KNN_Gamma(knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}})
NNdists = knn_dists_out[5]
N = size(NNdists,1)
KNN_Gamma_out = zeros(Float64, N)
mean!(KNN_Gamma_out, NNdists)
return(KNN_Gamma_out)
end
#T: number of timesteps in the datacube
"""
init_KNN_Gamma(T::Int)
init_KNN_Gamma(knn_dists_out)
initialize a `KNN_Gamma_out` object for `KNN_Gamma!` either with `T`, the number of observations/time steps or with a `knn_dists_out` object.
"""
function init_KNN_Gamma(T::Int)
KNN_Gamma_out = zeros(Float64, T)
end
function init_KNN_Gamma(knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}})
KNN_Gamma_out = zeros(Float64, size(knn_dists_out[2], 1))
end
"""
KNN_Gamma!(KNN_Gamma_out, knn_dists_out)
Memory efficient version of `KNN_Gamma`, to be used in a loop. Initialize `KNN_Gamma_out` with `init_KNN_Gamma()`.
"""
function KNN_Gamma!(KNN_Gamma_out, knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}})
NNdists = knn_dists_out[5]
@assert size(NNdists,1) == size(KNN_Gamma_out,1) || error("input size KNN_Gamma_out and NNdists not equal")
N = size(NNdists,1)
mean!(KNN_Gamma_out, NNdists)
return(KNN_Gamma_out)
end
"""
init_KNN_Delta(T, VAR, k)
return a `KNN_Delta_out` object to be used for `KNN_Delta!`. Input: time steps/observations `T`, variables `VAR`, number of K nearest neighbors `k`.
"""
function init_KNN_Delta(T::Int, VAR::Int, k::Int)
r = Array{Float64}(undef, T)
x_i = Array{Float64}(undef, 1, VAR)
d_x = Array{Float64}(undef, k, VAR)
KNN_Delta_out = (r, x_i, d_x)
return(KNN_Delta_out)
end
function init_KNN_Delta(knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}}, VAR::Int)
T = size(knn_dists_out[2], 1)
K = knn_dists_out[1]
r = Array{Float64}(undef, T)
x_i = Array{Float64}(undef, 1, VAR)
d_x = Array{Float64}(undef, K, VAR)
KNN_Delta_out = (r, x_i, d_x)
return(KNN_Delta_out)
end
"""
KNN_Delta!(KNN_Delta_out, knn_dists_out, data)
Memory Efficient Version of `KNN_Delta()`. `KNN_Delta_out[1]` is the vector difference of the k-nearest neighbors.
"""
function KNN_Delta!(KNN_Delta_out::Tuple{Array{Float64,1},Array{Float64,2},Array{Float64,2}}
, knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}}
, data::AbstractArray{Float64,2})
(r, x_i, d_x) = KNN_Delta_out
indices = knn_dists_out[4]
K = size(indices, 2)
T = size(data,1) #dimensions
VAR = size(data, 2)
@assert size(x_i, 2) == size(d_x, 2) == VAR || error("size of d_X and x_i not size of size(datacube, 4)")
@assert size(r, 1) == T || error("size of r not of size(datacube ,1)")
dists = 0.0
for i = 1:T
dists = 0.0
inds=indices[i,:]
for k=1:length(inds), j=1:size(data,2)
d_x[k,j] = data[i,j] - data[inds[k],j]
end
sum!(x_i, d_x)
for j = 1:VAR
dists += (x_i[1,j] / K)^2
end
r[i] = sqrt.(dists)
end
return(r)
end
"""
KNN_Delta(knn_dists_out, data)
Compute Delta as vector difference of the k-nearest neighbors. Arguments are a `knn_dists()` object (`knn_dists_out`) and a `data` matrix (observations * variables)
Harmeling, S., Dornhege, G., Tax, D., Meinecke, F., & Mรผller, K.-R. (2006). From outliers to prototypes: Ordering data. Neurocomputing, 69(13-15), 1608โ1618. http://doi.org/10.1016/j.neucom.2005.05.015
"""
function KNN_Delta(knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}}
, data::AbstractArray{Float64,2})
KNN_Delta_out = init_KNN_Delta(knn_dists_out, size(data, 2))
KNN_Delta!(KNN_Delta_out, knn_dists_out, data)
return(KNN_Delta_out[1])
end
"""
init_UNIV(T::Int, VAR::Int)
init_UNIV{tp}(data::AbstractArray{tp, 2})
initialize a `univ_out` object to be used in `UNIV!()` either with number of time steps/observations `T` and variables `VAR` or with a `data` matrix observations * variables.
"""
function init_UNIV(T::Int, VAR::Int)
var_dat = zeros(tp, T)
varquants = zeros(Float64,T, VAR)
univ_out = (var_dat, varquants)
return univ_out
end
function init_UNIV(data::AbstractArray{tp, N}) where {tp,N}
T = size(data, 1)
VAR = size(data, N)
var_dat = zeros(tp, T)
varquants = zeros(Float64,T, VAR)
univ_out = (var_dat, varquants)
return univ_out
end
"""
UNIV!(univ_out, data)
Memory efficient version of `UNIV()`, input an `univ_out` object from `init_UNIV()` and some `data` matrix observations * variables
"""
function UNIV!(univ_out::Tuple{Array{tp,1},Array{Float64,2}}, data::AbstractArray{tp, N}) where {tp, N}
(var_dat, varquants) = univ_out
@assert size(var_dat, 1) == size(varquants, 1) == size(data, 1)
@assert size(varquants, 2) == size(data, 2)
for variable = 1:size(data, 2)
# copy with little allocation
copyto!(var_dat, view(data, :, variable))
get_quantile_scores!(view(varquants, :, variable), var_dat)
end
maximum!(var_dat, varquants)
return var_dat
end
"""
UNIV(data)
order the values in each varaible and return their maximum, i.e. any of the variables in `data` (observations * variables) is above a given quantile,
the highest quantile will be returned.
"""
function UNIV(data::AbstractArray{tp, N}) where {tp, N}
univ_out = init_UNIV(data)
return UNIV!(univ_out, data)
end
"""
SVDD_train(K, nu)
train a one class support vecort machine model (i.e. support vector data description), given a kernel matrix K and and the highest possible percentage of outliers `nu`.
Returns the model object (`svdd_model`). Requires LIBSVM.
Tax, D. M. J., & Duin, R. P. W. (1999). Support vector domain description. Pattern Recognition Letters, 20, 1191โ1199.
Schรถlkopf, B., Williamson, R. C., & Bartlett, P. L. (2000). New Support Vector Algorithms. Neural Computation, 12, 1207โ1245.
"""
function SVDD_train(K::AbstractArray, nu::Float64)
# function in LIBSVM.jl for optional parameter settings.
#svdd_model = svmtrain(fill(1, size(K, 1)), K
# , kernel_type = Int32(4), svm_type = Int32(2), nu = nu
# , probability_estimates = false);
svdd_model = svmtrain(K, svmtype = OneClassSVM, nu = nu, kernel = Kernel.Precomputed);
return(svdd_model)
end
"""
SVDD_predict(svdd_model, K)
predict the outlierness of an object given the testing Kernel matrix `K` and the `svdd_model` from SVDD_train(). Requires LIBSVM.
Tax, D. M. J., & Duin, R. P. W. (1999). Support vector domain description. Pattern Recognition Letters, 20, 1191โ1199.
Schรถlkopf, B., Williamson, R. C., & Bartlett, P. L. (2000). New Support Vector Algorithms. Neural Computation, 12, 1207โ1245.
"""
function SVDD_predict(svdd_model::LIBSVM.SVM, K::AbstractArray)
(predicted_labels, decision_values) = svmpredict(svdd_model, K)
end
"""
KNFST_predict(model, K)
predict the outlierness of some data (represented by the kernel matrix `K`), given some KNFST `model` from `KNFST_train(K)`. Compute `K`with `kernel_matrix()`.
Paul Bodesheim and Alexander Freytag and Erik Rodner and Michael Kemmler and Joachim Denzler:
"Kernel Null Space Methods for Novelty Detection". Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013.
"""
function KNFST_predict(KNFST_mod, K)
knfst_out = init_KNFST(size(K,2),KNFST_mod)
KNFST_predict!(knfst_out, KNFST_mod, K)
return(knfst_out)
end
"""
init_KNFST(T, KNFST_mod)
initialize a `KNFST_out`object for the use with `KNFST_predict!`, given `T`, the number of observations and the model output `KNFST_train(K)`.
"""
function init_KNFST(T::Int, KNFST_mod::Tuple{Array{Float64,2},Array{Float64,2}})
diffs = zeros(Float64, T, size(KNFST_mod[2], 2));
scores = zeros(Float64, T);
Ktransposed = zeros(Float64, T, size(KNFST_mod[1],1));
KNFST_out = (scores, diffs, Ktransposed)
return(KNFST_out)
end
"""
KNFST_predict!(KNFST_out, KNFST_mod, K)
predict the outlierness of some data (represented by the kernel matrix `K`), given a `KNFST_out` object (`init_KNFST()`), some KNFST model (`KNFST_mod = KNFST_train(K)`)
and the testing kernel matrix K.
Paul Bodesheim and Alexander Freytag and Erik Rodner and Michael Kemmler and Joachim Denzler:
"Kernel Null Space Methods for Novelty Detection". Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013.
"""
function KNFST_predict!(scores, diffs, Ktransposed, proj, targetValue, K)
@assert size(scores, 1) == size(K, 2) == size(diffs, 1)
@assert size(Ktransposed, 1) == size(K, 2) && size(Ktransposed, 2) == size(K, 1)
transpose!(Ktransposed, K)
# projected test samples: Ktransposed * model["proj"]
mul!(diffs, Ktransposed, proj)
# differences to the target value:
broadcast!(-,diffs,diffs, targetValue)
broadcast!(*,diffs, diffs, diffs)
sum!(scores, diffs)
broadcast!(sqrt, scores, scores)
return(scores)
end
function KNFST_predict!(KNFST_out::Tuple{Array{Float64,1},Array{Float64,2},Array{Float64,2}}
, KNFST_mod::Tuple{Array{Float64,2},Array{Float64,2}}, K::Array{Float64,2})
(scores, diffs, Ktransposed) = KNFST_out
(proj, targetValue) = KNFST_mod
KNFST_predict!(scores, diffs, Ktransposed, proj, targetValue, K)
return(scores)
end
# Learning method for novelty detection with KNFST according to the work:
#
# Paul Bodesheim and Alexander Freytag and Erik Rodner and Michael Kemmler and Joachim Denzler:
# "Kernel Null Space Methods for Novelty Detection".
# Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013.
#
# Please cite that paper if you are using this code!
#
# proj = calculateKNFST(K, labels)
#
# calculates projection matrix of KNFST
#
# INPUT:
# K -- (n x n) kernel matrix containing similarities of n training samples
# labels -- (n x 1) vector containing (multi-class) labels of the n training samples
#
# OUTPUT:
# proj -- projection matrix for data points (project x via kx*proj,
# where kx is row vector containing kernel values of x and
# training data)
#
#
function calculateKNFST(K, labels)
classes = unique(labels);
### check labels
if length(unique(labels)) == 1
error("calculateKNFST.jl: not able to calculate a nullspace from data of a single class using KNFST (input variable 'labels' only contains a single value)");
end
### check kernel matrix
(n,m) = size(K);
if n != m
error("calculateKNFST.jl: kernel matrix must be quadratic");
end
### calculate weights of orthonormal basis in kernel space
centeredK = copy(K); # because we need original K later on again
centerKernelMatrix(centeredK);
(basisvecsValues,basisvecs) = eigen(centeredK);
basisvecs = basisvecs[:,basisvecsValues .> 1e-12];
basisvecsValues = basisvecsValues[basisvecsValues .> 1e-12];
basisvecsValues = Diagonal(1 ./ sqrt.(basisvecsValues));
basisvecs = basisvecs*basisvecsValues;
### calculate transformation T of within class scatter Sw:
### T= B'*Sw*B = H*H' and H = B'*K*(I-L) and L a block matrix
L = zeros(n,n);
for i=1:length(classes)
L[labels .== classes[i], labels .== classes[i]] .= 1.0 / sum(labels .== classes[i]);
end
### need Matrix M with all entries 1/m to modify basisvecs which allows usage of
### uncentered kernel values: (eye(size(M))-M)*basisvecs
M = ones(m,m)./m;
### compute helper matrix H
H = ((I - M)*basisvecs)' * K * (I - L);
### T = H*H' = B'*Sw*B with B=basisvecs
T = H*H';
### calculate weights for null space
eigenvecs = nullspace(T);
if size(eigenvecs,2) < 1
(eigenvals,eigenvecs) = eigen(T);
(min_val,min_ID) = findmin(eigenvals);
eigenvecs = eigenvecs[:,min_ID];
end
### calculate null space projection and return it
proj = ((I - M) * basisvecs) * eigenvecs;
end
#############################################################################################################
function centerKernelMatrix(kernelMatrix)
# centering the data in the feature space only using the (uncentered) Kernel-Matrix
#
# INPUT:
# kernelMatrix -- uncentered kernel matrix
# OUTPUT:
# centeredKernelMatrix -- centered kernel matrix
### get size of kernelMatrix
n = size(kernelMatrix, 1);
### get mean values of each row/column
columnMeans = mean(kernelMatrix, dims = 2); ### NOTE: columnMeans = rowMeans because kernelMatrix is symmetric
matrixMean = mean(columnMeans);
centeredKernelMatrix = kernelMatrix;
for k=1:n
for j=1:n
centeredKernelMatrix[k,j] -= columnMeans[j];
centeredKernelMatrix[j,k] -= columnMeans[j];
end
end
#This line will not have any effect
centeredKernelMatrix .+= matrixMean;
end
"""
KNFST_train(K)
train a one class novelty KNFST model on a Kernel matrix `K` according to
Paul Bodesheim and Alexander Freytag and Erik Rodner and Michael Kemmler and Joachim Denzler:
"Kernel Null Space Methods for Novelty Detection". Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2013.
# Output
`(proj, targetValue)`
`proj` -- projection vector for data points (project x via kx*proj, where kx is row vector containing kernel values of x and training data)
`targetValue` -- value of all training samples in the null space
"""
function KNFST_train(K)
# get number of training samples
n = size(K,1);
# include dot products of training samples and the origin in feature space (these dot products are always zero!)
K_ext = [K zeros(n,1); zeros(1,n) 0];
# create one-class labels + a different label for the origin
labels = push!(ones(n),0);
# get model parameters
proj = calculateKNFST(K_ext,labels);
targetValue = mean(K_ext[labels.==1,:]*proj, dims = 1);
proj = proj[1:n,:];
# return both variables
return proj, targetValue
end
"""
Dist2Centers(centers::AbstractArray{tp, 2}) where {tp}
Compute the distance to the nearest centers of i.e. a K-means clustering output.
Large Distances to the nearest center are anomalies. data: Observations * Variables.
# Example
`(proj, targetValue)`
"""
function Dist2Centers(data::AbstractArray{tp,2}, centers::AbstractArray{tp,2}) where {tp}
(minD, minDIdx) = findmin(pairwise(Euclidean(), data', centers'), 2)
clustassgin = zeros(Int, size(data, 1))
for i = 1:size(data, 1)
(u, clustassgin[i]) = ind2sub((size(data, 1), size(centers, 1)), minDIdx[i])
end
return minD, clustassgin
end
###################################
# end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 9927 | using Distances
"""
init_dist_matrix(data)
init_dist_matrix(data, training_data)
initialize a `D_out` object for `dist_matrix!()`.
"""
function init_dist_matrix(data::AbstractArray{tp, N}) where {tp, N}
T = size(data, 1)
dat = zeros(tp, T, size(data, N))
tdat = zeros(tp, size(data, N), T)
D = zeros(tp, T, T)
D_out = (D, dat, tdat)
return(D_out)
end
function init_dist_matrix(data::AbstractArray{tp, N}, training_data::AbstractArray{tp, N}) where {tp, N}
T = size(data, 1)
Ttrain = size(training_data, 1)
dat = zeros(tp, T, size(data, N))
traindat = zeros(tp, Ttrain, size(training_data, N))
tdat = zeros(tp, size(data, N), T)
ttraindat = zeros(tp, size(training_data, N), Ttrain)
D = zeros(tp, Ttrain, T)
D_out = (D, dat, tdat, traindat, ttraindat)
return(D_out)
end
"""
dist_matrix!(D_out, data, ...)
compute the distance matrix of `data`, similar to `dist_matrix()`. `D_out` object has to be preallocated, i.e. with `init_dist_matrix`.
# Examples
```
julia> dc = randn(10,4, 4,3)
julia> D_out = init_dist_matrix(dc)
julia> dist_matrix!(D_out, dc, lat = 2, lon = 2)
julia> D_out[1]
```
"""
function dist_matrix!(D_out::Tuple{Array{tp,2},Array{tp,2},Array{tp,2}}, data::AbstractArray{tp, N}; dist::String = "Euclidean", space::Int = 0, lat::Int = 0, lon::Int = 0, Q = 0, dims = 2) where {tp, N}
#@assert N == 2 || N == 3 || N = 4
(D, dat, tdat) = D_out
if N == 2 copyto!(dat, data) end
if N == 3 copyto!(dat, view(data, :, space, :)) end
if N == 4 copyto!(dat, view(data, :, lat, lon, :)) end
transpose!(tdat, dat)
if(dist == "Euclidean") pairwise!(D, Euclidean(), tdat, dims = dims)
elseif(dist == "SqEuclidean") pairwise!(D, SqEuclidean(), tdat, dims = dims)
elseif(dist == "Chebyshev") pairwise!(D, Chebyshev(), tdat, dims = dims)
elseif(dist == "Cityblock") pairwise!(D, Cityblock(), tdat, dims = dims)
elseif(dist == "JSDivergence") pairwise!(D, JSDivergence(), tdat, dims = dims)
elseif(dist == "Mahalanobis") pairwise!(D, Mahalanobis(Q), tdat, dims = dims)
elseif(dist == "SqMahalanobis") pairwise!(D, SqMahalanobis(Q), tdat, dims = dims)
else print("$dist is not a defined distance metric, has to be one of 'Euclidean', 'SqEuclidean', 'Chebyshev', 'Cityblock' or 'JSDivergence'")
end
return(D_out[1])
end
function dist_matrix!(D_out::Tuple{Array{tp,2},Array{tp,2},Array{tp,2},Array{tp,2},Array{tp,2}},
data::AbstractArray{tp, N}, training_data::AbstractArray{tp, N}; dist::String = "Euclidean", space::Int = 0, lat::Int = 0, lon::Int = 0, Q = 0, dims = 2) where {tp, N}
#@assert N == 2 || N == 3 || N = 4
(D, dat, tdat, traindat, ttraindat) = D_out
if N == 2 copyto!(dat, data) end
if N == 3 copyto!(dat, view(data, :, space, :)) end
if N == 4 copyto!(dat, view(data, :, lat, lon, :)) end
if N == 2 copyto!(traindat, training_data) end
if N == 3 copyto!(traindat, view(training_data, :, space, :)) end
if N == 4 copyto!(traindat, view(training_data, :, lat, lon, :)) end
transpose!(tdat, dat)
transpose!(ttraindat, traindat)
if(dist == "Euclidean") pairwise!(D, Euclidean(), ttraindat, tdat, dims = dims)
elseif(dist == "SqEuclidean") pairwise!(D, SqEuclidean(), ttraindat, tdat, dims = dims)
elseif(dist == "Chebyshev") pairwise!(D, Chebyshev(), ttraindat, tdat, dims = dims)
elseif(dist == "Cityblock") pairwise!(D, Cityblock(), ttraindat, tdat, dims = dims)
elseif(dist == "JSDivergence") pairwise!(D, JSDivergence(), ttraindat, tdat, dims = dims)
elseif(dist == "Mahalanobis") pairwise!(D, Mahalanobis(Q), ttraindat, tdat, dims = dims)
elseif(dist == "SqMahalanobis") pairwise!(D, SqMahalanobis(Q), ttraindat, tdat, dims = dims)
else print("$dist is not a defined distance metric, has to be one of 'Euclidean', 'SqEuclidean', 'Chebyshev', 'Cityblock' or 'JSDivergence'")
end
return(D_out[1])
end
"""
dist_matrix(data::AbstractArray{tp, N}; dist::String = "Euclidean", space::Int = 0, lat::Int = 0, lon::Int = 0, Q = 0) where {tp, N}
dist_matrix(data::AbstractArray{tp, N}, training_data; dist::String = "Euclidean", space::Int = 0, lat::Int = 0, lon::Int = 0, Q = 0) where {tp, N}
compute the distance matrix of `data` or the distance matrix between data and training data i.e. the pairwise distances along the first dimension of data, using the last dimension as variables.
`dist` is a distance metric, currently `Euclidean`(default), `SqEuclidean`, `Chebyshev`, `Cityblock`, `JSDivergence`, `Mahalanobis` and `SqMahalanobis` are supported.
The latter two need a covariance matrix `Q` as input argument.
# Examples
```
julia> dc = randn(10, 4,3)
julia> D = dist_matrix(dc, space = 2)
```
"""
function dist_matrix(data::AbstractArray{tp, N}; dist::String = "Euclidean", space::Int = 0, lat::Int = 0, lon::Int = 0, Q = 0, dims::Int = 2) where {tp, N}
D_out = init_dist_matrix(data)
dist_matrix!(D_out, data, dist = dist, space = space, lat = lat, lon = lon ,Q = Q, dims = dims)
return(D_out[1])
end
function dist_matrix(data::AbstractArray{tp, N}, training_data::AbstractArray{tp, N}; dist::String = "Euclidean", space::Int = 0, lat::Int = 0, lon::Int = 0, Q = 0, dims::Int = 2) where {tp, N}
D_out = init_dist_matrix(data, training_data)
dist_matrix!(D_out, data, training_data, dist = dist, space = space, lat = lat, lon = lon ,Q = Q, dims = dims)
return(D_out[1])
end
"""
knn_dists(D, k::Int, temp_excl::Int = 0)
returns the k-nearest neighbors of a distance matrix `D`. Excludes `temp_excl` (default: `temp_excl = 0`) distances
from the main diagonal of `D` to be also nearest neighbors.
# Examples
```
julia> dc = randn(20, 4,3)
julia> D = dist_matrix(dc, space = 2)
julia> knn_dists_out = knn_dists(D, 3, 1)
julia> knn_dists_out[5] # distances
julia> knn_dists_out[4] # indices
```
"""
function knn_dists(D::AbstractArray, k::Int, temp_excl::Int = 0)
T = size(D,1)
if ((k + temp_excl) > T-1) print("k has to be smaller size(D,1)") end
knn_dists_out = init_knn_dists(T, k)
knn_dists!(knn_dists_out, D, temp_excl)
return(knn_dists_out)
end
"""
init_knn_dists(T::Int, k::Int)
init_knn_dists(datacube::AbstractArray, k::Int)
initialize a preallocated `knn_dists_out` object. `k`is the number of nerarest neighbors, `T` the number of time steps (i.e. size of the first dimension) or a multidimensional `datacube`.
"""
function init_knn_dists(T::Int, k::Int)
ix = zeros(Int64, T)
v = zeros(Float64, T)
indices = zeros(Int64, T, k)
nndists = zeros(Float64, T, k)
knn_dists_out = (k, ix, v, indices, nndists)
return(knn_dists_out)
end
function init_knn_dists(datacube::AbstractArray, k::Int)
T = size(datacube, 1)
ix = zeros(Int64, T)
v = zeros(Float64, T)
indices = zeros(Int64, T, k)
nndists = zeros(Float64, T, k)
knn_dists_out = (k, ix, v, indices, nndists)
return(knn_dists_out)
end
"""
knn_dists!(knn_dists_out, D, temp_excl::Int = 0)
returns the k-nearest neighbors of a distance matrix `D`. Similar to `knn_dists()`, but uses preallocated input object `knn_dists_out`, initialized with `init_knn_dists()`.
Please note that the number of nearest neighbors `k` is not necessary, as it is already determined by the `knn_dists_out` object.
# Examples
```
julia> dc = randn(20, 4,3)
julia> D = dist_matrix(dc, space = 2)
julia> knn_dists_out = init_knn_dists(dc, 3)
julia> knn_dists!(knn_dists_out, D)
julia> knn_dists_out[5] # distances
julia> knn_dists_out[4] # indices
```
"""
function knn_dists!(knn_dists_out::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}}, D::AbstractArray, temp_excl::Int = 0)
(k, ix, v, indices, nndists) = knn_dists_out
T = size(D,1)
if ((k + temp_excl) > T-1) print("k has to be smaller size(D,1)") end
maxD = maximum(D)
for i = 1:T
copyto!(v, view(D,:,i))
for excl = -temp_excl:temp_excl
if(i+excl > 0 && i+excl <= T)
v[i+excl]= maxD
end
end
sortperm!(ix, v)
for j = 1:k
indices[i,j] = ix[j]
nndists[i,j] = v[ix[j]]
end
end
return(knn_dists_out)
end
# compute kernel matrix from distance matrix
"""
kernel_matrix(D::AbstractArray, ฯ::Float64 = 1.0[, kernel::String = "gauss", dimension::Int64 = 1])
compute a kernel matrix out of distance matrix `D`, given `ฯ`. Optionally normalized by the `dimension`, if `kernel = "normalized_gauss"`.
compute `D` with `dist_matrix()`.
# Examples
```
julia> dc = randn(20, 4,3)
julia> D = dist_matrix(dc, space = 2)
julia> K = kernel_matrix(D, 2.0)
```
"""
function kernel_matrix(D::AbstractArray, ฯ::Float64 = 1.0, kernel::String = "gauss", dimension::Int64 = 1)
K = similar(D)
kernel_matrix!(K, D, ฯ, kernel, dimension)
return K
end
# compute kernel matrix from distance matrix
"""
kernel_matrix!(K, D::AbstractArray, ฯ::Float64 = 1.0[, kernel::String = "gauss", dimension::Int64 = 1])
compute a kernel matrix out of distance matrix `D`. Similar to `kernel_matrix()`, but with preallocated Array K (`K = similar(D)`) for output.
# Examples
```
julia> dc = randn(20, 4,3)
julia> D = dist_matrix(dc, space = 2)
julia> kernel_matrix!(D, D, 2.0) # overwrites distance matrix
```
"""
function kernel_matrix!(K::AbstractArray{T,N}, D::AbstractArray{T,N}, ฯ::Real = 1.0, kernel::String = "gauss", dimension::Int64 = 10) where {T,N}
#if(size(D, 1) != size(D, 2)) print("D is not a distance matrix with equal dimensions")
ฯ = convert(T, ฯ)
if(kernel == "normalized_gauss") # k integral gets one
for i in eachindex(K)
@inbounds K[i] = exp.(-0.5 * D[i]./(ฯ*ฯ))./(ฯ * (2 *pi).^(dimension/2))#exp.(-0.5 * D[i]./(ฯ*ฯ))./((2 *pi*ฯ*ฯ).^(dimension/2))
end
elseif (kernel == "gauss")
for i in eachindex(K)
@inbounds K[i] = exp.(-0.5 * D[i]./(ฯ*ฯ))
end
end
return(K)
end
###################################
#end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 21461 | #import MultivariateStats
import Combinatorics
"""
sMSC(datacube, cycle_length)
subtract the median seasonal cycle from the datacube given the length of year `cycle_length`.
# Examples
```
julia> dc = hcat(rand(193) + 2* sin.(0:pi/24:8*pi), rand(193) + 2* sin.(0:pi/24:8*pi))
julia> sMSC_dc = sMSC(dc, 48)
```
"""
function sMSC(datacube, cycle_length)
x_out = similar(datacube)
removeMSC!(datacube, x_out, cycle_length, 1)
return(x_out)
end
function removeMSC!(xin::AbstractArray{Tp,ndim},xout::AbstractArray{Tp,ndim},NpY::Integer,itimedim::Integer;imscstart::Int=1) where {Tp,ndim}
# Determine length of reshape dimensions
s=size(xin)
l1=itimedim==1 ? 1 : prod(s[1:(itimedim-1)])
l2=itimedim==ndim ? 1 : prod(s[(itimedim+1):end])
ltime=s[itimedim]
#Reshape the cube to squeeze unimportant variables
xin2=reshape(xin,l1,ltime,l2);
xout2=reshape(xout,l1,ltime,l2);
msc=zeros(Float64,NpY); # This is the array where the temp msc is stored
nmsc=zeros(Int,NpY); # This is for counting how many values were added
#Start loop through all other variables
for i2=1:l2,i1=1:l1
imsc=imscstart
fill!(msc,zero(Float64))
fill!(nmsc,zero(Int))
for itime=1:ltime
curval=xin2[i1,itime,i2]
if !isnan(curval)
msc[imsc] += curval
nmsc[imsc] += 1
end
imsc =imsc==NpY ? 1 : imsc+1 # Increase msc time step counter
end
for i in 1:NpY msc[i] = nmsc[i] > 0 ? msc[i]/nmsc[i] : NaN end # Get MSC by dividing by number of points
imsc=imscstart
for i in 1:ltime
xout2[i1,i,i2] = xin2[i1,i,i2]-msc[imsc]
imsc =imsc==NpY ? 1 : imsc+1 # Increase msc time step counter
end
end
#Copy data if necessary
pointer(xout2) != pointer(xout) && copyto!(xout,xout2)
return(xout)
end
#small helper
function func(i,x)
return(x[i])
end
"""
TDE(datacube::Array{tp, 4}, ฮT::Integer, DIM::Int = 3) where {tp}
TDE(datacube::Array{tp, 3}, ฮT::Integer, DIM::Int = 3) where {tp}
returns an embedded datacube by concatenating lagged versions of the 2-, 3- or 4-dimensional datacube with `ฮT` time steps in the past up to dimension `DIM` (presetting: `DIM = 3`)
# Examples
```
julia> dc = randn(50,3)
julia> TDE(dc, 3, 2)
```
"""
function TDE(datacube::AbstractArray{tp, 4}, ฮT::Integer, DIM::Int = 3) where {tp}
start = ((DIM-1)*ฮT+1)
embedded_datacube = zeros(tp, (size(datacube, 1) - start +1, size(datacube, 2), size(datacube, 3), size(datacube, 4) * DIM))
for dim = 1:DIM
embedded_datacube[:,:,:,((dim-1)*size(datacube, 4)+1):(dim*size(datacube, 4))] =
datacube[(start - ((dim-1) * ฮT)) : (size(datacube, 1) - ((dim-1) * ฮT)),:,:,:]
end
return(embedded_datacube)
end
function TDE(datacube::AbstractArray{tp, 3}, ฮT::Integer, DIM::Int = 3) where {tp}
start = ((DIM-1)*ฮT+1)
embedded_datacube = zeros(tp, (size(datacube, 1) - start +1, size(datacube, 2), size(datacube, 3) * DIM))
for dim = 1:DIM
embedded_datacube[:,:,((dim-1)*size(datacube, 3)+1):(dim*size(datacube, 3))] =
datacube[(start - ((dim-1) * ฮT)) : (size(datacube, 1) - ((dim-1) * ฮT)),:,:]
end
return(embedded_datacube)
end
function TDE(datacube::AbstractArray{tp, 2}, ฮT::Integer, DIM::Int = 3) where {tp}
start = ((DIM-1)*ฮT+1)
embedded_datacube = zeros(tp, (size(datacube, 1) - start +1, size(datacube, 2) * DIM))
for dim = 1:DIM
embedded_datacube[:,((dim-1)*size(datacube, 2)+1):(dim*size(datacube, 2))] =
datacube[(start - ((dim-1) * ฮT)) : (size(datacube, 1) - ((dim-1) * ฮT)),:]
end
return(embedded_datacube)
end
function TDE(datacube::AbstractArray{tp, 1}, ฮT::Integer, DIM::Int = 3) where {tp}
start = ((DIM-1)*ฮT+1)
embedded_datacube = zeros(tp, (size(datacube, 1) - start +1, DIM))
for dim = 1:DIM
embedded_datacube[:,((dim-1)+1):(dim)] =
datacube[(start - ((dim-1) * ฮT)) : (size(datacube, 1) - ((dim-1) * ฮT))]
end
return(embedded_datacube)
end
"""
mw_VAR(datacube::Array{tp,N}, windowsize::Int = 10) where {tp,N}
compute the variance in a moving window along the first dimension of the datacube (presetting: `windowsize = 10`).
Accepts N dimensional datacubes.
# Examples
```
julia> dc = randn(50,3,3,3)
julia> mw_VAR(dc, 15)
```
"""
function mw_VAR(datacube::AbstractArray{<:AbstractFloat,N}, windowsize::Int = 10) where {N}
out = zeros(eltype(datacube), size(datacube))
for k = 1:length(out) out[k] = NaN end
datacube0mean = datacube .- mean(datacube, dims = 1)
mw_VAR!(out, datacube0mean, windowsize)
return(out)
end
"""
mw_VAR!(out::Array{tp, N}, datacube0mean::Array{tp,N}, windowsize::Int = 10) where {tp,N}
mutating version for `mw_VAR()`. The mean of the input data `datacube0mean` has to be 0. Initialize out properly: `out = datacube0mean` leads to wrong results.
"""
function mw_VAR!(out::AbstractArray{tp, N}, datacube0mean::AbstractArray{tp,N}, windowsize::Int = 10) where {tp, N}
T = size(datacube0mean, 1)
for beg = 1:T:length(datacube0mean)
inner_mw_VAR!(out, datacube0mean, windowsize, beg, T)
end
broadcast!(/, out, out, windowsize)
return(out)
end
"""
inner_mw_VAR!(out::Array{tp, N}, datacube0mean::Array{tp,N}, windowsize::Int = 10) where {tp,N}
internal function for mw_VAR!()
"""
function inner_mw_VAR!(out, datacube, windowsize, beg, T) # mean already subtracted
out_beg = floor(Int, windowsize * 0.5) + beg
for notavailable = (beg):(out_beg-1)
out[notavailable] = NaN
end
# init x
x = sum(unsafe_wrap(Array, pointer(datacube, beg), windowsize).^2)
out[out_beg] = x
for i = 1:(T-windowsize)
x = x - datacube[beg + i - 1]^2 + datacube[beg + i - 1 + windowsize]^2
out[out_beg + i] = x
end
for notavailable = (out_beg + (T-windowsize +1) + 1):(beg+T-1)
out[notavailable] = NaN
end
return(out)
end
"""
mw_AVG(datacube::AbstractArray{tp,N}, windowsize::Int = 10) where {tp,N}
compute the average in a moving window along the first dimension of the datacube (presetting: `windowsize = 10`).
Accepts N dimensional datacubes.
# Examples
```
julia> dc = randn(50,3,3,3)
julia> mw_AVG(dc, 15)
```
"""
function mw_AVG(datacube::AbstractArray{<:AbstractFloat,N}, windowsize::Int = 10) where {N}
out = zeros(eltype(datacube), size(datacube))
mw_AVG!(out, datacube, windowsize)
return(out)
end
"""
mw_AVG!(out::Array{tp, N}, datacube::Array{tp,N}, windowsize::Int = 10) where {tp,N}
internal and mutating version for `mw_AVG()`.
"""
function mw_AVG!(out::AbstractArray{tp,N}, datacube::AbstractArray{tp,N}, windowsize::Int = 10) where {tp,N}
for k = 1:length(out) out[k] = NaN end
T = size(datacube, 1)
for beg = 1:T:length(datacube)
inner_mw_AVG!(out, datacube, windowsize, beg, T)
end
broadcast!(/, out, out, windowsize)
return(out)
end
function inner_mw_AVG!(out::AbstractArray{tp, N}, datacube::AbstractArray{tp, N}, windowsize::Int, beg::Int, T::Int) where {tp,N}
out_beg = floor(Int, windowsize * 0.5) + beg
for notavailable = (beg):(out_beg-1)
out[notavailable] = NaN
end
# init x
x = sum(unsafe_wrap(Array, pointer(datacube, beg), windowsize))
out[out_beg] = x
for i = 1:(T - windowsize)
x = x - datacube[beg + i - 1] + datacube[beg + i - 1 + windowsize]
out[out_beg + i] = x
end
for notavailable = (out_beg + (T-windowsize +1) + 1):(beg+T-1)
out[notavailable] = NaN
end
return(out)
end
"""
mw_COR(datacube::Array{tp, 4}, windowsize::Int = 10) where {tp}
compute the correlation in a moving window along the first dimension of the datacube (presetting: `windowsize = 10`).
Accepts 4-dimensional datacubes.
"""
function mw_COR(datacube::AbstractArray{tp, 4}, windowsize::Int = 10) where {tp}
comb = collect(Combinatorics.combinations(1:size(datacube, 4), 2))
out = zeros(Float64, size(datacube, 1), size(datacube, 2), size(datacube, 3), size(comb, 1))
x = zeros(Float64, windowsize, 2)
for icomb = 1:size(comb, 1)
for lon = 1:size(datacube, 3)
for lat = 1:size(datacube, 2)
for t = 1:(size(datacube, 1)-windowsize)
x = copyto!(x, datacube[t:(t+windowsize-1),lat,lon,comb[icomb]])
copyto!(view(out, t + round.(Int, windowsize * 0.5),lat,lon,icomb)
, cor(view(x,:,1), view(x,:,2)))
end
end
end
end
return(comb, out)
end
function innerst_ewma!(z::AbstractArray{tp, N}, dat::AbstractArray{tp, N}, beg::Int, len::Int, lambda::Float64 = 0.15) where {tp, N}
z[beg] = dat[beg]*lambda
one_m_lambda = 1.0-lambda
for i = (beg+1):(beg+len-1)
z[i] = dat[i] * lambda + z[i-1] * one_m_lambda
end
return(z)
end
"""
EWMA(dat, ฮป)
Compute the exponential weighted moving average (EWMA) with the weighting parameter `ฮป` between 0 (full weighting) and 1 (no weighting) along the first dimension of `dat`.
Supports N-dimensional Arrays.
Lowry, C. A., & Woodall, W. H. (1992). A Multivariate Exponentially Weighted Moving Average Control Chart. Technometrics, 34, 46โ53.
# Examples
```
julia> dc = rand(100,3,2)
julia> ewma_dc = EWMA(dc, 0.1)
```
"""
function EWMA(dat::AbstractArray{tp, N}, lambda::Float64 = 0.15) where {tp, N}
Z = similar(dat)
EWMA!(Z, dat, lambda)
return(Z)
end
"""
EWMA!(Z, dat, ฮป)
use a preallocated output Z. `Z = similar(dat)` or `dat = dat` for overwriting itself.
# Examples
```
julia> dc = rand(100,3,2)
julia> EWMA!(dc, dc, 0.1)
```
"""
function EWMA!(Z::AbstractArray{tp, N}, dat::AbstractArray{tp, N}, lambda::Float64 = 0.15) where {tp, N}
T = size(dat, 1)
LENGTH = length(dat)
for istart = 1:T:LENGTH
innerst_ewma!(Z, dat, istart, T, lambda)
end
return(Z)
end
"""
init_MedianCycle(dat::Array{tp}, cycle_length::Int = 46)
init_MedianCycle(temporal_length::Int[, cycle_length::Int = 46])
initialises an init_MC object to be used as input for `get_MedianCycle!()`. Input is either some sample data or the temporal lenght of the expected input vector
and the length of the annual cycle (presetting: `cycle_length = 46`)
"""
function init_MedianCycle(dat::AbstractArray{tp}, cycle_length::Int = 46) where {tp}
complete_years = Int(floor(size(dat, 1)/cycle_length))
cycle_dat = zeros(tp, cycle_length, complete_years)
cycle_medians = zeros(tp, cycle_length)
init_MC = (cycle_length, complete_years, cycle_medians, cycle_dat)
return(init_MC)
end
function init_MedianCycle(temporal_length::Int, cycle_length::Int = 46)
complete_years = Int(floor(temporal_length/cycle_length))
cycle_dat = zeros(tp, cycle_length, complete_years)
cycle_medians = zeros(tp, cycle_length)
init_MC = (cycle_length, complete_years, cycle_medians, cycle_dat)
return(init_MC)
end
"""
get_MedianCycle!(init_MC, dat::Array{tp,1})
Memory efficient version of `get_MedianCycle()`, returning the median cycle in `init_MC[3]`. The `init_MC` object should be created with `init_MedianCycle`.
Can deal with some NaN values.
# Examples
```
julia> using MultivariateAnomalies
julia> dat = rand(193) + 2* sin(0:pi/24:8*pi)
julia> dat[100] = NaN
julia> init_MC = init_MedianCycle(dat, 48)
julia> get_MedianCycle!(init_MC, dat)
julia> init_MC[3]
```
"""
function get_MedianCycle!(init_MC::Tuple{Int64,Int64,Array{tp,1},Array{tp,2}}, dat::Array{tp,1}) where {tp}
copyto!(init_MC[4], view(dat, 1:(init_MC[2] * init_MC[1])))
for i = 1:init_MC[1]
if(all(.!isnan.(view(init_MC[4], i, :))))
copyto!(view(init_MC[3], i), median(view(init_MC[4], i, :)))
elseif(all(isnan.(view(init_MC[4], i, :))))
copyto!(view(init_MC[3], i), NaN)
else
copyto!(view(init_MC[3], i), median(view(init_MC[4], i, squeeze(!isnan.(view(init_MC[4], i,:)), 1))))
end
end
return(init_MC[3])
end
"""
get_MedianCycle(dat::Array{tp,1}, cycle_length::Int = 46)
returns the median annual cycle of a one dimensional data array, given the length of the annual cycle (presetting: `cycle_length = 46`).
Can deal with some NaN values.
# Examples
```
julia> using MultivariateAnomalies
julia> dat = randn(90) + x = sind.(0:8:719)
julia> cycles = get_MedianCycle(dat, 48)
```
"""
function get_MedianCycle(dat::AbstractArray{tp,1}, cycle_length::Int = 46) where {tp}
init_MC = init_MedianCycle(dat, cycle_length)
get_MedianCycle!(init_MC, dat)
return(init_MC[3])
end
"""
get_MedianCycles(datacube, cycle_length::Int = 46)
returns the median annual cycle of a datacube, given the length of the annual cycle (presetting: `cycle_length = 46`).
The datacube can be 2, 3, 4-dimensional, time is stored along the first dimension.
# Examples
```
julia> using MultivariateAnomalies
julia> dc = hcat(rand(193) + 2* sin(0:pi/24:8*pi), rand(193) + 2* sin(0:pi/24:8*pi))
julia> cycles = get_MedianCycles(dc, 48)
```
"""
function get_MedianCycles(datacube::AbstractArray{tp,4}, cycle_length::Int = 46) where {tp}
dat = zeros(Float64, size(datacube, 1));
init_MC = init_MedianCycle(dat, cycle_length);
med_cycles_out = zeros(Float64, cycle_length, size(datacube, 2), size(datacube, 3), size(datacube, 4));
# loop
for var = 1:size(datacube, 4)
for lon = 1:size(datacube, 3)
for lat = 1:size(datacube, 2)
copyto!(dat, view(datacube, :,lat,lon,var))
copyto!(view(med_cycles_out, :, lat, lon, var), get_MedianCycle!(init_MC, dat))
end
end
end
return(med_cycles_out)
end
function get_MedianCycles(datacube::AbstractArray{tp,3}, cycle_length::Int = 46) where {tp}
dat = zeros(tp, size(datacube, 1));
init_MC = init_MedianCycle(dat, cycle_length);
med_cycles_out = zeros(tp, cycle_length, size(datacube, 2), size(datacube, 3));
# loop
for var = 1:size(datacube, 3)
for lat = 1:size(datacube, 2)
copyto!(dat, view(datacube, :, lat,var))
copyto!(view(med_cycles_out, :, lat, var), get_MedianCycle!(init_MC, dat))
end
end
return(med_cycles_out)
end
function get_MedianCycles(datacube::AbstractArray{tp,2}, cycle_length::Int = 46) where {tp}
dat = zeros(tp, size(datacube, 1));
init_MC = init_MedianCycle(dat, cycle_length);
med_cycles_out = zeros(tp, cycle_length, size(datacube, 2));
# loop
for var = 1:size(datacube, 2)
copyto!(dat, view(datacube, :, var))
copyto!(view(med_cycles_out, :, var), get_MedianCycle!(init_MC, dat))
end
return(med_cycles_out)
end
# get moving window loop with MWobj
mutable struct MWobj
ObsPerYear::Int
windowsize::Int
edgecut::Int
startidx::Int
numoutvars::Int
minwindow::Int
maxwindow::Int
count::Array{Int, 1}
iterator_windowcenter::StepRange{Int,Int}
mw_indat
mw_outdat
mw_idx::Array{Int, 1}
xin
xout
end
function init_MovingWindow(xin::AbstractArray{tp, 2}; ObsPerYear::Int = 46, windowsize::Int = 11, edgecut::Int = 0, startidx::Int = 1, numoutvars::Int = 0) where {tp}
if 2*edgecut >= windowsize error("2 * edgecut has to be smaller windowsize, but is $edgecut, windowsize = $windowsize") end
if !isodd(windowsize) error("windowsize has to be odd, but is $windowsize") end
if round(Int, size(xin, 1) / ObsPerYear) * ObsPerYear != size(xin, 1) error("ObsPerYear multiplied by some integer is not matching size(xin, 1)") end
if numoutvars == 0 numoutvars = size(xin, 2) end
if numoutvars > 1
mwobj = MWobj(
ObsPerYear
,windowsize
,edgecut
,startidx
,numoutvars
,-floor(Int, windowsize / 2.0) # minwindow
,floor(Int, windowsize / 2.0) # maxwindow
, [0] # count
,startidx:(windowsize-2*edgecut):ObsPerYear #iterator_windowcenter
,zeros(eltype(xin), windowsize * floor(Int, size(xin, 1) / ObsPerYear), size(xin, 2)) #mw_indat
,zeros(eltype(xin), windowsize * floor(Int, size(xin, 1) / ObsPerYear), numoutvars) #mw_outdat
,zeros(Int, windowsize * floor(Int, size(xin, 1) / ObsPerYear)) # mw_idx
,xin
,zeros(eltype(xin), size(xin, 1), numoutvars) #xout
)
else
mwobj = MWobj(
ObsPerYear
,windowsize
,edgecut
,startidx
,numoutvars
,-floor(Int, windowsize / 2.0) # minwindow
,floor(Int, windowsize / 2.0) # maxwindow
, [0] # count
,startidx:(windowsize-2*edgecut):ObsPerYear #iterator_windowcenter
,zeros(eltype(xin), windowsize * floor(Int, size(xin, 1) / ObsPerYear), size(xin, 2)) #mw_indat
,zeros(eltype(xin), windowsize * floor(Int, size(xin, 1) / ObsPerYear)) #mw_outdat
,zeros(Int, windowsize * floor(Int, size(xin, 1) / ObsPerYear)) # mw_idx
,xin
,zeros(eltype(xin), size(xin, 1), numoutvars) #xout
)
end
return mwobj
end
function getMWData!(mwobj::MWobj, windowcenter::Int)
xin = mwobj.xin
mwdata = mwobj.mw_indat
mwidx = mwobj.mw_idx
ObsPerYear = mwobj.ObsPerYear
windowsize = mwobj.windowsize
minwindow = mwobj.minwindow
maxwindow = mwobj.maxwindow
count = mwobj.count
count[1] = 0
for i = windowcenter:ObsPerYear:(size(xin, 1)+windowsize)
for j = minwindow:maxwindow
tempidx = i+j
if tempidx > 0 && tempidx <= size(xin, 1)
count[1] += 1
mwidx[count[1]] = tempidx
for varidx = 1:size(xin, 2)
mwdata[count[1], varidx] = xin[tempidx, varidx]
end
end
end
end
return view(mwobj.mw_indat,1:mwobj.count[1],:)
end
function pushMWResultsBack!(mwobj::MWobj, windowcenter::Int)
mwdata = mwobj.mw_outdat
mwidx = mwobj.mw_idx
xout = mwobj.xout
ObsPerYear=mwobj.ObsPerYear
windowsize = mwobj.windowsize
edgecut = mwobj.edgecut
minwindow = mwobj.minwindow
maxwindow = mwobj.maxwindow
count = mwobj.count
# assumes Time * Vars
count[1] = 0
for i = windowcenter:ObsPerYear:(size(xout, 1)+windowsize)
for j = minwindow:maxwindow
tempidx = i+j
if tempidx > 0 && tempidx <= size(xout, 1)
count[1] += 1
if mwidx[count[1]] != tempidx error("not matching mwidx[count] = $(mwidx[count]), check windowsize") end
if j >= (minwindow + edgecut) && j <= (maxwindow - edgecut)
for varidx = 1:size(xout, 2)
xout[tempidx, varidx] = mwdata[count[1], varidx]
end
end
end
end
end
return view(mwobj.mw_outdat, 1:mwobj.count[1], :)
end
"""
mapMovingWindow(function2mw, x, args...; ObsPerYear::Int = 46, windowsize::Int = 9, edgecut::Int = 0, startidx::Int = 1, numoutvars::Int = 0)
mapMovingWindow(function2mw, x; ...)
apply a function (function2mw) in a moving window encompassing all years and running along the time, e.g. apply the function to all summers, then summers + 1 timestep ...
results are written to the center of the respective windowsize. Input axes are time or time-variables. The number of output variables can be different from the number of input variables, specified in numoutvars.
e.g. transforming the variables in normalised ranks between zero and one to get rid of heteroscedasticity would look like:
using MultivariateAnomalies
x = randn(10*46, 3)
mapMovingWindow(get_quantile_scores, x, numoutvars = size(x, 2))
"""
function mapMovingWindow(function2mw, x, args...; ObsPerYear::Int = 46, windowsize::Int = 9, edgecut::Int = 0, startidx::Int = 1, numoutvars::Int = 0)
mwobj = init_MovingWindow(x, ObsPerYear = ObsPerYear, windowsize = windowsize, edgecut = edgecut, numoutvars = numoutvars, startidx = startidx)
if numoutvars > 1
for windowcenter = mwobj.iterator_windowcenter
getMWData!(mwobj, windowcenter)
# do something with mwobj.mw_indat and write the results to mwobj.mw_outdat
xout = view(mwobj.mw_outdat, 1:mwobj.count[1], :)
xin = view(mwobj.mw_indat, 1:mwobj.count[1], :)
xout[:] = function2mw(xin, args...)
pushMWResultsBack!(mwobj, windowcenter)
end
else
for windowcenter = mwobj.iterator_windowcenter
getMWData!(mwobj, windowcenter)
# do something with mwobj.mw_indat and write the results to mwobj.mw_outdat
xout = view(mwobj.mw_outdat, 1:mwobj.count[1])
xin = view(mwobj.mw_indat, 1:mwobj.count[1], :)
xout[:] = function2mw(xin, args...)
pushMWResultsBack!(mwobj, windowcenter)
end
end
return mwobj.xout
end
function mapMovingWindow(function2mw, x; ObsPerYear::Int = 46, windowsize::Int = 9, edgecut::Int = 0, startidx::Int = 1, numoutvars::Int = 0)
mwobj = init_MovingWindow(x, ObsPerYear = ObsPerYear, windowsize = windowsize, edgecut = edgecut, numoutvars = numoutvars, startidx = startidx)
if numoutvars > 1
for windowcenter = mwobj.iterator_windowcenter
getMWData!(mwobj, windowcenter)
# do something with mwobj.mw_indat and write the results to mwobj.mw_outdat
xout = view(mwobj.mw_outdat, 1:mwobj.count[1], :)
xin = view(mwobj.mw_indat, 1:mwobj.count[1], :)
xout[:] = function2mw(xin)
pushMWResultsBack!(mwobj, windowcenter)
end
else
for windowcenter = mwobj.iterator_windowcenter
getMWData!(mwobj, windowcenter)
# do something with mwobj.mw_indat and write the results to mwobj.mw_outdat
xout = view(mwobj.mw_outdat, 1:mwobj.count[1])
xin = view(mwobj.mw_indat, 1:mwobj.count[1], :)
xout[:] = function2mw(xin)
pushMWResultsBack!(mwobj, windowcenter)
end
end
return mwobj.xout
end
###################################
#end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 15479 | mutable struct PARAMS
algorithms::Array{String,1}
training_data::Array{Float64,2}
dist::String
K_sigma::Float64
KNN_k::Int64
REC_varepsilon::Float64
Q::Array{Float64,2} #covariance matrix
mv::Array{Float64,2} # mean vector
SVDD_nu::Float64
SVDD_model
KNFST_model
# initialisation
D::Tuple{Array{Float64,2},Array{Float64,2},Array{Float64,2}}
K::Array{Float64,2}
# algorithms
KDE::Array{Float64,1}
REC::Array{Float64,1}
KNN #::Tuple{Int64,Array{Int64,1},Array{Float64,1},Array{Int64,2},Array{Float64,2}}
KNN_Gamma::Array{Float64,1}
KNN_Delta# ::Tuple{Array{Float64,1},Array{Float64,2},Array{Float64,2}}
T2 #::Tuple{Array{Float64,1},Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,2}}
D_train::Tuple{Array{Float64,2},Array{Float64,2},Array{Float64,2}}
D_test::Tuple{Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,2},Array{Float64,2}}
KNFST
SVDD # ::Tuple{Array{Any,1},Array{Float64,2},Array{LIBSVM.SVMNode,2},Array{Ptr{LIBSVM.SVMNode},1}}
REC_quantiles::Array{Float64,1}
KDE_quantiles::Array{Float64,1}
KNN_Gamma_quantiles::Array{Float64,1}
KNN_Delta_quantiles::Array{Float64,1}
T2_quantiles::Array{Float64,1}
KNFST_quantiles::Array{Float64,1}
SVDD_quantiles::Array{Float64,1}
data::Array{Float64,2}
temp_excl::Int64
ensemble_method::String
ensemble
quantiles::Bool
end
function reshape_dims_except_N(datacube::AbstractArray{tp,N}) where {tp, N}
dims = 1
for i = 1:(N-1) # multiply dimensions except the last one and save as dims
dims = size(datacube, i) * dims
end
X = reshape(datacube, dims, size(datacube, N))
return(X)
end
function ispartof(needle::Array{String,1}, haystack::Array{String,1})
partof = fill(false, size(haystack, 1))
for i = 1:size(needle, 1)
partof[needle[i] .== haystack] .= true
end
return(partof)
end
"""
getParameters(algorithms::Array{String,1} = ["REC", "KDE"], training_data::AbstractArray{tp, 2} = [NaN NaN])
return an object of type PARAMS, given the `algorithms` and some `training_data` as a matrix.
# Arguments
- `algorithms`: Subset of `["REC", "KDE", "KNN_Gamma", "KNN_Delta", "SVDD", "KNFST", "T2"]`
- `training_data`: data for training the algorithms / for getting the Parameters.
- `dist::String = "Euclidean"`
- `sigma_quantile::Float64 = 0.5` (median): quantile of the distance matrix, used to compute the weighting parameter for the kernel matrix (`algorithms = ["SVDD", "KNFST", "KDE"]`)
- `varepsilon_quantile` = `sigma_quantile` by default: quantile of the distance matrix to compute the radius of the hyperball in which the number of reccurences is counted (`algorihtms = ["REC"]`)
- `k_perc::Float64 = 0.05`: percentage of the first dimension of `training_data` to estimmate the number of nearest neighbors (`algorithms = ["KNN-Gamma", "KNN_Delta"]`)
- `nu::Float64 = 0.2`: use the maximal percentage of outliers for `algorithms = ["SVDD"]`
- `temp_excl::Int64 = 0`. Exclude temporal adjacent points from beeing count as recurrences of k-nearest neighbors `algorithms = ["REC", "KNN-Gamma", "KNN_Delta"]`
- `ensemble_method = "None"`: compute an ensemble of the used algorithms. Possible choices (given in `compute_ensemble()`) are "mean", "median", "max" and "min".
- `quantiles = false`: convert the output scores of the algorithms into quantiles.
# Examples
```
julia> using MultivariateAnomalies
julia> training_data = randn(100, 2); testing_data = randn(100, 2);
julia> P = getParameters(["REC", "KDE", "SVDD"], training_data, quantiles = false);
julia> detectAnomalies(testing_data, P)
```
"""
function getParameters(algorithms::Array{String,1} = ["REC", "KDE"], training_data::AbstractArray{tp, N} = [NaN NaN]; dist::String = "Euclidean", sigma_quantile::Float64 = 0.5, varepsilon_quantile::Float64 = NaN, k_perc::Float64 = 0.05, nu::Float64 = 0.2, temp_excl::Int64 = 0, ensemble_method = "None", quantiles = false) where {tp, N}
allalgorithms = ["KDE", "REC", "KNN_Delta", "KNN_Gamma", "T2", "SVDD", "KNFST"]
@assert any(ispartof(algorithms, allalgorithms))
if(length(algorithms) != sum(ispartof(algorithms, allalgorithms)))
error("one or more of algorithms $algorithms are not within the defined possibilites $allalgorithms")
end
T = size(training_data, 1)
if(length(size(training_data)) > 2) training_data = reshape_dims_except_N(training_data) end
#Parameters = PARAMS(algorithms, training_data, dist, NaN, 0, NaN, [NaN NaN], [NaN NaN], NaN, NaN, NaN)
P = PARAMS(algorithms, training_data, "Euclidean"
, NaN, 0, NaN, [NaN NaN], [NaN NaN], nu, NaN, NaN
, ([NaN NaN], [NaN NaN], [NaN NaN]) #D
, [NaN NaN] # K
, [NaN] # KDE
, [NaN] # REC
, NaN #KNN
, [NaN] # KNN_Gamma
, NaN # KNN_Delta
, NaN # T2
, ([NaN NaN], [NaN NaN], [NaN NaN]) # D_train
, ([NaN NaN], [NaN NaN], [NaN NaN], [NaN NaN], [NaN NaN]) # D test
, NaN # KNFST
, NaN # SVDD
, [NaN], [NaN], [NaN], [NaN], [NaN], [NaN], [NaN] # quantiles
, [NaN NaN]# data
, temp_excl, ensemble_method, NaN, quantiles
)
if(any(dist .== ["Mahalanobis", "SqMahalanobis"]) || any(algorithms .== "T2"))
P.Q = cov(training_data)
P.mv = mean(training_data, dims = 1)
end
if(any(algorithms .== "REC") || any(algorithms .== "KDE") || any(algorithms .== "SVDD") || any(algorithms .== "KNFST"))
D_train = dist_matrix(training_data, dist = P.dist, Q = P.Q)
if(isnan(varepsilon_quantile)) varepsilon_quantile = sigma_quantile end
(P.K_sigma, P.REC_varepsilon) = quantile(unsafe_wrap(Array,pointer(D_train), length(D_train)), [sigma_quantile, varepsilon_quantile])
end
if(any(algorithms .== "KNN_Gamma") || any(algorithms .== "KNN_Delta"))
P.KNN_k = Int(ceil(k_perc * T))
end
if(any(algorithms .== "SVDD") || any(algorithms .== "KNFST"))
kernel_matrix!(D_train, D_train, P.K_sigma)
if(any(algorithms .== "KNFST")) P.KNFST_model = KNFST_train(D_train) end
if(any(algorithms .== "SVDD")) P.SVDD_model = SVDD_train(D_train, nu) end
end
return(P)
end
"""
init_detectAnomalies{tp, N}(data::AbstractArray{tp, N}, P::PARAMS)
initialize empty arrays in `P` for detecting the anomalies.
"""
function init_detectAnomalies(data::AbstractArray{tp, N}, P::PARAMS) where {tp, N}
# initialisation
T = size(data, 1)
VARs = size(data, N)
P.data = zeros(tp, T, VARs)
P.D = init_dist_matrix(data)
P.K = similar(P.D[1])
if(any(P.algorithms .== "KDE")) P.KDE = init_KDE(T) end
if(any(P.algorithms .== "REC")) P.REC = init_REC(T) end
if(any(P.algorithms .== "KNN_Gamma") || any(P.algorithms .== "KNN_Delta"))
P.KNN = init_knn_dists(data, P.KNN_k)
if(any(P.algorithms .== "KNN_Gamma")) P.KNN_Gamma = init_KNN_Gamma(T) end
if(any(P.algorithms .== "KNN_Delta")) P.KNN_Delta = init_KNN_Delta(T, VARs, P.KNN_k) end
end
if(any(P.algorithms .== "T2")) P.T2 = init_T2(data) end
if(any(P.algorithms .== "SVDD") || any(P.algorithms .== "KNFST"))
P.D_train = init_dist_matrix(P.training_data)
P.D_test = init_dist_matrix(data, P.training_data)
if(any(P.algorithms .== "KNFST")) P.KNFST = init_KNFST(T, P.KNFST_model) end
#if(any(P.algorithms .== "SVDD")) P.SVDD = init_SVDD_predict(T, size(P.training_data, 1)) end
end
return(P)
end
"""
detectAnomalies!{tp, N}(data::AbstractArray{tp, N}, P::PARAMS)
mutating version of `detectAnomalies()`. Directly writes the output into `P`.
"""
function detectAnomalies!(data::AbstractArray{tp, 2}, P::PARAMS) where {tp}
copyto!(P.data, data)
allalgorithms = ["KDE", "REC", "KNN_Delta", "KNN_Gamma", "T2", "SVDD", "KNFST"]
@assert any(ispartof(P.algorithms, allalgorithms))
if(any(ispartof(P.algorithms, ["KDE", "REC", "KNN_Delta", "KNN_Gamma"]))) # D based
dist_matrix!(P.D, P.data, dist = P.dist, Q = P.Q)
if(any(ispartof(P.algorithms, ["KDE"])))
kernel_matrix!(P.K, P.D[1], P.K_sigma)
KDE!(P.KDE, P.K)
end
if(any(ispartof(P.algorithms, ["REC"])))
REC!(P.REC, P.D[1], P.REC_varepsilon, P.temp_excl)
end
if(any(ispartof(P.algorithms, ["KNN_Gamma", "KNN_Delta"])))
knn_dists!(P.KNN, P.D[1], P.temp_excl)
if(any(ispartof(P.algorithms, ["KNN_Gamma"]))) KNN_Gamma!(P.KNN_Gamma, P.KNN) end
if(any(ispartof(P.algorithms, ["KNN_Delta"]))) KNN_Delta!(P.KNN_Delta, P.KNN, P.data) end
end
end
if(any(ispartof(P.algorithms, ["T2"])))
T2!(P.T2, P.data, P.Q, P.mv)
end
if(any(ispartof(P.algorithms, ["SVDD", "KNFST"])))
dist_matrix!(P.D_train, P.training_data, dist = P.dist, Q = P.Q)
dist_matrix!(P.D_test, P.data, P.training_data, dist = P.dist, Q = P.Q)
kernel_matrix!(P.D_train[1],P.D_train[1], P.K_sigma) # transform distance to kernel matrix
kernel_matrix!(P.D_test[1], P.D_test[1], P.K_sigma) # transform distance to kernel matrix
if(any(ispartof(P.algorithms, ["KNFST"]))) KNFST_predict!(P.KNFST, P.KNFST_model, P.D_test[1]) end
if(any(ispartof(P.algorithms, ["SVDD"])))
(predicted_labels, decision_values) = SVDD_predict(P.SVDD_model, P.D_test[1])
P.SVDD = broadcast(*, decision_values, -1)
end
end
if(P.quantiles)
if(any(ispartof(P.algorithms, ["T2"]))) P.T2_quantiles = get_quantile_scores(P.T2[1]) end
if(any(ispartof(P.algorithms, ["REC"]))) P.REC_quantiles = get_quantile_scores(P.REC) end
if(any(ispartof(P.algorithms, ["KDE"]))) P.KDE_quantiles = get_quantile_scores(P.KDE) end
if(any(ispartof(P.algorithms, ["SVDD"]))) P.SVDD_quantiles = get_quantile_scores(P.SVDD[1,:]) end
if(any(ispartof(P.algorithms, ["KNFST"]))) P.KNFST_quantiles = get_quantile_scores(P.KNFST[1]) end
if(any(ispartof(P.algorithms, ["KNN_Gamma"]))) P.KNN_Gamma_quantiles = get_quantile_scores(P.KNN_Gamma) end
if(any(ispartof(P.algorithms, ["KNN_Delta"]))) P.KNN_Delta_quantiles = get_quantile_scores(P.KNN_Delta[1]) end
end
if(P.ensemble_method != "None")
L = length(P.algorithms)
if(L > 4 || L < 2) print("compute_ensemble() does currently only support 2-4 detection algorihms. You selected $L. \n") end
if(!P.quantiles) print("Warning: P.quantiles should be true for computing ensembles out of comparable scores, but is false")
@assert !any(ispartof(P.algorithms, ["KNN_Delta", "SVDD", "KNFST", "T2"]))
if(L == 2) P.ensemble = compute_ensemble(getfield(P, Meta.parse(P.algorithms[1])), getfield(P, Meta.parse(P.algorithms[2])), ensemble = P.ensemble_method) end
if(L == 3) P.ensemble = compute_ensemble(getfield(P, Meta.parse(P.algorithms[1])), getfield(P, Meta.parse(P.algorithms[2])), getfield(P, Meta.parse(P.algorithms[3])), ensemble = P.ensemble_method) end
if(L == 4) P.ensemble = compute_ensemble(getfield(P, Meta.parse(P.algorithms[1])), getfield(P, Meta.parse(P.algorithms[2])), getfield(P, Meta.parse(P.algorithms[3])), getfield(P, Meta.parse(P.algorithms[4])), ensemble = P.ensemble_method) end
else
if(L == 2) P.ensemble = compute_ensemble(getfield(P, Meta.parse(string(P.algorithms[1], "_quantiles"))), getfield(P, Meta.parse(string(P.algorithms[2], "_quantiles"))), ensemble = P.ensemble_method) end
if(L == 3) P.ensemble = compute_ensemble(getfield(P, Meta.parse(string(P.algorithms[1], "_quantiles"))), getfield(P, Meta.parse(string(P.algorithms[2], "_quantiles"))), getfield(P, Meta.parse(string(P.algorithms[3], "_quantiles"))), ensemble = P.ensemble_method) end
if(L == 4) P.ensemble = compute_ensemble(getfield(P, Meta.parse(string(P.algorithms[1], "_quantiles"))), getfield(P, Meta.parse(string(P.algorithms[2], "_quantiles"))), getfield(P, Meta.parse(string(P.algorithms[3], "_quantiles"))), getfield(P, Meta.parse(string(P.algorithms[4], "_quantiles"))), ensemble = P.ensemble_method) end
end
end
return(P)
end
"""
detectAnomalies(data::AbstractArray{tp, N}, P::PARAMS) where {tp, N}
detectAnomalies(data::AbstractArray{tp, N}, algorithms::Array{String,1} = ["REC", "KDE"]; mean = 0) where {tp, N}
detect anomalies, given some Parameter object `P` of type PARAMS. Train the Parameters `P` with `getParameters()` beforehand on some training data. See `getParameters()`.
Without training `P` beforehand, it is also possible to use `detectAnomalies(data, algorithms)` given some algorithms (except SVDD, KNFST).
Some default parameters are used in this case to initialize `P` internally.
# Examples
```
julia> training_data = randn(100, 2); testing_data = randn(100, 2);
julia> # compute the anoamly scores of the algorithms "REC", "KDE", "T2" and "KNN_Gamma", their quantiles and return their ensemble scores
julia> P = getParameters(["REC", "KDE", "T2", "KNN_Gamma"], training_data, quantiles = true, ensemble_method = "mean");
julia> detectAnomalies(testing_data, P)
```
"""
function detectAnomalies(data::AbstractArray{tp, 2}, P::PARAMS) where {tp}
init_detectAnomalies(data, P)
detectAnomalies!(data, P)
return(return_detectAnomalies(P))
end
function return_scores(i, P::PARAMS)
if(isa(getfield(P, Meta.parse(P.algorithms[i])), Tuple))
return(getfield(P, Meta.parse(P.algorithms[i]))[1])
else
return(getfield(P, Meta.parse(P.algorithms[i])))
end
end
function return_quantile_scores(i, P::PARAMS)
return(getfield(P, Meta.parse(string(P.algorithms[i], "_quantiles"))))
end
function return_detectAnomalies(P::PARAMS)
L = length(P.algorithms)
if(any(ispartof([P.ensemble_method], ["mean","max","min", "median"]))) return(P.ensemble)
elseif(L == 1 && !P.quantiles) return(return_scores(1,P))
elseif(!P.quantiles && L > 1) return(ntuple(i->return_scores(i,P), L))
elseif(L > 1 && P.quantiles) return(ntuple(i->return_quantile_scores(i,P), L))
elseif(L == 1 && P.quantiles) return(return_quantile_scores(1,P))
end
end
function detectAnomalies(data::AbstractArray{tp, N}, algorithms::Array{String,1} = ["REC", "KDE"]; mean = 0, dist = "Euclidean") where {tp, N}
@assert !any(ispartof(algorithms, ["SVDD", "KNFST"]))
Q = cov(reshape_dims_except_N(data))
if(mean == 0) meanvec = zeros(Float64, 1, size(data, N))
else meanvec = mean(data, dims = 1) end
D = dist_matrix(data; dist = dist)
sigma = median(unsafe_wrap(Array, pointer(D), length(D)))
P = PARAMS(algorithms, [NaN NaN], dist, sigma # sigma
, Int(ceil(0.05 * size(data, 1))) # k
, sigma # varepsilon
, Q
, meanvec, 0.2, NaN, NaN
, ([NaN NaN], [NaN NaN], [NaN NaN]) #D
, [NaN NaN] # K
, [NaN] # KDE
, [NaN] # REC
, NaN #KNN
, [NaN] # KNN_Gamma
, NaN # KNN_Delta
, NaN # T2
, ([NaN NaN], [NaN NaN], [NaN NaN]) # D_train
, ([NaN NaN], [NaN NaN], [NaN NaN], [NaN NaN], [NaN NaN]) # D test
, NaN # KNFST
, NaN # SVDD
, [NaN], [NaN], [NaN], [NaN], [NaN], [NaN], [NaN] # quantiles
, data # data
, 0, "None", NaN, false
)
init_detectAnomalies(data, P)
detectAnomalies!(data, P)
return(return_detectAnomalies(P))
end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 1804 | __precompile__(true)
module MultivariateAnomalies
#######################
#import MultivariateStats
import Combinatorics
using LinearAlgebra
using Distances
using LIBSVM
using Base.Cartesian
using Statistics
using StatsBase
# DistDensity
export
dist_matrix,
dist_matrix!,
init_dist_matrix,
knn_dists,
init_knn_dists,
knn_dists!,
kernel_matrix,
kernel_matrix!,
# Detection Algorithms
REC,
init_REC,
REC!,
KDE,
init_KDE,
KDE!,
init_T2,
T2,
T2!,
KNN_Gamma,
init_KNN_Gamma,
KNN_Gamma!,
KNN_Delta,
KNN_Delta!,
init_KNN_Delta,
UNIV,
UNIV!,
init_UNIV,
SVDD_train,
SVDD_predict,
KNFST_train,
KNFST_predict,
KNFST_predict!,
init_KNFST,
Dist2Centers,
# FeatureExtraction
sMSC,
TDE,
mw_VAR,
mw_VAR!,
mw_AVG,
mw_AVG!,
mw_COR,
EWMA,
EWMA!,
get_MedianCycles,
get_MedianCycle,
get_MedianCycle!,
init_MedianCycle,
mapMovingWindow,
# AUC
auc,
# Scores
get_quantile_scores,
get_quantile_scores!,
compute_ensemble,
# high level functions
getParameters,
detectAnomalies,
init_detectAnomalies,
detectAnomalies!,
# online algorithms
Euclidean_distance!,
Mahalanobis_distance!,
SigmaOnline!,
KDEonline!,
KNNonline!,
REConline!
# Distance and Density Estimation
include("DistDensity.jl")
# Multivariate Anomaly Detection Algorithms
include("DetectionAlgorithms.jl")
# AUC computations
include("AUC.jl")
# Feature Extraction techniques
include("FeatureExtraction.jl")
# post processing for Anomaly Scores
include("Scores.jl")
# high level functions
include("HighLevelFunctions.jl")
# online algorithms
include("OnlineAlgorithms.jl")
#######################
end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 9392 | # Online Outlier Detection
"""
Euclidean_distance!{tp}(d::Array{tp, 1}, x::AbstractArray{tp, 2}, i::Int, j::Int, dim::Int = 1)
compute the Euclidean distance between x[i,:] and x[j,:] and write the result to d. Memory efficient. dim is the dimension of i and j.
"""
function Euclidean_distance!(d::Array{tp, 1}, x::AbstractArray{tp, 2}, i::Int, j::Int, dim::Int = 1) where {tp}
d[1] = 0.0
if dim == 2
for v = 1:size(x, 1)
d[1] = d[1] + (x[v, i] - x[v, j]) * (x[v, i] - x[v, j])
end
else
for v = 1:size(x, 2)
d[1] = d[1] + (x[i, v] - x[j, v]) * (x[i, v] - x[j, v])
end
end
d[1] = sqrt(d[1])
return d
end
"""
Mahalanobis_distance!{tp}(d::Array{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, i::Int, j::Int, dim::Int = 1)
compute the Euclidean distance between x[i,:] and x[j,:] and write the result to d. Memory efficient. dim is the dimension of i and j.
"""
function Mahalanobis_distance!(d::Array{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2},i::Int, j::Int, dim::Int = 1) where {tp}
d[1] = 0.0
if dim == 2
for v2 = 1:size(x, 1)
for v = 1:size(x, 1)
d[1] = d[1] + (x[v, i] - x[v, j]) * Q[v, v2] * (x[v2, i] - x[v2, j])
end
end
else
for v = 1:size(x, 2)
for v2 = 1:size(x, 2)
d[1] = d[1] + (x[i, v] - x[j, v]) * Q[v, v2] * (x[i, v2] - x[j, v2])
end
end
end
d[1] = sqrt(d[1])
return d
end
"""
KDEonline!{tp}(kdescores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2} [, Q::AbstractArray{tp, 2}], ฯ::tp, dim::Int = 1)
compute (1.0 - Kernel Density Estimates) from x and write it to kdescores with dim being the dimension of the observations.
If teh covariance matrix Q is given, the Mahalanobis distance is used instead of the Euclidean distance.
"""
function KDEonline!(k::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, i::Int, ฯ::tp, dim::Int = 1) where {tp}
k[1] = 0.0
for j = 1:size(x, dim)
Euclidean_distance!(d, x, i, j, dim)
innerKDEonline!(k, d, ฯ)
end
finalizeKDEonline!(k, x, dim)
return k
end
function KDEonline!(k::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, i::Int, ฯ::tp, dim::Int = 1) where {tp}
k[1] = 0.0
for j = 1:size(x, dim)
Mahalanobis_distance!(d, x, Q, i, j, dim)
innerKDEonline!(k, d, ฯ)
end
finalizeKDEonline!(k, x, dim)
return k
end
function innerKDEonline!(k::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, ฯ::tp) where {tp}
k[1] = k[1] + exp(-0.5 * d[1] / ฯ^2)
return k
end
function finalizeKDEonline!(k::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, dim::Int) where {tp}
k[1] = 1.0 - (k[1] / size(x, dim))
return k
end
function KDEonline!(kdescores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, ฯ::tp, dim::Int = 1) where {tp}
d = zeros(tp, 1)
k = zeros(tp, 1)
for i = 1:size(x, dim)
KDEonline!(k, d, x, i, ฯ, dim)
kdescores[i] = k[1]
end
return kdescores
end
function KDEonline!(kdescores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, ฯ::tp, dim::Int = 1) where {tp}
d = zeros(tp, 1)
k = zeros(tp, 1)
for i = 1:size(x, dim)
KDEonline!(k, d, x, Q, i, ฯ, dim)
kdescores[i] = k[1]
end
return kdescores
end
"""
REConline!{tp}(recscores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2} [, Q::AbstractArray{tp, 2}], ษ::tp, dim::Int = 1)
compute recurrence scores from x and write it to recscores with dim being the dimension of the observations.
If the covariance matrix Q is given, the mahalanobis distance is used instead of the euclidean distance.
"""
function REConline!(r::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, i::Int, ษ::tp, dim::Int = 1) where {tp}
r[1] = 0.0
for j = 1:size(x, dim)
Euclidean_distance!(d, x, i, j, dim)
innerREConline!(r, d, ษ)
end
finalizeREConline!(r, x, dim)
return r
end
function REConline!(r::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, i::Int, ษ::tp, dim::Int = 1) where {tp}
r[1] = 0.0
for j = 1:size(x, dim)
Mahalanobis_distance!(d, x, Q, i, j, dim)
innerREConline!(r, d, ษ)
end
finalizeREConline!(r, x, dim)
return r
end
function innerREConline!(r::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, ษ::tp) where {tp}
if d[1] <= ษ
r[1] = r[1] + 1.0
end
return r
end
function finalizeREConline!(r::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, dim::Int) where {tp}
r[1] = 1.0 - ((r[1] - 1.0) / size(x, dim))
return r
end
function REConline!(recscores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, ษ::tp, dim::Int = 1) where {tp}
d = zeros(tp, 1)
r = zeros(tp, 1)
for i = 1:size(x, dim)
REConline!(r, d, x, i, ษ, dim)
recscores[i] = r[1]
end
return recscores
end
function REConline!(recscores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, ษ::tp, dim::Int = 1) where {tp}
d = zeros(tp, 1)
r = zeros(tp, 1)
for i = 1:size(x, dim)
REConline!(r, d, x, Q, i, ษ, dim)
recscores[i] = r[1]
end
return recscores
end
"""
KNNonline!{tp}(knnscores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, [Q::AbstractArray{tp, 2},] k::Int, dim::Int = 1)
compute k-nearest neighbor (gamma) scores from x and write it to knnscores with dim being the dimension of the observations.
If the covariance matrix Q is given, the mahalanobis distance is used instead of the euclidean distance.
"""
function KNNonline!(knn::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, i::Int, k::Int, dim::Int = 1) where {tp}
if size(knn, 1) != k+1 error("size knn != k+1") end
for l = 1:length(knn)
knn[l] = 1.0e30#typemax(tp)
end
for j = 1:size(x, dim)
Euclidean_distance!(d, x, i, j, dim)
innerKNNonline!(knn, d, k)
end
finalizeKNNonline!(knn, k)
return knn
end
function KNNonline!(knn::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, i::Int, k::Int, dim::Int = 1) where {tp}
if size(knn, 1) != k+1 error("size knn != k+1") end
for l = 1:length(knn)
knn[l] = 1.0e30#typemax(tp)
end
for j = 1:size(x, dim)
Mahalanobis_distance!(d, x, Q, i, j, dim)
innerKNNonline!(knn, d, k)
end
finalizeKNNonline!(knn, k)
return knn
end
function KNNonline!(knnscores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, k::Int, dim::Int = 1) where {tp}
d = zeros(tp, 1)
knn = fill(typemax(tp), k+1)
for i = 1:size(x, dim)
KNNonline!(knn, d, x, i, k, dim)
knnscores[i] = knn[1] #mean(knn[2:k+1]) --> in finalize
end
return knnscores
end
function KNNonline!(knnscores::AbstractArray{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, k::Int, dim::Int = 1) where {tp}
d = zeros(tp, 1)
knn = fill(typemax(tp), k+1)
for i = 1:size(x, dim)
KNNonline!(knn, d, x, Q, i, k, dim)
knnscores[i] = knn[1] #mean(knn[2:k+1]) --> in finalize
end
return knnscores
end
function innerKNNonline!(knn::AbstractArray{tp, 1}, d::AbstractArray{tp, 1}, k::Int) where {tp}
if d[1] <= knn[1]
for kshift = (k+1):-1:2
knn[kshift] = knn[kshift-1] # shift old one to k+1
end
knn[1] = d[1] # replace by new distance
else
for j = 2:(k+1)
if d[1] > knn[j-1] && d[1] <= knn[j]
for kshift = k+1:-1:j
knn[kshift] = knn[kshift-1] # shift old one to k+1
end
knn[j] = d[1] # replace by new distance
end
end
end
return knn
end
function finalizeKNNonline!(knn::AbstractArray{tp, 1}, k::Int) where {tp}
knn[1] = 0.0
for i = 2:(k+1)
knn[1] = knn[1] + knn[i]
end
knn[1] = knn[1] / k
return knn
end
"""
SigmaOnline!{tp}(sigma::Array{tp, 1}, x::AbstractArray{tp, 2}, [Q::AbstractArray{tp, 2}], samplesize::Int = 250, dim::Int = 1)
compute `sigma` parameter as mean of the distances of `samplesize` randomly sampled points along `dim`.
If Q is given the Mahalanobis distance is used instead of Euclidean.
"""
function SigmaOnline!(sigma::Array{tp, 1}, x::AbstractArray{tp, 2}, samplesize::Int = 250, dim::Int = 1) where {tp}
sigma[1] = 0.0
d = zeros(tp, 1)
trainsample1 = zeros(Int, 1)
trainsample2 = zeros(Int, 1)
for i = 1:samplesize
StatsBase.rand!(trainsample1, 1:size(x, dim))[1]
for j = 1:samplesize
if j > i
Euclidean_distance!(d, x, trainsample1[1], StatsBase.rand!(trainsample2, 1:size(x, dim))[1], dim)
# sum of distances
sigma[1] = sigma[1] + d[1]
end
end
end
# get mean
sigma[1] = sigma[1] / (samplesize^2 / 2 - samplesize / 2)
return sigma
end
function SigmaOnline!(sigma::Array{tp, 1}, x::AbstractArray{tp, 2}, Q::AbstractArray{tp, 2}, samplesize::Int = 250, dim::Int = 1) where {tp}
sigma[1] = 0.0
d = zeros(tp, 1)
trainsample1 = zeros(Int, 1)
trainsample2 = zeros(Int, 1)
for i = 1:samplesize
StatsBase.rand!(trainsample1, 1:size(x, dim))[1]
for j = 1:samplesize
if j > i
Mahalanobis_distance!(d, x, Q, trainsample1[1], StatsBase.rand!(trainsample2, 1:size(x, dim))[1], dim)
# sum of distances
sigma[1] = sigma[1] + d[1]
end
end
end
# get mean
sigma[1] = sigma[1] / (samplesize^2 / 2 - samplesize / 2)
return sigma
end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 4709 | """
get_quantile_scores(scores, quantiles = 0.0:0.01:1.0)
return the quantiles of the given N dimensional anomaly `scores` cube. `quantiles` (default: `quantiles = 0.0:0.01:1.0`) is a Float range of quantiles.
Any score being greater or equal `quantiles[i]` and beeing smaller than `quantiles[i+1]` is assigned to the respective quantile `quantiles[i]`.
# Examples
```
julia> scores1 = rand(10, 2)
julia> quantile_scores1 = get_quantile_scores(scores1)
```
"""
function get_quantile_scores(scores::AbstractArray{tp,N}, quantiles::StepRangeLen{Float64} = 0.0:0.01:1.0) where {tp,N}
quantile_scores = zeros(Float64, size(scores))
get_quantile_scores!(quantile_scores, scores, quantiles)
return quantile_scores
end
"""
get_quantile_scores!{tp,N}(quantile_scores::AbstractArray{Float64, N}, scores::AbstractArray{tp,N}, quantiles::StepRangeLen{Float64} = 0.0:0.01:1.0)
return the quantiles of the given N dimensional `scores` array into a preallocated `quantile_scores` array, see `get_quantile_scores()`.
"""
function get_quantile_scores!(quantile_scores::AbstractArray{Float64, N}, scores::AbstractArray{tp,N}, quantiles::StepRangeLen{Float64} = 0.0:0.01:1.0; return_thresholds::Bool = false) where {tp,N}
LENGTH = length(scores)
thresholds = quantile(reshape(scores, prod(size(scores))), collect(quantiles))
n_quants = size(thresholds, 1)
for j = 1:LENGTH
if(scores[j] <= thresholds[1])
quantile_scores[j] = quantiles[1]
end
for i = 2:n_quants
if(scores[j] > thresholds[i-1] && scores[j] <= thresholds[i])
quantile_scores[j] = quantiles[i]
end
end
end
if return_thresholds == false
return quantile_scores
else
return thresholds
end
end
"""
compute_ensemble(m1_scores, m2_scores[, m3_scores, m4_scores]; ensemble = "mean")
compute the mean (`ensemble = "mean"`), minimum (`ensemble = "min"`), maximum (`ensemble = "max"`) or median (`ensemble = "median"`) of the given anomaly scores.
Supports between 2 and 4 scores input arrays (`m1_scores, ..., m4_scores`). The scores of the different anomaly detection algorithms should be somehow comparable,
e.g., by using `get_quantile_scores()` before.
# Examples
```
julia> using MultivariateAnomalies
julia> scores1 = rand(10, 2)
julia> scores2 = rand(10, 2)
julia> quantile_scores1 = get_quantile_scores(scores1)
julia> quantile_scores2 = get_quantile_scores(scores2)
julia> compute_ensemble(quantile_scores1, quantile_scores2, ensemble = "max")
```
"""
function compute_ensemble(m1_scores::Array{Tp, N}, m2_scores::Array{Tp, N}; ensemble = "mean") where {Tp, N}
@assert any(ensemble .== ["mean","min","max","median"])
scores = cat(m1_scores, m2_scores, dims = N+1);
if ensemble == "mean"
ensemble_scores = dropdims(mean(scores, dims = N+1), dims = N+1)
end
if ensemble == "median"
ensemble_scores = dropdims(median(scores, dims = N+1), dims = N+1)
end
if ensemble == "max"
ensemble_scores = dropdims(maximum(scores, dims = N+1), dims = N+1)
end
if ensemble == "min"
ensemble_scores = dropdims(minimum(scores, dims = N+1), dims = N+1)
end
return ensemble_scores
end
function compute_ensemble(m1_scores::Array{Tp, N}, m2_scores::Array{Tp, N}, m3_scores::Array{Tp, N}; ensemble = "mean") where {Tp, N}
@assert any(ensemble .== ["mean","min","max","median"])
scores = cat(m1_scores, m2_scores, m3_scores, dims = N+1);
if ensemble == "mean"
ensemble_scores = dropdims(mean(scores, dims = N+1), dims = N+1)
end
if ensemble == "median"
ensemble_scores = dropdims(median(scores, dims = N+1), dims = N+1)
end
if ensemble == "max"
ensemble_scores = dropdims(maximum(scores, dims = N+1), dims = N+1)
end
if ensemble == "min"
ensemble_scores = dropdims(minimum(scores, dims = N+1), dims = N+1)
end
return ensemble_scores
end
function compute_ensemble(m1_scores::Array{Tp, N}, m2_scores::Array{Tp, N}, m3_scores::Array{Tp, N}, m4_scores::Array{Tp, N}; ensemble = "mean") where {Tp, N}
@assert any(ensemble .== ["mean","min","max","median"])
scores = cat(m1_scores, m2_scores, m3_scores, m4_scores, dims = N+1);
if ensemble == "mean"
ensemble_scores = dropdims(mean(scores, dims = N+1), dims = N+1)
end
if ensemble == "median"
ensemble_scores = dropdims(median(scores, dims = N+1), dims = N+1)
end
if ensemble == "max"
ensemble_scores = dropdims(maximum(scores, dims = N+1), dims = N+1)
end
if ensemble == "min"
ensemble_scores = dropdims(minimum(scores, dims = N+1), dims = N+1)
end
return ensemble_scores
end
###################################
# end
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 170 | using MultivariateAnomalies
#using LinearAlgebra
using Statistics
using Test
include("testFeatureExtr.jl")
include("testAlgorithms.jl")
include("testPostprocessing.jl")
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 3958 | # DistDensity.jl
dat = [[0.0 1.0];[1.0 -1.0];[2.0 2.0];[2.0 3.0];[1.0 1.0]]
D = dist_matrix(dat)
knn_dists_out = knn_dists(D, 2, 0)
@test all(vec(knn_dists_out[4]) .== [5,5,4,3,1,2,1,5,5,3]) || all(vec(knn_dists_out[4]) .== [5,5,4,3,1,3,1,5,5,3])
# parameters
Q = zeros(Float64, 2,2); Q[1,1] = 1.0; Q[2,2] = 1.0
sigma = median(D)
K = kernel_matrix(D, sigma)
#####################
# DetectionAlgorithms.jl
# model
svdd_model = SVDD_train(K[1:4,1:4], 0.2);
knfst_model = KNFST_train(K[1:4,1:4])
# REC, checked manually
@test all(round.(REC(D, sigma, 0), digits = 1) .== round.(1 .- ([1,0,2,1,2] ./ 5), digits = 1))
# KNN Gamma, checked manually
@test all(round.(KNN_Gamma(knn_dists_out), digits = 1) .== [1.6, 2.1,1.2,1.6,1.2])
# KNN Delta, approximately
@test all(round.(KNN_Delta(knn_dists_out, dat), digits = 1) .== [1.4, 2.1, 0.5, 1.6, 0.5])
# KDE # results show exhibit similar ordering like REC
@test all(sortperm(KDE(K)) .== [5,3,1,4,2])
# Hotelling's T^2
# is also the quared mahalanobis distance to the data's mean
using Distances
@test all(round.(T2(dat, Q, mean(dat, dims = 1)), digits = 2) .== round.(pairwise(SqMahalanobis(Q), dat', mean(dat', dims = 2), dims = 2), digits = 2))
# SVDD
Ktest = exp.(-0.5 * pairwise(Euclidean(), dat[1:4,:]', dat') ./ sigma^2)
@test all(SVDD_predict(svdd_model, Ktest)[1] .== [0, 1, 0, 0, 1])
# KNFST, last data point (not seen in training) should differ, i.e. have largest values
@test sortperm(-KNFST_predict(knfst_model, Ktest)[1])[1] == 5
# high level functions
algorithms = ["T2", "REC", "KDE", "KNN_Gamma", "KNN_Delta"]
P = getParameters(algorithms, dat)
P.KNN_k = 2
P.Q = Q
@test P.K_sigma == sigma == P.REC_varepsilon
# detectAnomalies
detectAnomalies(dat, P)
# chekct detectAnomalies for self consistency
@test round.(P.REC, digits = 3) == round.(REC(D, sigma, 0), digits = 3)
@test round.(P.KDE, digits = 3) == round.(KDE(K), digits = 3)
@test round.(P.KNN_Gamma, digits = 3) == round.(KNN_Gamma(knn_dists_out), digits = 3)
@test round.(P.KNN_Delta[1], digits = 3) == round.(KNN_Delta(knn_dists_out, dat), digits = 3)
@test round.(P.T2[1], digits = 3) == round.(T2(dat, Q, mean(dat, dims = 1)), digits = 3)
algorithms = ["KNFST", "SVDD"]
P = getParameters(algorithms, dat[1:4,:])
P.K_sigma = sigma
P.SVDD_model = svdd_model
P.KNFST_model = knfst_model
detectAnomalies(dat, P)
(labels, decvalues) = SVDD_predict(svdd_model, Ktest)
@test round.(P.SVDD, digits = 3) == round.(decvalues, digits = 3) * -1
@test K[1:4,1:4] == P.D_train[1]
@test Ktest == P.D_test[1]
@test round.(P.KNFST[1], digits = 3) == round.(KNFST_predict(knfst_model, Ktest)[1], digits = 3)
data = rand(200,3)
methods = ["REC","KDE"]
@test all(detectAnomalies(data, getParameters(methods, data)) .== detectAnomalies(data, methods))
x = [[1.0,2.0,3.0] [-2.0,4.0,1.0]]
@test all(round.(UNIV(x), digits = 0) .== [0,1,1])
# test online algorithms with the non-online corresponding ones
using StatsBase
out = zeros(100)
x = randn(100, 3)
Q = StatsBase.cov(x)
@test all(round.(KDEonline!(out, x, 1.0), digits = 4) .== round.(KDE(kernel_matrix(dist_matrix(x), 1.0)), digits = 4))
@test all(round.(KDEonline!(out, x, Q, 1.0), digits = 4) .== round.(KDE(kernel_matrix(dist_matrix(x, dist = "Mahalanobis", Q = Q), 1.0)), digits = 4))
@test all(round.(REConline!(out, x, 1.0), digits = 4) .== round.(REC(dist_matrix(x), 1.0), digits = 4))
@test all(round.(REConline!(out, x, Q, 1.0), digits = 4) .== round.(REC(dist_matrix(x, dist = "Mahalanobis", Q = Q), 1.0), digits = 4))
@test all(round.(KNNonline!(out, x, 5), digits = 4) .== round.(KNN_Gamma(knn_dists(dist_matrix(x, dims = 2), 5)), digits = 4))
@test all(round.(KNNonline!(out, x, Q, 5), digits = 4) .== round.(KNN_Gamma(knn_dists(dist_matrix(x, dist = "Mahalanobis",Q = Q, dims = 2), 5)), digits = 4))
s = zeros(1)
SigmaOnline!(s, x)
@test floor.(Int, s)[1] == 2 || floor.(Int, s)[1] == 1
SigmaOnline!(s, x, Q)
@test floor.(Int, s)[1] == 2 || floor.(Int, s)[1] == 1
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 1244 |
# sMSC test
dc = hcat(2* sin.(0:pi/24:8*pi), 3* sin.(0:pi/24:8*pi));
sMSC_dc = sMSC(dc, 48);
@test all(abs.(sMSC_dc) .< 10e-10)
# TDE test
x = [0,0,0,5,0,0,5]
@test all(TDE(x, 3, 2) .== hcat([5,0,0,5],[0,0,0,5]))
# Dim 2
@test all(TDE(reshape(x, 7, 1), 3, 2) .== reshape(hcat([5,0,0,5],[0,0,0,5]), (4,2)))
# Dim 3
@test all(TDE(reshape(x, 7, 1, 1), 3, 2) .== reshape(hcat([5,0,0,5],[0,0,0,5]), (4,1,2)))
# Dim 4
@test all(TDE(reshape(x, 7, 1, 1, 1), 3, 2) .== reshape(hcat([5,0,0,5],[0,0,0,5]), (4,1,1,2)))
# mw_AVG
x = [4.0, 1.0, 1.0, 1.0, 4.0]
@test all(round.(Int, mw_AVG(x, 3)[2:4]) .== [2,1,2])
# mw_VAR
x = [3.0, -3.0, 4.0, -4.0, 5.0]
@test all(round.(Int, mw_VAR(x, 2)[2:5] .* 2) .== [20,25,34, 41])
# EWMA
x = [10.0, 0.0, 0.0, 0.0, 0.0, 10.0, 0.0]
@test all(round.(Int, EWMA(x, 0.5) * 4) .== [20, 10, 5, 2, 1, 21, 10])
# map moving window
x = randn(144, 1)
@test all(reshape(EWMA(reshape(x, 48, 3)')', 144, 1) .== mapMovingWindow(EWMA, x,ObsPerYear = 48, windowsize = 1, numoutvars = 1))
@test all(reshape(EWMA(reshape(x, 48, 3)', 0.5)', 144, 1) .== mapMovingWindow(EWMA, x, 0.5,ObsPerYear = 48, windowsize = 1, numoutvars = 1))
# get median cycle
x = sind.(0:8:719)
@test all(get_MedianCycle(x, 45) .== x[1:45])
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | code | 1029 | scores = [0.0, 1.0, 1.0, 0.0, 1.0, 0.0]
events = [0,1,1,1,0,0]
@test auc(scores, events) == 2/3
scores = [1.0,0.5,0.0,0.25,0.75] .- 1.0
@test all(get_quantile_scores(scores) .== scores .+ 1.0)
x = zeros(10, 3)
y = ones(10, 3)
z = fill(2.0, 10, 3)
@test all(compute_ensemble(x,z, ensemble = "mean") .== ones(10,3))
@test all(compute_ensemble(x,y, ensemble = "max") .== ones(10,3))
@test all(compute_ensemble(x,y, ensemble = "min") .== zeros(10,3))
@test all(compute_ensemble(x,y,z, ensemble = "mean") .== ones(10,3))
@test all(compute_ensemble(x,y,z, ensemble = "max") .== fill(2.0, 10, 3))
@test all(compute_ensemble(x,y,z, ensemble = "min") .== zeros(10,3))
@test all(compute_ensemble(x,y,z, ensemble = "median") .== ones(10,3))
@test all(compute_ensemble(x,y,y,z, ensemble = "mean") .== ones(10,3))
@test all(compute_ensemble(x,y,y,z, ensemble = "max") .== fill(2.0, 10, 3))
@test all(compute_ensemble(x,y,y,z, ensemble = "min") .== zeros(10,3))
@test all(compute_ensemble(x,y,y,z, ensemble = "median") .== ones(10,3))
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 2041 | # MultivariateAnomalies
*A julia package for detecting multivariate anomalies.*
*Keywords: Novelty detection, Anomaly Detection, Outlier Detection, Statistical Process Control, Process Monitoring*
| **Documentation** | **Build Status** | **Code Coverage**
|:-------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:| :-----------------------------------------------------------------------------------------------:|
| [](https://milanflach.github.io/MultivariateAnomalies.jl/latest) | [](https://travis-ci.org/milanflach/MultivariateAnomalies.jl)| [](https://codecov.io/gh/milanflach/MultivariateAnomalies.jl) |
## Requirements
- Julia >= `0.7`
- Julia packages `Distances`, `LIBSVM`, `Combinatorics`, and `Linear Algebra`.
## Installation
- add the package: ```]add MultivariateAnomalies```
## Documentation
The Documentation is available [here](https://milanflach.github.io/MultivariateAnomalies.jl/latest).
## Reference
The package was implemented by Milan Flach and Fabian Gans, Max Planck Institute for Biogeochemistry, Department Biogeochemical Integration, Jena.
**Please cite this package as:**
Flach, M., Gans, F., Brenning, A., Denzler, J., Reichstein, M., Rodner, E., Bathiany, S., Bodesheim, P., Guanche, Y., Sippel, S., and Mahecha, M. D. (2017): Multivariate anomaly detection for Earth observations: a comparison of algorithms and feature extraction techniques, Earth Syst. Dynam., 8, 677-696, [doi:10.5194/esd-8-677-2017](https://doi.org/10.5194/esd-8-677-2017).
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 2758 | # MultivariateAnomalies.jl
*A julia package for detecting multivariate anomalies.*
*Keywords: Novelty detection, Anomaly Detection, Outlier Detection, Statistical Process Control, Process Monitoring*
**Please cite this package as:**
Flach, M., Gans, F., Brenning, A., Denzler, J., Reichstein, M., Rodner, E., Bathiany, S., Bodesheim, P., Guanche, Y., Sippel, S., and Mahecha, M. D. (2017): Multivariate anomaly detection for Earth observations: a comparison of algorithms and feature extraction techniques, Earth Syst. Dynam., 8, 677-696, [doi:10.5194/esd-8-677-2017](https://doi.org/10.5194/esd-8-677-2017).
## Requirements
- Julia >= `1.0`
- Julia packages `Distances`, `Combinatorics`, `LinearAlgebra`, and `LIBSVM`.
## Installation
- add the package: ```]add MultivariateAnomalies```
## Package Features
- Detect anomalies in your data with easy to use [high level functions](man/HighLevelFunctions.md) or individual [anomaly detection algorithms](man/DetectionAlgorithms.md)
- [Feature Extraction](man/Preprocessing.md): Preprocess your data by extracting relevant features
- [Similarities and Dissimilarities](man/DistancesDensity.md): Compute distance matrices, kernel matrices and k-nearest neighbor objects.
- [Postprocessing](man/Postprocessing.md): Postprocess your anomaly scores, by computing their quantiles or combinations of several algorithms (ensembles).
- [AUC](man/AUC.md): Compute the area under the curve as external evaluation metric of your scores.
- [Online Algorithms](man/OnlineAlgorithms.md): Algorithms tuned for little memory allocation.
## Using the Package
For a quick start it might be useful to start with the [high level functions](man/HighLevelFunctions.md) for detecting anomalies. They can be used in highly automized way.
## Input Data
*MultivariateAnomalies.jl* assumes that observations/samples/time steps are stored along the first dimension of the data array (rows of a matrix) with the number of observations `T = size(data, 1)`. Variables/attributes are stored along the last dimension `N` of the data array (along the columns of a matrix) with the number of variables `VAR = size(data, N)`. The implemented anomaly detection algorithms return anomaly scores indicating which observation(s) of the data are anomalous.
## Authors
<img align="right" src="img/MPG_Minerva.png" alt="Minerva" width="75"/>
The package was implemented by Milan Flach and Fabian Gans, Max Planck Institute for Biogeochemistry, Department Biogeochemical Integration, Research group for Empirical Inference of the Earth System, Jena.
## Index
```@index
Pages = ["man/Preprocessing.md", "man/DetectionAlgorithms.md", "man/Postprocessing.md", "man/AUC.md", "man/DistancesDensity.md", "man/OnlineAlgorithms.md"]
```
| MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 405 | ## Area Under the Curve
Compute true positive rates, false positive rates and the area under the curve to evaulate the algorihtms performance.
Efficient implementation according to
Fawcett, T. (2006). An introduction to ROC analysis. Pattern Recognition Letters, 27(8), 861โ874. [Link](http://doi.org/10.1016/j.patrec.2005.10.010)
## Functions
```@docs
auc
```
Index
```@index
Pages = ["AUC.md"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 1205 | ## Anomaly Detection Algorithms
Most of the anomaly detection algorithms below work on a distance/similarity matrix `D` or a kernel/dissimilarity matrix `K`. They can be comuted using the functions provided [here](DistancesDensity.md).
Currently supported algorithms include
- Recurrences (REC)
- Kernel Density Estimation (KDE)
- Hotelling's $T^2$ (Mahalanobis distance) (T2)
- two k-Nearest Neighbor approaches (KNN-Gamma, KNN-Delta)
- Univariate Approach (UNIV)
- Support Vector Data Description (SVDD)
- Kernel Null Foley Summon Transform (KNFST)
## Functions
### Recurrences
```@docs
REC
REC!
init_REC
```
### Kernel Density Estimation
```@docs
KDE
KDE!
init_KDE
```
### Hotelling's T<sup>2</sup>
```@docs
T2
T2!
init_T2
```
### k-Nearest Neighbors
```@docs
KNN_Gamma
KNN_Gamma!
init_KNN_Gamma
KNN_Delta
KNN_Delta!
init_KNN_Delta
```
### Univariate Approach
```@docs
UNIV
UNIV!
init_UNIV
```
### Support Vector Data Description
```@docs
SVDD_train
SVDD_predict
```
### Kernel Null Foley Summon Transform
```@docs
KNFST_train
KNFST_predict
KNFST_predict!
init_KNFST
```
### Distance to some Centers
```@docs
Dist2Centers
```
## Index
```@index
Pages = ["DetectionAlgorithms"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 1083 | ## Distance, Kernel Matrices and k-Nearest Neighbours
Compute distance matrices (similarity matrices) and convert them into kernel matrices or k-nearest neighbor objects.
## Distance/Similarity Matrices
A distance matrix `D` consists of pairwise distances $d()$computed with some metrix (e.g. Euclidean):
$D = d(X_{t_i}, X_{t_j})$
i.e. the distance between vector $X$ of observation $t_i$ and $t_j$ for all observations $t_i,t_j = 1 \ldots T$.
#### Functions
```@docs
dist_matrix
dist_matrix!
init_dist_matrix
```
## k-Nearest Neighbor Objects
k-Nearest Neighbor objects return the k nearest points and their distance out of a distance matrix `D`.
#### Functions
```@docs
knn_dists
knn_dists!
init_knn_dists
```
## Kernel Matrices (Dissimilarities)
A distance matrix `D` can be converted into a kernel matrix `K`, i.e. by computing pairwise dissimilarities using Gaussian kernels centered on each datapoint.
$K= exp(-0.5 \cdot D \cdot \sigma^{-2})$
#### Functions
```@docs
kernel_matrix
kernel_matrix!
```
## Index
```@index
Pages = ["DistancesDensity.md"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 1084 | ## High Level Anomaly Detection Algorithms
We provide high-level convenience functions for detecting the anomalies. Namely the pair of
`P = getParameters(algorithms, training_data)`
and
`detectAnomalies(testing_data, P)`
sets standard choices of the Parameters `P` and hands the parameters as well as the algorithms choice over to detect the anomalies.
Currently supported algorithms include Kernel Density Estimation (`algorithms = ["KDE"]`), Recurrences (`"REC"`), k-Nearest Neighbors algorithms (`"KNN-Gamma"`, `"KNN-Delta"`), Hotelling's $T^2$ (`"T2"`), Support Vector Data Description (`"SVDD"`) and Kernel Null Foley Summon Transform (`"KNFST"`). With `getParameters()` it is also possible to compute output scores of multiple algorithms at once (`algorihtms = ["KDE", "T2"]`), quantiles of the output anomaly scores (`quantiles = true`) and ensembles of the selected algorithms (e.g. `ensemble_method = "mean"`).
### Functions
```@docs
getParameters
detectAnomalies
detectAnomalies!
init_detectAnomalies
```
## Index
```@index
Pages = ["HighLevelFunctions.md"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 364 | ## Online Algorithms
We provide online some functions, which are tuned to allocate minimal amounts of memory. Implemented so far:
- Euclidean distance
- Sigma estimation for KDE / REC
- KDE
- REC
- KNN-Gamma
#### Functions
```@docs
Euclidean_distance!
SigmaOnline!
KDEonline!
REConline!
KNNonline!
```
## Index
```@index
Pages = ["OnlineAlgorithms.md"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 257 | ## Scores
Postprocess your anomaly scores by making different algorithms comparable and computing their ensemble.
### Functions
```@docs
get_quantile_scores
get_quantile_scores!
compute_ensemble
```
## Index
```@index
Pages = ["Postprocessing.md"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.2.4 | e48ad740ee72e61fee62e21e1e99d5fecc900ad0 | docs | 1419 | ## Feature Extraction Techniques
Extract the relevant inforamtion out of your data and use them as input feature for the anomaly detection algorithms.
## Dimensionality Reduction
For dimensionality reduction, we would like to point to the package *MultivariateStats.jl*. Several techniques are implemented there, e.g.
- Principal Component Analysis (PCA)
- Independent Component Analysis (ICA)
## Seasonality
When dealing with time series, i.e. the observations are time steps, it might be important to remove or get robust estimates of the mean seasonal cycles. This is implemended by
- subtracting the median seasonal cycle (sMSC) and
- getting the median seasonal cycle (get_MedianCycles)
#### Functions
```@docs
sMSC
get_MedianCycles
get_MedianCycle
get_MedianCycle!
init_MedianCycle
```
## Exponential Weighted Moving Average
One option to reduce the noise level in the data and detect more 'significant' anomalies is computing an exponential weighted moving average (EWMA)
#### Function
```@docs
EWMA
EWMA!
```
## Time Delay Embedding
Increase the feature space (Variabales) with lagged observations.
#### Function
```@docs
TDE
```
## Moving Window Features
include the variance (mw_VAR) and correlations (mw_COR) in a moving window along the first dimension of the data.
#### Functions
```@docs
mw_VAR
mw_VAR!
mw_COR
mw_AVG
mw_AVG!
```
## Index
```@index
Pages = ["Preprocessing.md"]
``` | MultivariateAnomalies | https://github.com/milanflach/MultivariateAnomalies.jl.git |
|
[
"MIT"
] | 0.1.0 | 8efec99164aef955d6fe515667329a3d6a2dedd9 | code | 8901 | module IntervalLapper
export Interval, overlap, intersect
export Lapper, find, seek, lower_bound, merge_overlaps!, coverage, union_and_intersect
export Bits, count
"""
Represents an interval of (start, stop] with a label val.
"""
struct Interval{T}
start::Int
stop::Int
val::T
end
"""
Check if two intervals overlap.
# Example
```jldoctest
julia> overlap(Interval(0, 10, "cats"), Interaval(5, 15, "Dogs"))
true
```
"""
function overlap(a::Interval, b::Interval)
a.start < b.stop && a.stop > b.start
end
function overlap(a::Interval, start::Int, stop::Int)
a.start < stop && a.stop > start
end
"""
Determine the number of positions shared by two intervals
# Example
```jldoctest
julia> intersect(Interval(0, 10, "cats"), Interaval(5, 15, "Dogs"))
5
```
"""
function intersectlen(a::Interval, b::Interval)
diff = min(a.stop, b.stop) - max(a.start, b.start)
diff >= 0 ? diff : 0
end
"""
Primary object of the library. The intervals can be used for iterating / pulling values out of the tree
"""
mutable struct Lapper{T}
intervals::Vector{Interval{T}}
max_len::Int
cursor::Int
cov::Union{Nothing, Int}
overlaps_merged::Bool
end
function Lapper(intervals::Vector{Interval{T}}) where T
sort!(intervals, by = x -> (x.start))
max_len = 0
for interval in intervals
iv_len = interval.stop - interval.start # add an check for intervals where this could be negative?
if iv_len > max_len
max_len = iv_len
end
end
return Lapper(intervals, max_len, 0, nothing, false)
end
function lower_bound(start::Int, intervals::Vector{Interval{T}}) where T
size = length(intervals)
low = 1
@inbounds while size > 1
half = div(size, 2)
other_half = size - half
probe = low + half
other_low = low + other_half
v = intervals[probe]
size = half
low = v.start < start ? other_low : low
end
low
end
"""
Merge any intervals that overlap with eachother within the Lapper. This is an easy
way to speed up queries.
"""
function merge_overlaps!(lapper::Lapper{T}) where T
if length(lapper.intervals) <= 1
lapper.overlaps_merged = true
return
elseif lapper.overlaps_merged
return
end
stack = Vector{Interval{T}}()
first = lapper.intervals[1]
push!(stack, first)
for interval in lapper.intervals[2:end]
top = pop!(stack)
if top.stop < interval.start
push!(stack, top)
push!(stack, interval)
elseif top.stop < interval.stop
top = Interval{T}(top.start, interval.stop, interval.val)
push!(stack, top)
else
# they were equal
push!(stack, top)
end
end
lapper.overlaps_merged = true
lapper.intervals = stack
end
"""
Calculate the nuber of positions covered by the intervals in Lapper.
"""
function coverage(lapper::Lapper{T}) where T
moving_start = 0
moving_stop = 0
cov = 0
for interval in lapper.intervals
if overlap(interval, moving_start, moving_stop)
moving_start = min(moving_start, interval.start)
moving_stop = max(moving_stop, interval.stop)
else
cov += moving_stop - moving_start
moving_start = interval.start
moving_stop = interval.stop
end
end
cov += moving_stop - moving_start
cov
end
"""
Find the union and the intersect of two lapper objects.
Union: The set of positions found in both lappers
Intersect: The number of positions where both lappers intersect. Note that a position only
ounts one time, multiple Intervals covering the same position don't add up.
TODO: make this and other funcitons more generic an not depend on T if they don't have to
"""
function union_and_intersect(self::Lapper{T}, other::Lapper{T}, self_cov::Union{Nothing, Int}=nothing, other_cov::Union{Nothing, Int}=nothing) where T
cursor = Ref(1)
if !self.overlaps_merged || !other.overlaps_merged
intersections = Vector{Interval{Bool}}()
for self_iv in self.intervals
for other_iv in seek(other, self_iv.start, self_iv.stop, cursor)
start = max(self_iv.start, other_iv.start)
stop = min(self_iv.stop, other_iv.stop)
push!(intersections, Interval(start, stop, true))
end
end
temp_lapper = Lapper(intersections)
merge_overlaps!(temp_lapper)
temp_cov = coverage(temp_lapper)
other_cov = isnothing(other_cov) ? coverage(other) : other_cov
self_cov = isnothing(self_cov) ? coverage(self) : self_cov
union = self_cov + other_cov - temp_cov
return union, temp_cov
else
intersect = 0
for c1_iv in self.intervals
for c2_iv in seek(other, c1_iv.start, c1_iv.stop, cursor)
local_intersect = intersectlen(c1_iv, c2_iv)
intersect += local_intersect
end
end
other_cov = isnothing(other_cov) ? coverage(other) : other_cov
self_cov = isnothing(self_cov) ? coverage(self) : self_cov
union = self_cov + other_cov - intersect
return (union, intersect)
end
end
#=
#
# Find Iterator / Seek Iterator
#
=#
@inline function checked_sub(a::Int, b::Int, or=1)
maybe = a - b
maybe >= 1 ? maybe : or
end
struct FindIter{T}
inner::Lapper{T}
start::Int
stop::Int
end
struct SeekIter{T}
inner::Lapper{T}
start::Int
stop::Int
cursor::Ref{Int}
end
@inline function _find(iter::Union{FindIter, SeekIter}, offset::Int)
while offset <= length(iter.inner.intervals)
interval = iter.inner.intervals[offset]
offset += 1
if overlap(interval, iter.start, iter.stop)
return (interval, offset)
elseif interval.start >= iter.stop
break
end
end
nothing
end
find(lapper::Lapper{T}, start::Int, stop::Int) where T = FindIter(lapper, start, stop)
Base.iterate(iter::FindIter, offset=lower_bound(checked_sub(iter.start, iter.inner.max_len), iter.inner.intervals)) = _find(iter, offset)
Base.IteratorSize(::FindIter) = Base.SizeUnknown()
function seek(lapper::Lapper{T}, start::Int, stop::Int, cursor::Ref{Int}) where T
if cursor[] <= 1 || (cursor[] <= length(lapper.intervals) && lapper.intervals[cursor[]].start > start)
cursor[] = lower_bound(checked_sub(start, lapper.max_len), lapper.intervals)
end
while cursor[] + 1 <= length(lapper.intervals) && lapper.intervals[cursor[] + 1].start < checked_sub(start, lapper.max_len)
cursor[] += 1
end
SeekIter(lapper, start, stop, cursor)
end
Base.iterate(iter::SeekIter, offset=iter.cursor[]) = _find(iter, offset)
Base.IteratorSize(::SeekIter) = Base.SizeUnknown()
#=
# Depth Iterator
=#
struct DepthIter
inner::Lapper
# Lapper that is merged lapper of the inner
merged::Lapper{Bool}
merged_len::Int
end
"""
Return the contiguous intervals of coverage, `val` represents the number of intervals
covering the returned interval.
"""
function depth(lapper::Lapper)
merged_lapper = Lapper(collect(map(x -> Interval(x.start, x.stop, true), lapper.intervals)))
merge_overlaps!(merged_lapper)
merged_len = length(merged_lapper.intervals)
DepthIter(lapper, merged_lapper, merged_len)
end
Base.IteratorSize(::DepthIter) = Base.SizeUnknown()
function Base.iterate(iter::DepthIter, (curr_merged_pos, curr_pos, cursor)=(1, 1, Ref(1)))
interval = iter.merged.intervals[curr_pos]
if curr_merged_pos == 1
curr_merged_pos = interval.start
end
if interval.stop == curr_merged_pos
if curr_pos + 1 <= iter.merged_len
curr_pos += 1
interval = iter.merged.intervals[curr_pos]
curr_merged_pos = interval.start
else
return nothing
end
end
start = curr_merged_pos
depth_at_point = 0
for _ in seek(iter.inner, curr_merged_pos, curr_merged_pos + 1, cursor)
depth_at_point += 1
end
new_depth_at_point = depth_at_point
while new_depth_at_point == depth_at_point && curr_merged_pos < interval.stop
curr_merged_pos += 1
tmp = 0
for _ in seek(iter.inner, curr_merged_pos, curr_merged_pos + 1, cursor)
tmp += 1
end
new_depth_at_point = tmp
end
return (Interval(start, curr_merged_pos, depth_at_point), (curr_merged_pos, curr_pos, cursor))
end
"""
A data structure for counting all intervals that overlap start .. stop. It is very fast.
Two binary searches are performed to fina all the excluded elements, then the intersections
can be deduced from there. See [BITS](https://arxiv.org/pdf/1208.3407.pdf) for more info.
"""
struct Bits
starts::Vector{Int}
stops::Vector{Int}
end
@inline unzip(a) = map(x -> getfield.(a, x), fieldnames(eltype(a)))
function Bits(intervals::Vector{Interval{T}}) where T
starts, stops = unzip(map( x -> (x.start, x.stop), intervals))
Bits(sort!(starts), sort!(stops))
end
@inline function bsearch_seq(key::Int, elems::Vector{Int})
if elems[1] > key
return 1
end
high = length(elems) + 1
low = 1
while high - low > 1
mid = div(high + low, 2)
if elems[mid] < key
low = mid
else
high = mid
end
end
high
end
function count(bits::Bits, start::Int, stop::Int)
len = length(bits.starts)
first = bsearch_seq(start, bits.stops)
last = bsearch_seq(stop, bits.starts)
while first <= len && bits.stops[first] == start
first += 1
end
num_cant_after = len - last
len - first - num_cant_after
end
end # module
| IntervalLapper | https://github.com/sstadick/IntervalLapper.jl.git |
|
[
"MIT"
] | 0.1.0 | 8efec99164aef955d6fe515667329a3d6a2dedd9 | code | 10538 | using Test
import IntervalLapper
const IL = IntervalLapper
const Iv = IL.Interval{Int}
function setup_nonoverlapping()
data = map(x -> Iv(x, x + 10, 0), 0:20:100)
IL.Lapper(data)
end
function setup_overlapping()
data = map(x -> Iv(x, x + 15, 0), 0:10:100)
IL.Lapper(data)
end
function setup_badlapper()
data = [
Iv(70, 120, 0), # max_len = 50
Iv(10, 15, 0),
Iv(10, 15, 0), # exact overlap
Iv(12, 15, 0), # inner overlap
Iv(14, 16, 0), # overlap end
Iv(40, 45, 0),
Iv(50, 55, 0),
Iv(60, 65, 0),
Iv(68, 71, 0), # overlap start
Iv(70, 75, 0),
]
IL.Lapper(data)
end
function setup_single()
data = [Iv(10, 35, 0)]
IL.Lapper(data)
end
@testset "Query Stop Interval Start" begin
lapper = setup_nonoverlapping()
bits = IL.Bits(lapper.intervals)
cursor = Ref(1)
@test nothing == Base.iterate(IL.find(lapper, 30, 35))
@test nothing == Base.iterate(IL.seek(lapper, 30, 35, cursor))
@test IL.count(bits, 30, 35) == length(collect(IL.find(lapper, 30, 35)))
end
# Test that a query that overlaps the start of an interval returns that interval
@testset "Query Overlaps Interval Start" begin
lapper = setup_nonoverlapping()
cursor = Ref(1)
expected = Iv(20, 30, 0)
@test expected == Base.iterate(IL.find(lapper, 15, 25))[1]
@test expected == Base.iterate(IL.seek(lapper, 15, 25, cursor))[1]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 15, 25) == length(collect(IL.find(lapper, 15, 25)))
end
# Test that a query that overlaps the stop of an interval returns that interval
@testset "Query Overlaps Interval Stop" begin
lapper = setup_nonoverlapping()
cursor = Ref(1)
expected = Iv(20, 30, 0)
@test expected == Base.iterate(IL.find(lapper, 25, 35))[1]
@test expected == Base.iterate(IL.seek(lapper, 25, 35, cursor))[1]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 25, 35) == length(collect(IL.find(lapper, 25, 35)))
end
# Test that a query that is enveloped by interval returns interval<Paste>
@testset "Interval Envelops Query" begin
lapper = setup_nonoverlapping()
cursor = Ref(1)
expected = Iv(20, 30, 0)
@test expected == Base.iterate(IL.find(lapper, 22, 27))[1]
@test expected == Base.iterate(IL.seek(lapper, 22, 27, cursor))[1]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 22, 27) == length(collect(IL.find(lapper, 22, 27)))
end
# Test that a query that envolops an interval returns that interval
@testset "Query Envelops Interval" begin
lapper = setup_nonoverlapping()
cursor = Ref(1)
expected = Iv(20, 30, 0)
@test expected == Base.iterate(IL.find(lapper, 15, 35))[1]
@test expected == Base.iterate(IL.seek(lapper, 15, 35, cursor))[1]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 15, 35) == length(collect(IL.find(lapper, 15, 35)))
end
@testset "Overlapping Intervals" begin
lapper = setup_overlapping()
cursor = Ref(1)
e1 = Iv(0, 15, 0)
e2 = Iv(10, 25, 0)
@test [e1, e2] == collect(IL.find(lapper, 8, 20))
@test [e1, e2] == collect(IL.seek(lapper, 8, 20, cursor))
@test 2 == length(collect(IL.find(lapper, 8, 20)))
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 8, 20) == length(collect(IL.find(lapper, 8, 20)))
end
@testset "Merge Overlaps" begin
lapper = setup_badlapper()
expected = [
Iv( 10, 16, 0),
Iv( 40, 45, 0),
Iv( 50, 55, 0),
Iv( 60, 65, 0),
Iv( 68, 120, 0), # max_len = 50
]
IL.merge_overlaps!(lapper)
@test expected == lapper.intervals
end
@testset "Lapper Coverage" begin
lapper = setup_badlapper()
before = IL.coverage(lapper)
IL.merge_overlaps!(lapper)
after = IL.coverage(lapper)
@test before == after
lapper = setup_nonoverlapping()
coverage = IL.coverage(lapper)
@test coverage == 60
end
@testset "Interval Intersections" begin
i1 = Iv(70, 120, 0)
i2 = Iv(10, 15, 0)
i3 = Iv( 10, 15, 0) # exact overlap
i4 = Iv( 12, 15, 0) # inner overlap
i5 = Iv( 14, 16, 0) # overlap end
i6 = Iv( 40, 50, 0)
i7 = Iv( 50, 55, 0)
i8 = Iv( 60, 65, 0)
i9 = Iv( 68, 71, 0) # overlap start
i10 = Iv( 70, 75, 0)
@test IL.intersectlen(i2, i3) == 5 # exact match
@test IL.intersectlen(i2, i4) == 3 # inner intersect
@test IL.intersectlen(i2, i5) == 1 # end intersect
@test IL.intersectlen(i9, i10) == 1 # start intersect
@test IL.intersectlen(i7, i8) == 0 # no intersect
@test IL.intersectlen(i6, i7) == 0 # no intersect stop = start
@test IL.intersectlen(i1, i10) == 5 # inner intersect at start
end
@testset "Union and Itersect" begin
data1 = [
Iv( 70, 120, 0), # max_len = 50
Iv( 10, 15, 0), # exact overlap
Iv( 12, 15, 0), # inner overlap
Iv( 14, 16, 0), # overlap end
Iv( 68, 71, 0), # overlap start
]
data2 = [
Iv( 10, 15, 0),
Iv( 40, 45, 0),
Iv( 50, 55, 0),
Iv( 60, 65, 0),
Iv( 70, 75, 0),
]
lapper1 = IL.Lapper(data1)
lapper2 = IL.Lapper(data2)
# Should be the same either way it's calculated
@testset "Non-merged-lappers" begin
@testset "Lapper1 vs Lapper2" begin
union, intersect = IL.union_and_intersect(lapper1, lapper2)
@test intersect == 10
@test union == 73
end
@testset "Lapper2 vs Lapper1" begin
union, intersect = IL.union_and_intersect(lapper2, lapper1)
@test intersect == 10
@test union == 73
end
end
# Should still be the same
@testset "Merged-Lappers" begin
IL.merge_overlaps!(lapper1)
IL.merge_overlaps!(lapper2)
cov1 = IL.coverage(lapper1)
cov2 = IL.coverage(lapper2)
@testset "Lapper1 vs Lapper2" begin
union, intersect = IL.union_and_intersect(lapper1, lapper2, cov1, cov2)
@test intersect == 10
@test union == 73
end
@testset "Lapper2 vs Lapper1" begin
union, intersect = IL.union_and_intersect(lapper2, lapper1, cov2, cov1)
@test intersect == 10
@test union == 73
end
end
end
@testset "Find Overlaps In Large Intervals" begin
data1 = [
Iv( 0, 8, 0),
Iv( 1, 10, 0),
Iv( 2, 5, 0),
Iv( 3, 8, 0),
Iv( 4, 7, 0),
Iv( 5, 8, 0),
Iv( 8, 8, 0),
Iv( 9, 11, 0),
Iv( 10, 13, 0),
Iv( 100, 200, 0),
Iv( 110, 120, 0),
Iv( 110, 124, 0),
Iv( 111, 160, 0),
Iv( 150, 200, 0),
]
lapper = IL.Lapper(data1);
found = collect(IL.find(lapper, 8, 11))
@test found == [
Iv( 1, 10, 0),
Iv( 9, 11, 0),
Iv( 10, 13, 0),
]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 8, 11) == length(collect(IL.find(lapper, 8, 11)))
cursor = Ref(1)
found = collect(IL.seek(lapper, 8, 11, cursor))
@test found == [
Iv( 1, 10, 0),
Iv( 9, 11, 0),
Iv( 10, 13, 0),
]
found = collect(IL.find(lapper, 145, 151))
@test found == [
Iv( 100, 200, 0),
Iv( 111, 160, 0),
Iv( 150, 200, 0),
]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 145, 151) == length(collect(IL.find(lapper, 145, 151)))
cursor = Ref(1)
found = collect(IL.seek(lapper, 145, 151, cursor))
@test found == [
Iv( 100, 200, 0),
Iv( 111, 160, 0),
Iv( 150, 200, 0),
]
end
@testset "Depth Sanity" begin
data1 = [
Iv( 0, 10, 0),
Iv( 5, 10, 0)
]
lapper = IL.Lapper(data1);
found = collect(IL.depth(lapper))
@test found == [
IL.Interval( 0, 5, 1),
IL.Interval( 5, 10, 2)
]
end
@testset "Depth Harder" begin
data1 = [
Iv( 1, 10, 0),
Iv( 2, 5, 0),
Iv( 3, 8, 0),
Iv( 3, 8, 0),
Iv( 3, 8, 0),
Iv( 5, 8, 0),
Iv( 9, 11, 0),
Iv( 15, 20, 0),
]
lapper = IL.Lapper(data1);
found = collect(IL.depth(lapper))
@test found == [
IL.Interval( 1, 2, 1),
IL.Interval( 2, 3, 2),
IL.Interval( 3, 8, 5),
IL.Interval( 8, 9, 1),
IL.Interval( 9, 10, 2),
IL.Interval( 10, 11, 1),
IL.Interval( 15, 20, 1),
]
end
@testset "Depth Hard" begin
data1 = [
Iv( 1, 10, 0),
Iv( 2, 5, 0),
Iv( 3, 8, 0),
Iv( 3, 8, 0),
Iv( 3, 8, 0),
Iv( 5, 8, 0),
Iv( 9, 11, 0),
]
lapper = IL.Lapper(data1);
found = collect(IL.depth(lapper))
@test found == [
IL.Interval( 1, 2, 1),
IL.Interval( 2, 3, 2),
IL.Interval( 3, 8, 5),
IL.Interval( 8, 9, 1),
IL.Interval( 9, 10, 2),
IL.Interval( 10, 11, 1),
]
end
#=
# Bug tests - these are tests that came from real life
=#
# Test that it's not possible to induce index out of bounds by pushing the
# cursor past the end of the lapper
@testset "Seek Over Len" begin
lapper = setup_nonoverlapping();
single = setup_single();
cursor = Ref(1)
count = 0
for interval in lapper.intervals
for o_interval in IL.seek(single, interval.start, interval.stop, cursor)
count += 1
end
end
end
# Test that if lower_bound puts us before the first match, we still return match
@testset "Find Over Behind First Match" begin
lapper = setup_badlapper();
e1 = Iv( 50, 55, 0)
found = Base.iterate(IL.find(lapper, 50, 55))[1];
@test found == e1
end
# When there is a very long interval that spans many little intervals, test that the
# little intervals still get returned properly
@testset "Bad Skips" begin
data = [
Iv(25264912, 25264986, 0),
Iv(27273024, 27273065 , 0),
Iv(27440273, 27440318 , 0),
Iv(27488033, 27488125 , 0),
Iv(27938410, 27938470 , 0),
Iv(27959118, 27959171 , 0),
Iv(28866309, 33141404 , 0),
]
lapper = IL.Lapper(data)
found = collect(IL.find(lapper, 28974798, 33141355))
@test found == [
Iv(28866309, 33141404 , 0),
]
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 28974798, 33141355) == length(collect(IL.find(lapper, 28974798, 33141355)))
end
| IntervalLapper | https://github.com/sstadick/IntervalLapper.jl.git |
|
[
"MIT"
] | 0.1.0 | 8efec99164aef955d6fe515667329a3d6a2dedd9 | docs | 1064 | # IntervalLapper
This is a Julia port of my Rust
[port](https://docs.rs/rust-lapper/>) of
[nim-lapper](https://github.com/brentp/nim-lapper).
## Install
```
]
add IntervalLapper
```
## Docs
See the docs for the rust-lapper project. The API's are essentially the
same. The version 5.0.0 release is virtually identical at the time of
writing this readme. https://docs.rs/rust-lapper/
## Examples
```julia
using Test
import IntervalLapper
const IL = IntervalLapper
const Iv = IL.Interval{Int}
data = map(x -> Iv(x, x + 15, 0), 0:10:100)
lapper = IL.Lapper(data)
cursor = Ref(1)
e1 = Iv(0, 15, 0)
e2 = Iv(10, 25, 0)
@test [e1, e2] == collect(IL.find(lapper, 8, 20))
@test [e1, e2] == collect(IL.seek(lapper, 8, 20, cursor))
@test 2 == length(collect(IL.find(lapper, 8, 20)))
bits = IL.Bits(lapper.intervals)
@test IL.count(bits, 8, 20) == length(collect(IL.find(lapper, 8, 20)))
```
## Benchmarks
TBD. Anecdotally seems speedy, but no optimizations have been done. I'm
sure there some funkiness with type instability or missed broadcasting
opportunities.
| IntervalLapper | https://github.com/sstadick/IntervalLapper.jl.git |
|
[
"MIT"
] | 0.1.0 | 8efec99164aef955d6fe515667329a3d6a2dedd9 | docs | 378 | ## 191012
- Added all the helper methods and tests
- TODO: benchmarks
- TODO: into BITS, or seperate lib?
- TODO: clean up module and exports
## 191011
- Basic structure is all in place.
- union_and_intersect is broken
- Need to finish adding tests
- Need to remove places where functions specialize on T when they don't
need to
- Need to add basic benchmarks of some sort
| IntervalLapper | https://github.com/sstadick/IntervalLapper.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | code | 453 | using Horus
using Documenter
DocMeta.setdocmeta!(Horus, :DocTestSetup, :(using Horus); recursive=true)
makedocs(;
modules=[Horus],
authors="Avik Sengupta <[email protected]> and contributors",
sitename="Horus.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
"Reference" => "api.md",
],
)
| Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | code | 4810 | module Horus
using Redis
using JSON3
using StructTypes
using Dates
export HorusClientConfig, HorusServerConfig, HorusJob, start_runner
export enqueue, execute
using Random
abstract type HorusConfig; end
"""
Configuration for a Client
"""
struct HorusClientConfig <: HorusConfig
backend::Any
end
HorusClientConfig(; host="127.0.0.1", port=6379, password="", db=0, sslconfig=nothing) = HorusClientConfig(RedisConnection(host, port, password, db, Redis.Transport.transport(host, port, sslconfig)))
"""
Configuration for a Sever/JobRunner
"""
struct HorusServerConfig
backend::Any
queues::Vector{String}
opts::Dict{Symbol, Any}
end
HorusServerConfig(queues=["horus_default"]; host="127.0.0.1", port=6379, password="", db=0, sslconfig=nothing, opts...) = HorusServerConfig(RedisConnection(host, port, password, db, Redis.Transport.transport(host, port, sslconfig)), queues, convert(Dict{Symbol, Any}, opts))
"""
All jobs should be subtypes of HorusJob
"""
abstract type HorusJob; end
"""
`enqueue(cfg::HorusClientConfig, typename::Type{T}, args...; queue::AbstractString="default", meta...) where T<: HorusJob`
Enqueue a job. Inputs are the type that represents that job, and the arguments
"""
function enqueue(cfg::HorusClientConfig, typename::Type{T}, args...; queue::AbstractString="horus_default", meta...) where T<: HorusJob
data = convert(Dict{Symbol, Any}, meta)
data[:typename] = string(typename)
data[:modulename] = string(parentmodule(typename))
data[:args] = args
data[:tries] = 1
data[:posted] = now()
data[:id] = incr(cfg.backend, "jobid")
data[:queue] = queue # While the q is passed in separately here, we store it with the job.
enqueue(cfg.backend, JSON3.write(data), queue)
end
"""
`enqueue(conn::RedisConnection, payload)`
Enqueue a job given a json object representation.
`payload` is converted to a string using `JSON3.write`, and queue is picked from the payload itself.
This method is low level, and does not validate that the payload is semantically correct. Use with care.
"""
function enqueue(conn::RedisConnection, payload)
enqueue(conn, JSON3.write(payload), payload[:queue])
end
"""
`enqueue(conn::RedisConnection, payload::String, queue)`
Enqueue a job given a json string representation.
This method is very low level, and does not validate that the json is syntactically or
semantically correct. End users should not use this.
"""
function enqueue(conn::RedisConnection, payload::String, queue)
lpush(conn, queue, payload)
end
"""
`fetch(cfg::HorusServerConfig)`
Fetches a job from Redis using the connection and queues from the supplied config.
Uses brpop to fetch from the queue, and returns the redis result.
Since `brpop` searches the queues in the order they are passed in,
the queues are shuffled for each call to prevent exhaustion.
If all the queues are empty, this function will block for TIMEOUT seconds
"""
function fetch(cfg::HorusServerConfig)
fetch(cfg.backend, shuffle(cfg.queues))
end
global TIMEOUT = 2
function fetch(conn::RedisConnection, queues)
brpop(conn, queues, TIMEOUT)
end
"""
`start_runner()``
Start a runner process, with a default RedisConnection talking to localhost
"""
function start_runner()
cfg = HorusServerConfig(RedisConnection(), ["default"], Dict{Symbol, Any}())
start_runner(cfg)
end
global scheduler_task = nothing
"""
`start_runner(cfg)`
Start a runner process, and block indefinitely
"""
function start_runner(cfg)
global scheduler_task = start_scheduler(cfg)
Base.atexit(handle_exit)
@info "[Horus] Starting runner loop"
while true
yield()
redisjob = fetch(cfg)
if redisjob === nothing
continue
end
job = JSON3.read(redisjob[2])
run_job(cfg, job)
end
end
function handle_exit(exitcode)
@info "Exiting, shutting down scheduler"
scheduler_done=false
sleep(POLL_INTERVAL)
end
"""
`run_job(cfg::HorusServerConfig, jobjson::String)`
Run a job, given a job definiton as a json object
"""
function run_job(cfg, jobjson)
modulename = getproperty(Main, Symbol(get(jobjson, "modulename", "Main")))
jobtype = getproperty(modulename, Symbol(jobjson[:typename]))
job = jobtype(jobjson.args...)
@info "[Horus] Processing job $(jobjson[:id])"
try
execute(job)
@info "[Horus] Successfully executed job $(jobjson[:id])"
catch (ex)
# Ensure that the log has the location of where the exception was thrown, not this place
bt = catch_backtrace()
st = stacktrace(bt)
line = st[1].line
file = string(st[1].file)
@error "[Horus] Exception executing job $(jobjson[:id])." exception=ex _line=line _file=file
retry_job(cfg, jobjson, st, string(ex))
end
end
function execute end
include("schedule.jl")
end
| Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | code | 3516 | ZPOPBYSCORE_SCRIPT = """
local key, now = KEYS[1], ARGV[1]
local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
if jobs[1] then
redis.call("zrem", key, jobs[1])
return jobs[1]
end
"""
zpopbyscore_script_sha = nothing
scheduler_done = false
scheduled_sets = ["horus_retry", "horus_schedule"]
function enqueue_jobs(cfg::HorusServerConfig)
try
for set in scheduled_sets
while !scheduler_done
job = zpopbyscore(cfg.backend, set, now().instant.periods.value)
if job === nothing
break
end
enqueue(cfg.backend, JSON3.read(job)) # need to read the queue from the job, so need to parse it.
end
end
catch e
@error "Exception enqueuing jobs in scheduler" exception=(e, catch_backtrace())
end
end
# Needs fixes in Redis.jl
# function zpopbyscore(conn, keys, argv)
# @repeat 2 try
# if lua_zpopbyscore_sha === nothing
# global lua_zpopbyscore_sha == script_load(conn, ZPOPBYSCORE_SCRIPT)
# @info "Loaded ZPOPBYSCORE to Redis"
# end
# evalsha(conn,lua_zpopbyscore_sha, 1, keys, argv )
# catch e
# @retry if startswith(e.message, "NOSCRIPT")
# global lua_zpopbyscore_sha = nothing
# end
# end
# end
function zpopbyscore(conn, keys, argv)
evalscript(conn, ZPOPBYSCORE_SCRIPT, 1, vcat(keys, argv))
end
function enqueue_at(cfg::HorusClientConfig, typename::Type{T}, time::DateTime, args...; queue::AbstractString="horus_default", meta...) where T<: HorusJob
data = convert(Dict{Symbol, Any}, meta)
data[:typename] = string(typename)
data[:modulename] = string(parentmodule(typename))
data[:args] = args
data[:tries] = 1
data[:posted] = now()
data[:id] = incr(cfg.backend, "jobid")
data[:queue] = queue
enqueue_at(conn, data, "horus_schedule", time)
end
function enqueue_at(conn::RedisConnection, payload, queue::String, time::DateTime)
enqueue_at(conn, JSON3.write(payload), queue, time.instant.periods.value)
end
function enqueue_at(conn::RedisConnection, payload::String, queue::String, time::Integer)
zadd(conn, queue, time, payload)
end
function terminate_scheduler()
global scheduler_done = true
end
POLL_INTERVAL = 5 #seconds
MAXTRIES = 20
function retry_job(cfg::HorusServerConfig, job, st, errmsg)
tries = get(job, :tries, 0) + 1
job = copy(job)
job[:tries] = tries
job[:errmsg] = errmsg
job[:stacktrace] = repr("text/plain", st[1:min(end, 5)], context=:compact=>true)
if tries > min(get(job, :maxtries, MAXTRIES), get(cfg.opts, :maxtries, MAXTRIES))
lpush(cfg.backend, "horus_dlq", jobjson)
@info "Sending job $(job[:id]) to Dead Letter Queue"
return
end
job[:queue] = get(job, :retry_queue, job[:queue]) #move job to a slower retry queue if configured
time = now()+Second(tries^4 * 5)
enqueue_at(cfg.backend, job, "horus_retry", time)
end
function start_scheduler(cfg::HorusServerConfig)
t = @task begin
while !scheduler_done
@info "Running Scheduler at $(now())"
enqueue_jobs(cfg)
wait(Base.Timer(POLL_INTERVAL))
end
@info "Shutting down Scheduler"
end
t.sticky = false
@info "Starting Scheduler"
schedule(t)
return t
end
| Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | code | 582 | using Horus
using Test
using Redis
using JSON3
global x = 1
struct TestJob1 <:HorusJob
a
end
function Horus.execute(t::TestJob1)
global x = x + t.a
end
#This test assumes redis running on localhost
@testset "Horus.jl" begin
redishost = "127.0.0.1"
#redishost = "172.23.164.254"
conf = Horus.HorusClientConfig(;host=redishost, port=6379)
Horus.enqueue(conf, TestJob1, 1)
sconf = Horus.HorusServerConfig(;host=redishost, port=6379)
redisjob = Horus.fetch(sconf)
job = JSON3.read(redisjob[2])
Horus.run_job(sconf, job)
@test x == 2
end
| Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | docs | 1437 | # Horus
[](https://github.com/aviks/Horus.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://juliahub.com/ui/Packages/General/Horus)
[](https://juliahub.com/ui/Packages/General/Horus)
[](https://juliahub.com/ui/Packages/General/Horus?t=2)
[](https://docs.juliahub.com/General/Horus/stable/)
Simple background jobs for Julia, backed by Redis.
Horus.jl is a package for creating and running background jobs in Julia
There are many libraries in other languages that provide similar functionality, such as Sidekiq, Resque (both Ruby) and Celery (Python). Of these, Sidekiq is probably the closest in spirit to this library.
Currently requires a [patch to Redis.jl](https://github.com/JuliaDatabases/Redis.jl/pull/110) to fix a bug. An upcoming new release of Redis.jl will fix this.
---
๐จ Please read the [documentation](https://docs.juliahub.com/General/Horus/stable/) carefully before using this package, particularly the section that describes what the design guarantees are.
---
Horus is the ancient Egyptian god of kingship, protection, healing and the sky.
| Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | docs | 56 | ```@index
```
```@autodocs
Modules = [Horus]
```
| Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 1.0.0 | b6e44b2c85bb17704bfd5c17220e24691539fd53 | docs | 5641 | ```@meta
CurrentModule = Horus
```
# Horus
Simple background jobs for Julia, backed by Redis.
Horus.jl is a package for creating background job, placing them on queues, and running them asynchronously.
## Concepts
All state is held in Redis. There is no communication between Julia processes.
* A _client_ is any Julia process that submits a job to be run. This only needs a connection to a running Redis server. A client _enqueue_s a _job_, which then gets added to a queue on Redis. This could, for example, be a webserver that recieves user requests, as part of which it kicks off a background job.
* A _server_ or _runner_ is one or more dedicated Julia processes that _fetch_ jobs from the Redis queue, and _executes_ them. These processes typically would run an infinite loop, fetching and running jobs one after another.
* Each job in enqueued on a *named* queue. The default queue name is "_default_". Each runner can be configured to fetch jobs from one or more of these named queues. This allows sophisticated configuration -- for example, fast and slow jobs can be put on different queues, and processed on separate processes, so that a slow job does not block fast jobs.
## Usage
A job is described using a custom struct, that subtypes `Horus.HorusJob`. Here we define an `EmailJob` that contains all the attributes necessary to send an email.
```
module MyPackage
using Horus
struct EmailJob
recipient::String
subject::String
message::String
end
end
```
A client configuration is generated with a RedisConnection. This configuration object can then be used to enqueue a job. Here the `EmailJob` is enqueued on the `emails` queue.
```
module MyPackage
...
cfg = HorusClientConfig(;host="172.23.164.254", port=6379)
Horus.enqueue(cfg, EmailJob, "[email protected]",
"A Test Email",
"The body of said email"
; queue = "emails"
)
end
```
The code to perform the job (in this case, send an email) should be written in an override of
the `Horus.execute` function for the specific job type. Here we define `execute` for our `EmailJob` type, which then uses the existing `SMTPClient` julia package to actually send an email.
```
module MyPackage
...
using SMTPClient
function Horus.execute(job::EmailJob)
opt = SendOptions()
SMTPClient.send("smtps://your.server",
[job.recipient],
"[email protected]", IOBuffer(
"Date: Fri, 18 Oct 2024 21:44:29 +0100\r\n" *
"From: Sender <[email protected]>\r\n" *
"To: $(job.recipient)\r\n" *
"Subject: $(job.subject)\r\n" *
"\r\n" *
"$(job.body)\r\n")
)
end
```
On the server/runner, ensure your julia environment contains all the necessary dependencies, and
run a script that looks something like this:
```
using MyPackage
using Horus
using Redis
conn = RedisConnection(;host="x.x.x.x", port=6379)
scfg = HorusServerConfig(["emails"]; host="172.23.164.254", port=6379)
start_runner(scfg) ## will block indefinitely.
```
Jobs that fail are sent to a retry queue, and then executed using an exponential backoff strategy. The maximum number of retries can be set on a server configuration, or within an individual job, or is 20 by default. If not set elsewhere, a maximum of 20 retries are attempted. Jobs that have failed all their allowed retries are sent to a dead letter queue for manual processing.
Jobs can also be enqueued for running at a particular time using the `enqueue_at` method. Currently, scheduled jobs are executed within an accuracy of upto 10 seconds.
## Design & Guarantees
* The primary principle is simplicity, which will hopefully lead to robustness. However, the cost of that simplicity is that certain transactional behaviours cannot be guaranteed. We believe that is a worthwhile tradeoff that has been proven in many real world scenarios over the years. Read below for the details for what can and can't be guaranteed.
* Runners can be simple, single threaded code. To increase throughput, runners can be scaled horizontally using multiple independent Julia processes, fetching from the same queue(s).
* When multiple runners are be launched simultaneously against a queue, a job will *only* be made available to a single runner. A single job will never be fetched by two or more runners.
* The execution of the job will be protected by a try/catch -- thus logic errors or bugs in the job execution code will usually not bring down a runner.
* However, there is always the possibility of the runner process crashing when executing a job. The server will attempt to record this fact in Redis, but in the current implementation doesn't give any guarantees. Logs should make the information about crashed workers apparent, including which job it was running when it crashed. This behavior allows you to manually retry that job if needed. While this should be a rare occurance (and this architecture itself has been validated in similar libraries in other languages), in practice this means that production use of this package should typically provide for log aggregation and monitoring, as well as process monitoring. We hope this is a standard part of most production environments in this day and age.
## TODO
* Admin web services
* on_exit behaviour
* OpenTelemetry support
* Document which julia objects can be serialized through this method, and which not | Horus | https://github.com/aviks/Horus.jl.git |
|
[
"MIT"
] | 0.3.2 | 96e65755f306264e9cc07e754bb955145c1f7354 | code | 7513 | function sample_genotype(geno::T, n_alleles::Int) where T<:Genotype
sample(SVector(geno), n_alleles, replace = false)
end
function sample_genotype(geno::Missing, n_alleles::Int)
return [missing]
end
function haploid_cross!(data::DataFrame, p1::T, p2::T) where T <: GenoArray
iter_df = DataFrames.groupby(data, :name)
for simulation in iter_df
all_alleles = getindex.(collect.(zip.(p1,p2)), 1)
offspring_geno = Tuple(Base.Iterators.flatten(rand.(all_alleles, 1)) |> collect)
miss_idx = reduce(union, findall.(i -> i == (missing,), offspring_geno))
simulation.genotype[Not(miss_idx)] .= offspring_geno[Not(miss_idx)]
end
return data
end
function polyploid_cross!(data::DataFrame, p1::T, p2::T; ploidy::Signed) where T <: GenoArray
iter_df = DataFrames.groupby(data, :name)
n_alleles = ploidy รท 2
for simulation in iter_df
p1_contrib = Base.Iterators.flatten(sample_genotype.(p1, n_alleles)) |> collect
p2_contrib = Base.Iterators.flatten(sample_genotype.(p2, n_alleles)) |> collect
miss_idx = reduce(union, findall.(ismissing, [p1_contrib, p2_contrib]))
offspring_geno = zip(p1_contrib, p2_contrib) |> collect
simulation.genotype[Not(miss_idx)] .= offspring_geno[Not(miss_idx)]
end
return data
end
"""
cross(data::PopData, parent1::String, parent2::String; n::Int = 100, generation::String = "F1")
Simulate a breeding cross between individuals `parent1` and `parent2` from a `PopData` object.
Returns PopData consisting of `n` offspring resulting from the cross.
#### Keyword Arguments
- `n` : Integer of number of offspring to generate (default: `100`)
- `generation` : A string to assign `population` identity to the offspring (default: `"F1"`)
"""
function cross(data::PopData, parent1::String, parent2::String; n::Int = 100, generation::String = "F1")
# check for presence of parents
err = ""
err *= parent1 โ (@view data.sampleinfo[!, :name]) ? "$parent1 " : ""
err *= parent2 โ (@view data.sampleinfo[!, :name]) ? "$parent2" : ""
err != "" && error("One or more parents not found in PopData: $err")
# Get the ploidy value & check for equal ploidy
p1_ploidy = data.sampleinfo.ploidy[data.sampleinfo.name .== parent1] |> first
p2_ploidy = data.sampleinfo.ploidy[data.sampleinfo.name .== parent2] |> first
p1_ploidy != p2_ploidy && error("Parents must have identical ploidy. Parent1 = $p1_ploidy | Parent2 = $p2_ploidy")
# check for parents not having mixed ploidy
p1_ploidy isa AbstractVector && error("Parent $parent1 has mixed ploidy, which is unsupported")
p2_ploidy isa AbstractVector && error("Parent $parent2 has mixed ploidy, which is unsupported")
# get parental genotypes
p1 = genotypes(data, parent1)
p2 = genotypes(data, parent2)
loci = data.locusinfo.locus
# pre-allocate all output information
out_loci_names = repeat(loci, outer = n)
_padding = length(string(n))
out_offspring = repeat(["$generation" * "_" * lpad("$i", _padding, "0") for i in 1:n], inner = length(loci))
out_population = fill(generation, n * length(loci))
out_geno = similar(p1, n * length(loci))
out_loci = DataFrame(
:name => PooledArray(out_offspring, compress = true),
:population => PooledArray(out_population, compress = true),
:locus => PooledArray(out_loci_names, compress = true),
:genotype => out_geno
)
# perform the cross
if p1_ploidy == 1
haploid_cross!(data, parent1, parent2)
elseif p1_ploidy โ [2, 4, 6, 8]
polyploid_cross!(out_loci, p1, p2, ploidy = p1_ploidy)
else
throw(MethodError("Currently supported ploidy: 1, 2, 4, 6, 8"))
end
out = PopData(out_loci)
insertcols!(out.sampleinfo, :parents => PooledArray(fill((parent1,parent2), n), compress = true))
return out
end
"""
cross(parent_1::Pair, parent_2::Pair, n::Int = 100, generation::String = "F1")
Simulate a breeding cross between individuals `parent` and `parent2` from two different `PopData` objects.
Returns PopData consisting of `n` offspring resulting from the cross. `parent_1_data` and `parent_2_data`
are positional arguments, therefore they must be written without keywords and in the order of parents 1, parent 2.
#### Keyword Arguments
- `parent_1` : Pair of `PopData => "Parent1Name"`
- `parent_2` : Pair of `PopData => "Parent1Name"`
- `n` : Integer of number of offspring to generate (default: `100`)
- `generation` : A string to assign `population` identity and name prefix to the offspring (default: `"F1"`)
"""
function cross(parent_1::Pair, parent_2::Pair; n::Int = 100, generation::String = "F1")
parent_1_data = parent_1.first
parent_2_data = parent_2.first
loci = parent_1_data.locusinfo.locus
loci_p2 = parent_2_data.locusinfo.locus
length(loci) != length(loci_p2) && error("Both datasets must have the same number of loci. $parent1 : $length(loci) | $parent2 : $length(loci_p2")
# verify identical loci
loci_check = loci .!= loci_p2
culprits_p1 = loci[loci_check]
culprits_p2 = loci_p2[loci_check]
culp_print = "Parent 1\tParent 2" * "\n---------\t---------\n" * join("$i\t$j\n" for (i,j) in zip(culprits_p1, culprits_p2))
length(culprits_p1) > 0 && error("Both datasets must have loci in the same order. Loci causing this error:\n" * culp_print)
parent1 = parent_1.second
parent2 = parent_2.second
# check for presence of parents
parent1 โ (@view parent_1_data.sampleinfo[!, :name]) && error("$parent1 not found in PopData")
parent2 โ (@view parent_2_data.sampleinfo[!, :name]) && error("$parent2 not found in PopData")
# Get the ploidy value & check for equal ploidy
p1_ploidy = parent_1_data.sampleinfo.ploidy[parent_1_data.sampleinfo.name .== parent1] |> first
p2_ploidy = parent_2_data.sampleinfo.ploidy[parent_2_data.sampleinfo.name .== parent2] |> first
p1_ploidy != p2_ploidy && error("Parents must have identical ploidy. Parent1 = $p1_ploidy | Parent2 = $p2_ploidy")
# check for parents not having mixed ploidy
p1_ploidy isa AbstractVector && error("Parent $parent1 has mixed ploidy, which is unsupported")
p2_ploidy isa AbstractVector && error("Parent $parent2 has mixed ploidy, which is unsupported")
# get parental genotypes
p1 = genotypes(parent_1_data, parent1)
p2 = genotypes(parent_2_data, parent2)
# pre-allocate all output information
out_loci_names = repeat(loci, outer = n)
_padding = length(string(n))
out_offspring = repeat(["$generation" * "_" * lpad("$i", _padding, "0") for i in 1:n], inner = length(loci))
out_population = fill(generation, n * length(loci))
out_geno = similar(p1, n * length(loci))
out_loci = DataFrame(:name => out_offspring, :population => out_population, :locus => out_loci_names, :genotype => out_geno)
out_loci.name = PooledArray(out_loci.name, compress = true)
out_loci.population = PooledArray(out_loci.population, compress = true)
out_loci.locus = PooledArray(out_loci.locus, compress = true)
# perform the cross
if p1_ploidy == 1
haploid_cross!(data, parent1, parent2)
elseif p1_ploidy โ [2, 4, 6, 8]
polyploid_cross!(out_loci, p1, p2, ploidy = p1_ploidy)
else
error("Currently supported ploidy: 1, 2, 4, 6, 8")
end
out = PopData(out_loci)
insertcols!(out.sampleinfo, :parents => fill((parent1,parent2), n))
return out
end | PopGenSims | https://github.com/pdimens/PopGenSims.jl.git |
|
[
"MIT"
] | 0.3.2 | 96e65755f306264e9cc07e754bb955145c1f7354 | code | 1347 | """
# Population genetics simulations
Repository: https://www.github.com/pdimens/PopGenSims.jl/
Documentation: https://biojulia.net/PopGen.jl/
\nA few things things you can do to get started:
## Import Data using PopGenCore
- `PopGenCore.read(filename; kwargs...)`
- `genepop(infile; kwargs...)` or similar file-specific importer
- use available `@gulfsharks` or `@nancycats` datasets
## Simulate Samples within Populations
- `simulate(popdata, n = 100)` to simulate samples using population-specific allele frequencies
## Simulate breeding crosses
- `cross(popdata, parent1, parent2, ...)` to cross two parents from the same PopData
- `cross(popdata => parent1, popdata => parent2, ...) to cross two parents from different PopData (e.g. backcross)
## Simulate siblingship
- `simulatekin(popdata, fullsib = , halfsib = , unrelated = , ...)` to simulate breeding events generating pairs of individuals of known kinship.
"""
module PopGenSims
using DataFrames, PooledArrays, StaticArrays
using StatsBase: sample, Weights
import PopGenCore: read, write
using PopGenCore:
allelefreq,
copy,
Genotype,
GenoArray,
genotypes,
PopData,
sort
include("Cross.jl")
export cross, backcross
include("Samples.jl")
export simulate
include("Sibship.jl")
export simulatekin
include("Utils.jl")
export append, append!
end
| PopGenSims | https://github.com/pdimens/PopGenSims.jl.git |
|
[
"MIT"
] | 0.3.2 | 96e65755f306264e9cc07e754bb955145c1f7354 | code | 6485 | """
sample_locus(locus::Dict, n::Int, ploidy::Signed)
Internal function used by `simulate` to take a `Dict` of alleles => frequencies of a locus and return
`n` number of genotypes (n_alleles = `ploidy`) by using weighted sampling of the
allele-frequency pairs.
**Example**
```julia
d = Dict(133 => 0.125,135 => 0.5625,143 => 0.25,137 => 0.0625)
julia> sample_locus(d, 3, 2)
5-element Array{Tuple{Int16,Int16},1}:
(133, 135)
(135, 135)
(143, 137)
julia> sample_locus(d, 3, 3)
5-element Array{Tuple{Int16,Int16,Int16},1}:
(135, 135, 133)
(143, 135, 133)
(137, 135, 135)
```
"""
function sample_locus(locus::Dict, n::Int, ploidy::Signed)
isempty(locus) && return fill(missing, n)
k,v = collect(keys(locus)), collect(values(locus))
Tuple.(sort.([sample(k, Weights(v), ploidy) for i in 1:n]))
end
"""
simulate(data::PopData; n::Int)
Simulate `n` number of individuals per population using per-population
allele frequencies derived from a `PopData` object. Returns a new `PopData` object with `n` * `n_populations` samples.
simulate(data::PopData; scale::Int)
Simulate individuals per population in the same proportions they appear in the PopData
using per-population allele frequencies. Simulation volume can be multiplied using `scale`,
i.e. if you want to keep the same proportions but generate twice the number of samples, `scale`
would be `2`. Returns a new `PopData` object with `n_samples` * `scale` samples.
**Example**
```julia
julia> cats = @nanycats;
julia> sims = simulate(cats, n = 100)
PopData{Diploid, 9 Microsatellite Loci}
Samples: 1700
Populations: 17
julia> sims_prop = simulate(cats, scale = 3)
PopData{Diploid, 9 Microsatellite Loci}
Samples: 711
Populations: 17
```
"""
function simulate(data::PopData; n::Union{Dict{String,Int}, Int}=0, scale::Int = 0)
n == scale && throw(ArgumentError("Please use one of n (flat) or scale (proportional) keywords for simulations. See ?simulate for more info."))
((n isa Dict) | (n != 0)) & !iszero(scale) && throw(ArgumentError("Must use only one of n (flat) or scale (proportional) keywords for simulations. See ?simulate for more info."))
length(data.metadata.ploidy) > 1 && error("Simulations do not work on mixed-ploidy data (yet)")
geno_out =
if n != 0
n isa Int ? _simulateflat(data, n) : _simulatearbitrary(data, n)
else
_simulatescale(data, scale)
end
transform!(
geno_out,
:name => (i -> PooledArray(i, compress = true)) => :name,
:population => (i -> PooledArray(i, compress = true)) => :population,
:locus => (i -> PooledArray(i, compress = true)) => :locus,
:genotype
)
PopData(geno_out)
end
function _simulatearbitrary(data::PopData, n::Dict{String, Int})
ploidy = data.metadata.ploidy
pops = collect(keys(n))
popcounts = collect(values(n))
nloci = data.metadata.loci
nsamples = sum(popcounts)
# instantiate output df
simnames = repeat(["sim_" * "$i" for i in 1:nsamples], inner = nloci)
popnames = reduce(vcat, [fill(i, nloci * j) for (i,j) in zip(pops,popcounts)])
locinames = repeat(unique(data.genodata.locus), outer = nsamples)
geno_out = DataFrame(:name => simnames, :population => popnames, :locus => locinames, :genotype => similar(data.genodata.genotype, length(locinames)))
# generate allele freqs per population
gdf = groupby(data.genodata[data.genodata.population .โ Ref(pops),:], [:population, :locus])
freqs = DataFrames.combine(
gdf,
:genotype => allelefreq => :frq,
)
freqs[:, :n] .= getindex.(Ref(n), freqs.population)
transform!(freqs, [:frq, :n] => ((i,j) -> sample_locus.(i, j, ploidy)) => :frq)
# populate out df
out_gdf = groupby(geno_out, :population)
geno_gdf = groupby(freqs, :population)
for pop in pops
out_gdf[(population = pop,)][:,:genotype] = reduce(hcat, geno_gdf[(population = pop,)].frq) |> permutedims |> vec
end
return geno_out
end
function _simulateflat(data::PopData, n::Int)
ploidy = data.metadata.ploidy
pops = unique(data.sampleinfo.population)
npops = data.metadata.populations
nloci = data.metadata.loci
# instantiate output df
simnames = repeat(["sim_" * "$i" for i in 1:(n*npops)], inner = nloci)
popnames = repeat(pops, inner = (nloci * n))
locinames = repeat(unique(data.genodata.locus), outer = (n * npops))
geno_out = DataFrame(:name => simnames, :population => popnames, :locus => locinames, :genotype => similar(data.genodata.genotype, length(locinames)))
# generate allele freqs per population
gdf = groupby(data.genodata, [:population, :locus])
freqs = DataFrames.combine(
gdf,
:genotype => allelefreq => :frq
)
# create new genotypes
transform!(freqs, :frq => (i -> sample_locus.(i,n,ploidy)) => :frq)
# populate out df
out_gdf = groupby(geno_out, :population)
geno_gdf = groupby(freqs, :population)
for pop in pops
out_gdf[(population = pop,)][:,:genotype] = reduce(hcat, geno_gdf[(population = pop,)].frq) |> permutedims |> vec
end
return geno_out
end
function _simulatescale(data::PopData, n::Int)
ploidy = data.metadata.ploidy
pops = unique(data.sampleinfo.population)
popcounts = [count(i -> i == j, data.sampleinfo.population) for j in pops]
nloci = data.metadata.loci
# instantiate output df
simnames = repeat(["sim_" * "$i" for i in 1:(data.metadata.samples * n)], inner = nloci)
popnames = reduce(vcat, [fill(i, nloci * j * n) for (i,j) in zip(pops,popcounts)])
locinames = repeat(unique(data.genodata.locus), outer = (n * data.metadata.samples))
geno_out = DataFrame(:name => simnames, :population => popnames, :locus => locinames, :genotype => similar(data.genodata.genotype, length(locinames)))
# generate allele freqs per population
gdf = groupby(data.genodata, [:population, :locus])
freqs = DataFrames.combine(
gdf,
:genotype => allelefreq => :frq
)
freqs[:, :n] .= repeat(popcounts, inner = nloci)
transform!(freqs, [:frq, :n] => ((i,j) -> sample_locus.(i, j*n, ploidy)) => :frq)
# populate out df
out_gdf = groupby(geno_out, :population)
geno_gdf = groupby(freqs, :population)
for pop in pops
out_gdf[(population = pop,)][:,:genotype] = reduce(hcat, geno_gdf[(population = pop,)].frq) |> permutedims |> vec
end
return geno_out
end | PopGenSims | https://github.com/pdimens/PopGenSims.jl.git |
|
[
"MIT"
] | 0.3.2 | 96e65755f306264e9cc07e754bb955145c1f7354 | code | 8820 | """
_cross(parent1::Vector{Vector{T}}, parent2::Vector{Vector{T}}) where T <: Signed
Simulate a mating cross between two parents, generating one offspring with the same
ploidy as `parent1`. This variant of `cross` is used internally for `simulatekin`.
"""
function _cross(parent1::Vector{Vector{T}}, parent2::Vector{Vector{T}}) where T <: Signed
ploidy = length(first(parent1))
ploidy == 1 && error("Haploid crosses are not yet supported. Please file and issue or pull request")
if ploidy == 2
p1_contrib = rand.(parent1)
p2_contrib = rand.(parent2)
geno_out = sort.(zip(p1_contrib, p2_contrib))
elseif iseven(ploidy)
n_allele = ploidy รท 2
p1_contrib = sample.(parent1, n_allele, replace = false)
p2_contrib = sample.(parent2, n_allele, replace = false)
geno_out = Tuple.(sort!.(append!.(p1_contrib, p2_contrib)))
else
# special method to provide a 50% chance of one parent giving more alleles than the other
rng = rand()
contrib_1 = ploidy รท 2
contrib_2 = ploidy - contrib_1
p1_contrib = rng > 0.5 ? sample.(parent1, contrib_1, replace = false) : sample.(parent1, contrib_2, replace = false)
p2_contrib = rng > 0.5 ? sample.(parent2, contrib_2, replace = false) : sample.(parent2, contrib_1, replace = false)
geno_out = Tuple.(sort!.(append!.(p1_contrib, p2_contrib)))
end
return geno_out
end
function _parentoffspring(alleles::Dict, loc::Vector{String}, n::Int, ploidy::Signed, padding::Int)
out_df = DataFrame(:locus => loc)
for i in 1:n
prefix = "sim" * lpad(i, padding, '0')
p1,p2 = [simulate_sample(alleles, loc, ploidy = ploidy) for j in 1:2]
insertcols!(out_df, Symbol(prefix * "_parent") => Tuple.(p1))
insertcols!(out_df, Symbol(prefix * "_offspring") => _cross(p1, p2))
end
out_df = rename!(select!(DataFrames.stack(out_df, Not(:locus)), 2, 1, 3), [:name, :locus, :genotype])
insertcols!(out_df, 2, :population => "parent_offspring")
return out_df
end
function _fullsib(alleles::Dict, loc::Vector{String}, n::Int, ploidy::Signed, padding::Int)
out_df = DataFrame(:locus => loc)
for i in 1:n
prefix = "sim" * lpad(i, padding, '0')
p1,p2 = [simulate_sample(alleles, loc, ploidy = ploidy) for j in 1:2]
[insertcols!(out_df, Symbol(prefix * "_fullsib_$j") => _cross(p1, p2)) for j in 1:2]
end
out_df = rename!(select!(DataFrames.stack(out_df, Not(:locus)), 2, 1, 3), [:name, :locus, :genotype])
insertcols!(out_df, 2, :population => "fullsib")
return out_df
end
function _halfsib(alleles::Dict, loc::Vector{String}, n::Int, ploidy::Signed, padding::Int)
out_df = DataFrame(:locus => loc)
for i in 1:n
prefix = "sim" * lpad(i, padding, '0')
p1,p2,p3 = [simulate_sample(alleles, loc, ploidy = ploidy) for j in 1:3]
insertcols!(out_df, Symbol(prefix * "_halfsib_1") => _cross(p1, p2))
insertcols!(out_df, Symbol(prefix * "_halfsib_2") => _cross(p1, p3))
end
out_df = rename!(select!(DataFrames.stack(out_df, Not(:locus)), 2, 1, 3), [:name, :locus, :genotype])
insertcols!(out_df, 2, :population => "halfsib")
return out_df
end
function _unrelated(alleles::Dict, loc::Vector{String}, n::Int, ploidy::Signed, padding::Int)
out_df = DataFrame(:locus => loc)
for i in 1:n
prefix = "sim" * lpad(i, padding, '0')
p1,p2 = [simulate_sample(alleles, loc, ploidy = ploidy) for j in 1:2]
insertcols!(out_df, Symbol(prefix * "_unrelated_1") => Tuple.(p1))
insertcols!(out_df, Symbol(prefix * "_unrelated_2") => Tuple.(p2))
end
out_df = rename!(select!(DataFrames.stack(out_df, Not(:locus)), 2, 1, 3), [:name, :locus, :genotype])
insertcols!(out_df, 2, :population => "unrelated")
return out_df
end
"""
simulatekin(data::PopData; fullsib::Int, halfsib::Int, unrelated::Int, parentoffspring::Int, ploidy::Signed)
Simulate mating crosses to generate sample pairs with any combination of the specified relationships,
returning a `PopData` object. The simulations will first generate parents of a given
`ploidy` (inferred or specified) by drawing alleles from a global allele pool derived
from the given `data` (i.e. weighted by their frequencies).
#### Relationship
Simulated parents will be crossed to generate offspring depending on the relationship:
- `fullsib` : 2 parents generate 2 full-sibling offspring, return 2 offspring
- `halfsib` : 3 parents generate 2 half-sibling offspring, returns 2 offspring
- `unrelated` : returns 2 randomly generated individuals from the global allele pools
- `parentoffspring` : 2 parents generate 1 offspring, returns 1 offspring and 1 parent
#### Identifying pairs
The relationship between the newly generated samples can be identified by:
- Sample `name`s will specify their simulation number, relationship, and whether parent or offspring
- Naming convention: [simulation #]_[relationship]_[offspring #]
- example: sim005_fullsib_1 = [simulation 005]_[full sibling]_[offspring 1]
- Their `population` name will be that of their relationship (e.g. "fullsib")
#### Ploidy
If the samples in your `PopData` are of a single ploidy, then `ploidy = 0` (the default) will infer the ploidy
and generate parents and offspring according to the ploidy of your data. If you have mixed-ploidy data or wish
to generate parents and offspring of a ploidy different than the source `PopData` you can specify the ploidy
with which to simulate parents and offspring. For example, if your `PopData` is diploid, but you wish to generate
triploid or octoploid parents and offspring, you would specify `ploidy = 3` or `ploidy = 8` repectively.
#### Odd ploidy
If trying to create offspring with an odd ploidy (3,5, etc.), each parent has a 50% chance of
contributing (ยฝ ร ploidy) + 1 alleles for all loci to the offspring. In other words, if ploidy = 3,
there's a 50% chance parent_1 will give 2 alleles for every locus for that simulated cross.
**Example**
```
julia> cats = @nanycats ;
julia> cat_sims = simulatekin(cats, fullsib = 10, halfsib = 50)
PopData{Diploid, 9 Microsatellite loci}
Samples: 120
Populations: 2
julia> cat_sims.sampleinfo
120ร3 DataFrame
Row โ name population ploidy
โ String String Int64
โโโโโโผโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
1 โ sim01_fullsib_1 fullsib 2
2 โ sim01_fullsib_2 fullsib 2
3 โ sim02_fullsib_1 fullsib 2
4 โ sim02_fullsib_2 fullsib 2
5 โ sim03_fullsib_1 fullsib 2
6 โ sim03_fullsib_2 fullsib 2
โฎ โ โฎ โฎ โฎ
115 โ sim48_halfsib_1 halfsib 2
116 โ sim48_halfsib_2 halfsib 2
117 โ sim49_halfsib_1 halfsib 2
118 โ sim49_halfsib_2 halfsib 2
119 โ sim50_halfsib_1 halfsib 2
120 โ sim50_halfsib_2 halfsib 2
108 rows omitted
```
"""
function simulatekin(data::PopData; fullsib::Int = 0, halfsib::Int = 0, unrelated::Int = 0, parentoffspring::Int = 0, ploidy::Signed = 0)
if iszero(sum([fullsib, halfsib, unrelated, parentoffspring]))
throw(ArgumentError("Please specify at least one of: \n- \"fullsib\" \n- \"halfsib\" \n- \"unrelated\"\n- \"parentoffspring\""))
end
# automatic ploidy finding
if ploidy == 0
ploids = data.metadata.ploidy
if ploids isa AbstractVector
error("For mixed ploidy data, please specify a single ploidy with which to generate parents and offspring")
else
ploidy += ploids
end
end
loc, alleles = allele_pool(data)
# how many digits to pad the offspring names
padding = length(string(maximum([fullsib, halfsib, unrelated, parentoffspring])))
# perform the simulation if the integer > 0, otherwise return an empty boolean vector
# the empty vector is just to skip over with Base.Iterators.filter
fs = fullsib > 0 ? _fullsib(alleles, loc, fullsib, ploidy, padding) : Bool[]
hs = halfsib > 0 ? _halfsib(alleles, loc, halfsib, ploidy, padding) : Bool[]
unrl = unrelated > 0 ? _unrelated(alleles, loc, unrelated, ploidy, padding) : Bool[]
poff = parentoffspring > 0 ? _parentoffspring(alleles, loc, parentoffspring, ploidy, padding) : Bool[]
# combine the results together into a single df
geno_df = reduce(vcat, Base.Iterators.filter(!isempty, (fs, hs, unrl, poff)))
geno_df.name = PooledArray(geno_df.name, compress = true)
geno_df.population = PooledArray(geno_df.population, compress = true)
geno_df.locus = PooledArray(geno_df.locus, compress = true)
#meta_df = select(unique(geno_df, :name), 1, 2)
#insertcols!(meta_df, :ploidy => ploidy)
return PopData(geno_df)
end | PopGenSims | https://github.com/pdimens/PopGenSims.jl.git |
|
[
"MIT"
] | 0.3.2 | 96e65755f306264e9cc07e754bb955145c1f7354 | code | 4232 | """
append!(data::PopData, data2::PopData)
Add the rows of `data2` to the end of `data`. This will add the samples present
in the second `PopData` object to the first `PopData` object (mutating it).
**Note** that this is a simple appending, and you risk corrupting your `PopData` if
the two `PopData` objects do not have identical loci.
**Example**
```
julia> cats = @nancycats
PopData{Diploid, 9 Microsatellite Loci}
Samples: 237
Populations: 17
julia> purrfect_pairs = cross(cats, "N200", "N7", generation = "F1")
PopData{Diploid, 9 Microsatellite Loci}
Samples: 100
Populations: 1
julia> append!(cats, purrfect_pairs);
julia> cats
PopData{Diploid, 9 Microsatellite Loci}
Samples: 337
Populations: 18
```
"""
function Base.append!(data::PopData, data2::PopData)
n1 = data.metadata.samples
pl1 = data.metadata.ploidy
if "parents" โ names(data.sampleinfo) && "parents" โ names(data2.sampleinfo)
len = data.metadata.samples
insertcols!(
data.sampleinfo,
:parents => Vector{Union{Missing, Tuple{String,String}}}(undef, len)
)
elseif "parents" โ names(data2.sampleinfo) && "parents" โ names(data.sampleinfo)
len = length(data2.sampleinfo.name)
insertcols!(
data2.sampleinfo,
:parents => Vector{Union{Missing, Tuple{String,String}}}(undef, len)
)
end
append!(data.sampleinfo, data2.sampleinfo)
append!(data.genodata, data2.genodata)
# update metadata
data.metadata.samples = n1 + data2.metadata.samples
if pl1 != data2.metadata.ploidy
data.metadata.ploidy = Int8[pl1, data2.metadata.ploidy]
end
data.metadata.populations = length(unique(data.sampleinfo.population))
return data
end
"""
append(data::PopData, data2::PopData)
Add the rows of `data2` to the end of `data`. This will combine the samples present
in both `PopData` objects and return a new `PopData` object. **Note** that this is
a simple appending, and you risk corrupting your `PopData` if the two `PopData`
objects do not have identical loci.
**Example**
```
julia> cats = @nanycats
PopData{Diploid, 9 Microsatellite Loci}
Samples: 237
Populations: 17
julia> purrfect_pairs = cross(cats, "N200", "N7", generation = "F1")
PopData{Diploid, 9 Microsatellite Loci}
Samples: 100
Populations: 1
julia> merged_cats = append(cats, purrfect_pairs)
PopData{Diploid, 9 Microsatellite Loci}
Samples: 337
Populations: 18
```
"""
function append(data::PopData, data2::PopData)
#=
new_pdi = deepcopy(data.info)
append!(new_pdi.sampleinfo, data2.sampleinfo)
new_pdi.samples = data.info.samples + data2.info.samples
new_pdi.populations = length(unique(new_pdi.sampleinfo.population))
ploid = vcat(data.metadata.ploidy, data2.metadata.ploidy)
new_pdi.ploidy = length(unique(ploid)) == 1 ? ploid[1] : Int8[unique(ploidy)]
PopData(new_pdi, vcat(data.genodata, data2.genodata))
=#
tmp = copy(data)
append!(tmp, data2)
return tmp
end
function allele_pool(locus::T) where T <: GenoArray
Tuple(Base.Iterators.flatten(skipmissing(locus)))
end
function allele_pool(data::PopData)
# index dataframe by locus
idx_df = groupby(data.genodata, [:locus])
# instantiate dict to store alleles
# pull out loci names
loc = getindex.(keys(idx_df), :locus)
allele_dict = Dict(i => allele_pool(idx_df[(;locus = i)].genotype) for i in loc)
return string.(loc), allele_dict
end
"""
```
simulate_sample(alleles::Dict{String,NTuple}, loc::Vector{String}; ploidy::Int)
```
Using a global allele pool given by a Dict{loci,alleles} and a list of loci (`loc`), simulate
an individual with a given `ploidy`. Returns a Vector of genotypes.
**Example**
```
julia> cats = @nanycats ;
julia> loc, alleles = allele_pool(cats) ;
julia> simulate_sample(alleles, loc, ploidy = 2)
9-element Array{Array{Int16,1},1}:
[139, 129]
[146, 146]
[145, 141]
[126, 126]
[150, 148]
[148, 140]
[185, 199]
[91, 113]
[208, 208]
```
"""
function simulate_sample(alleles::Dict{String,<:Tuple}, loc::Vector{String}; ploidy::Signed)
map(i -> rand(alleles[i], ploidy), loc)
end
feature_req() = "\nPlease open an Issue or Pull Request on https://www.github.com/pdimens/PopGenSims.jl if you would like this feature implemented" | PopGenSims | https://github.com/pdimens/PopGenSims.jl.git |
|
[
"MIT"
] | 0.3.2 | 96e65755f306264e9cc07e754bb955145c1f7354 | code | 1287 | module TestCross
using PopGenCore
using PopGenSims
using Test
cats = @nancycats ;
@testset "Crosses" begin
@testset "sample genotypes" begin
@test length(PopGenSims.sample_genotype(cats.genodata.genotype[30], 2)) == 2
@test eltype(PopGenSims.sample_genotype(cats.genodata.genotype[30],1)) == eltype(cats.genodata.genotype[30])
@test length(PopGenSims.sample_genotype(missing, 2)) == 1
@test first(PopGenSims.sample_genotype(missing, 2)) === missing
end
@testset "crosses" begin
f1 = cross(cats, "N111", "N107", n = 10)
@test f1 isa PopData
@test f1.metadata.samples == 10
@test length(f1.genodata.name) == 90
f1 = cross(cats, "N111", "N107", n = 10, generation = "firstgen")
@test f1 isa PopData
@test f1.metadata.samples == 10
@test length(f1.genodata.name) == 90
f2 = cross(cats => "N111", f1 => "firstgen_10", n = 10)
@test f2 isa PopData
@test f2.metadata.samples == 10
@test length(f2.genodata.name) == 90
f2 = cross(cats => "N111", f1 => "firstgen_10", n = 10, generation = "F2")
@test f2 isa PopData
@test f2.metadata.samples == 10
@test length(f2.genodata.name) == 90
end
end
end # module TestCross | PopGenSims | https://github.com/pdimens/PopGenSims.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.