licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 1299 | using Strategems, Temporal, Indicators, Dates
# define universe and gather data
assets = ["CME_CL1", "CME_RB1"]
universe = Universe(assets)
# Load an asset's history from the local CSV cache when present; otherwise
# download it from Quandl and (optionally) save the download for next time.
function datasource(asset::String; save_downloads::Bool=true)::TS
    cache_path = joinpath(dirname(pathof(Strategems)), "..", "data", "test", "$asset.csv")
    isfile(cache_path) && return Temporal.tsread(cache_path)
    X = quandl("CHRIS/$asset")
    save_downloads && Temporal.tswrite(X, cache_path)
    return X
end
gather!(universe, source=datasource)
# define indicators and parameter space
arg_names = [:fastlimit, :slowlimit]
arg_defaults = [0.5, 0.05]
arg_ranges = [0.01:0.01:0.99, 0.01:0.01:0.99]
paramset = ParameterSet(arg_names, arg_defaults, arg_ranges)
f(x; args...) = Indicators.mama(x; args...)
indicator = Indicator(f, paramset)
# define signals that will trigger trading decisions
siglong = @signal MAMA β FAMA
sigshort = @signal MAMA β FAMA
sigexit = @signal MAMA == FAMA
# define the trading rules
longrule = @rule siglong β long 100
shortrule = @rule sigshort β short 100
exitrule = @rule sigexit β liquidate 1.0
rules = (longrule, shortrule, exitrule)
# run strategy
strat = Strategy(universe, indicator, rules)
backtest!(strat)
optimize!(strat, samples=10)
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 1251 | __precompile__(true)
# Strategems: a framework for defining, backtesting, and optimizing systematic
# trading strategies. Model types live under model/, engines under compute/.
module Strategems

using Dates
using Temporal        # TS time-series type used throughout
using Indicators      # technical indicators (mama, crossover, crossunder, ...)
using Random
using ProgressMeter

export
    # universe definitions
    Universe, gather!, get_overall_index,
    # parameter sets
    ParameterSet, count_runs, generate_combinations, generate_dict,
    # indicators
    Indicator, calculate,
    # signals
    # NOTE(review): `β, β` exports the same symbol twice; this appears to be
    # mojibake of the crossover/crossunder operators (likely `↑`, `↓`) —
    # confirm against the upstream repository.
    Signal, prep_signal, β, β, @signal,
    # rules
    # NOTE(review): `β` here likewise appears to be mojibake (likely `→`).
    Rule, @rule, β,
    # portfolios
    Portfolio,#, update_portfolio!,
    # order
    AbstractOrder, MarketOrder, LimitOrder, StopOrder, liquidate, long, buy, short, sell,
    # strategy results
    Backtest,
    # summary statistic calculations
    cum_pnl,
    # strategies
    Strategy, generate_trades, generate_trades!, backtest, backtest!, optimize, optimize!, summarize_results

# model types first (later files depend on earlier ones), then compute engines
include("model/universe.jl")
include("model/paramset.jl")
include("model/indicator.jl")
include("model/signal.jl")
include("model/rule.jl")
include("model/portfolio.jl")
include("model/order.jl")
include("model/backtest.jl")
include("model/strategy.jl")
include("compute/backtest.jl")
include("compute/optimize.jl")
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 2622 | using ProgressMeter
"""
    generate_trades(strat::Strategy; verbose::Bool=true)::Dict{String,TS}

Evaluate every rule's trigger against each asset in the universe and return,
per asset, a boolean `TS` with one column per rule (true where that rule's
trigger fires on the bar).
"""
function generate_trades(strat::Strategy; verbose::Bool=true)::Dict{String,TS}
    all_trades = Dict{String,TS}()
    # `progress` only exists when verbose; safe because `next!` is guarded too
    verbose ? progress = Progress(length(strat.universe.assets), 1, "Generating Trades") : nothing
    for asset in strat.universe.assets
        verbose ? next!(progress) : nothing
        # one boolean column per rule, indexed like the asset's price history
        trades = TS(falses(size(strat.universe.data[asset],1), length(strat.rules)),
                    strat.universe.data[asset].index)
        local indicator_data = calculate(strat.indicator, strat.universe.data[asset])
        for (i,rule) in enumerate(strat.rules);
            trades[:,i] = rule.trigger.fun(indicator_data)
        end
        all_trades[asset] = trades
    end
    return all_trades
end
"""
    generate_trades!(strat::Strategy; kwargs...)::Nothing

In-place variant of [`generate_trades`](@ref): store the per-asset trade
signals on `strat.backtest.trades`. Keyword arguments are forwarded verbatim.
"""
function generate_trades!(strat::Strategy; kwargs...)::Nothing
    trades = generate_trades(strat; kwargs...)
    strat.backtest.trades = trades
    return nothing
end
#TODO: generalize this logic to incorporate order types
#TODO: generalize this logic to incorporate order types
"""
    backtest(strat::Strategy; px_trade=:Open, px_close=:Settle, verbose=true)

Simulate the strategy bar-by-bar for each asset, filling orders at the
`px_trade` field and marking positions to the `px_close` field. Returns, per
asset, the price history augmented with `Pos` (position), `PNL` (per-bar
profit/loss) and `CumPNL` columns.
"""
function backtest(strat::Strategy; px_trade::Symbol=:Open, px_close::Symbol=:Settle, verbose::Bool=true)::Dict{String,TS{Float64}}
    # lazily generate the trade signals if they have not been computed yet
    if isempty(strat.backtest.trades)
        generate_trades!(strat, verbose=verbose)
    end
    result = Dict{String,TS}()
    verbose ? progress = Progress(length(strat.universe.assets), 1, "Running Backtest") : nothing
    for asset in strat.universe.assets
        verbose ? next!(progress) : nothing
        trades = strat.backtest.trades[asset].values
        N = size(trades, 1)
        summary_ts = strat.universe.data[asset]
        trade_price = summary_ts[px_trade].values
        close_price = summary_ts[px_close].values
        pos = zeros(Float64, N)
        pnl = zeros(Float64, N)
        do_trade = false
        for t in 2:N
            # signals observed on bar t-1 are acted on at bar t's trade price
            for (i,rule) in enumerate(strat.rules)
                if trades[t-1,i] != 0
                    do_trade = true
                    # map the rule's action to a position sign: +1 long, -1 short, 0 flat
                    order_side = rule.action in (long,buy) ? 1 : rule.action in (short,sell) ? -1 : 0
                    (order_qty,) = rule.args
                    pos[t] = order_qty * order_side
                    # entry-bar PnL: move from the fill price to the close
                    pnl[t] = pos[t] * (close_price[t] - trade_price[t])
                end
            end
            if !do_trade
                # no fill this bar: carry the position and mark close-to-close
                pos[t] = pos[t-1]
                pnl[t] = pos[t] * (close_price[t]-close_price[t-1])
            end
            do_trade = false
        end
        summary_ts = [summary_ts TS([pos pnl cumsum(pnl)], summary_ts.index, [:Pos,:PNL,:CumPNL])]
        result[asset] = summary_ts
    end
    return result
end
"""
    backtest!(strat::Strategy; kwargs...)::Nothing

Run [`backtest`](@ref) and store the per-asset results on
`strat.backtest.backtest`. Keyword arguments are forwarded verbatim.
"""
function backtest!(strat::Strategy; kwargs...)::Nothing
    outcome = backtest(strat; kwargs...)
    strat.backtest.backtest = outcome
    return nothing
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 1498 | using Random, ProgressMeter
import Base: copy
# Shallow copy: the new Strategy SHARES universe/indicator/rules with the
# original (only the Backtest slot is freshly created by the constructor).
copy(strat::Strategy) = Strategy(strat.universe, strat.indicator, strat.rules)
#TODO: parallel processing
#TODO: parallel processing
"""
    optimize(strat::Strategy; samples=0, seed=0, verbose=true, summary_fun=cum_pnl, args...)::Matrix

Run the backtest across the indicator's parameter space and return a `Matrix`
whose rows are `[parameter combination..., summary statistic]`.

# Keywords
- `samples`: number of random parameter combinations to evaluate (with
  replacement); `0` (the default) evaluates every combination.
- `seed`: RNG seed used when sampling; any non-negative value reseeds.
- `summary_fun`: statistic computed from each `Backtest` (default `cum_pnl`).
Remaining keywords are forwarded to `backtest!`.
"""
function optimize(strat::Strategy; samples::Int=0, seed::Int=0, verbose::Bool=true, summary_fun::Function=cum_pnl, args...)::Matrix
    # remember the defaults so the strategy's paramset can be restored at the
    # end — copy(strat) is shallow, so mutating the paramset would otherwise
    # leak out of this function
    original_defaults = copy(strat.indicator.paramset.arg_defaults)
    n_runs = count_runs(strat.indicator.paramset)
    if samples > 0
        seed >= 0 ? Random.seed!(seed) : nothing
        # sample from the FULL space of combinations (the previous version
        # sampled from 1:samples, ignoring most of the parameter space)
        sample_index = rand(1:n_runs, samples)
    else
        samples = n_runs
        sample_index = collect(1:samples)
    end
    combos = generate_combinations(strat.indicator.paramset)[sample_index,:]
    optimization = zeros(samples, 1)
    verbose ? progress = Progress(length(sample_index), 1, "Optimizing Backtest") : nothing
    for i in 1:length(sample_index)
        verbose ? next!(progress) : nothing
        strat.indicator.paramset.arg_defaults = combos[i,:]
        generate_trades!(strat, verbose=false)
        backtest!(strat, verbose=false; args...)
        optimization[i] = summary_fun(strat.backtest)
    end
    # restore the caller's parameter defaults
    strat.indicator.paramset.arg_defaults = original_defaults
    return [combos optimization]
end
"""
    optimize!(strat::Strategy; samples=0, seed=0, verbose=true, summary_fun=cum_pnl, args...)::Nothing

Run [`optimize`](@ref) and store the resulting matrix on
`strat.backtest.optimization`.
"""
function optimize!(strat::Strategy; samples::Int=0, seed::Int=0, verbose::Bool=true, summary_fun::Function=cum_pnl, args...)::Nothing
    result = optimize(strat; samples=samples, seed=seed, verbose=verbose, summary_fun=summary_fun, args...)
    strat.backtest.optimization = result
    return nothing
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 696 | #=
methods for handling backtest results of strategy objects
=#
"""
    Backtest

Mutable container for a strategy's run artifacts: the per-asset trade signals,
the per-asset backtest results, and the optimization matrix. All slots start
empty and are filled by `generate_trades!`, `backtest!` and `optimize!`.
"""
mutable struct Backtest
    trades :: Dict{String,TS}            # asset => boolean trigger columns
    backtest :: Dict{String,TS{Float64}} # asset => price data + Pos/PNL/CumPNL
    optimization :: Matrix{Float64}      # [param combos | summary statistic]
    function Backtest(trades::Dict{String,TS}=Dict{String,TS}(),
                      backtest::Dict{String,TS{Float64}}=Dict{String,TS{Float64}}(),
                      optimization::Matrix{Float64}=Matrix{Float64}(undef,0,0))
        return new(trades, backtest, optimization)
    end
end
"""
    cum_pnl(results::Backtest)::Float64

Sum the per-bar `PNL` column across every asset's backtest result, giving the
strategy's total cumulative profit and loss.
"""
function cum_pnl(results::Backtest)::Float64
    result = 0.0
    @inbounds for val in values(results.backtest)
        pnl::Vector = val[:PNL].values[:]
        result += sum(pnl)
    end
    return result
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 990 |
import Base: show
"""
    Indicator

Pairing of an indicator function with the `ParameterSet` supplying its keyword
arguments; `data` caches the most recent calculation (starts empty).
"""
mutable struct Indicator
    fun :: Function           # e.g. Indicators.mama
    paramset :: ParameterSet  # names/defaults/ranges of the keyword args
    data :: TS                # cached output of the last calculation
    function Indicator(fun::Function, paramset::ParameterSet)
        data = TS()
        return new(fun, paramset, data)
    end
end
"""
    calculate(indicator::Indicator, input::TS)::TS

Apply the indicator's function to `input`, passing the paramset's current
default values as keyword arguments.
"""
function calculate(indicator::Indicator, input::TS)::TS
    kwargs = generate_dict(indicator.paramset)
    return indicator.fun(input; kwargs...)
end
# function calculate!(indicator::Indicator, input::TS)::Nothing
# indicator.data = calculate(indicator, input)
# return nothing
# end
"""
    generate_dict(universe::Universe, indicator::Indicator)::Dict{String,Indicator}

Build one `Indicator` per asset in `universe`, with its `data` field populated
by running the indicator over that asset's series.
"""
function generate_dict(universe::Universe, indicator::Indicator)::Dict{String,Indicator}
    indicators = Dict{String,Indicator}()
    for asset in universe.assets
        local ind = Indicator(indicator.fun, indicator.paramset)
        # `calculate!` is commented out above and thus undefined; compute the
        # indicator output and store it on the (mutable) Indicator directly
        ind.data = calculate(ind, universe.data[asset])
        indicators[asset] = ind
    end
    return indicators
end
# TODO: add information about the calculation function
# Display an Indicator by showing its parameter set (the function itself is
# not yet rendered).
function show(io::IO, indicator::Indicator)
    show(io, indicator.paramset)
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 868 | #=
types and methods to facilitate the interaction between orders, rules, and portfolios
=#
# Base type for all order varieties handled by the (future) execution logic.
abstract type AbstractOrder end

# Order executed immediately at the prevailing market price.
struct MarketOrder <: AbstractOrder
    asset::String
    quantity::Number
end

# Order executed only at `limit` or better.
struct LimitOrder <: AbstractOrder
    asset::String
    quantity::Number
    limit::Number
end

# Order triggered once the price crosses `stop`.
struct StopOrder <: AbstractOrder
    asset::String
    quantity::Number
    stop::Number
end

#TODO: complete this logic, enable interaction with strategy/portfolio objects
#TODO: differentiate between buying vs. going long and the like (the latter should reverse position if short)
#TODO: add logic whereby the order logic is altered by the type `T` of qty
# (if T<:Int, order that many *shares*, else if T<:Float64, interpret qty as a fraction of the portfolio at time t)
# Placeholder order actions: each is currently the identity on the requested
# quantity; the backtest engine dispatches on WHICH action a rule references
# (long/buy => +1, short/sell => -1, otherwise flat) rather than on its result.
liquidate(qty) = qty
long(qty) = qty
buy(qty) = qty
short(qty) = qty
sell(qty) = qty
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 1827 | import Base.show
"""
    ParameterSet

Named keyword arguments for an indicator function: names, current/default
values, and the ranges searched during optimization. `arg_types` and `n_args`
are derived in the constructor.
"""
mutable struct ParameterSet
    arg_names :: Vector{Symbol}
    arg_defaults :: Vector
    arg_ranges :: Vector
    arg_types :: Vector{<:Type}
    n_args :: Int
    #TODO: parameter constraints (e.g. ensure one parameter always greater than another)
    #TODO: refactor out the arg_ prefix (its redundant if they all start with it)
    function ParameterSet(arg_names::Vector{Symbol},
                          arg_defaults::Vector,
                          arg_ranges::Vector=[x:x for x in arg_defaults])
        # one default and one range per name
        @assert length(arg_names) == length(arg_defaults) == length(arg_ranges)
        # each range must produce values of the same type as its default
        @assert eltype.(arg_defaults) == eltype.(arg_ranges)
        arg_types::Vector{<:Type} = eltype.(arg_defaults)
        return new(arg_names, arg_defaults, arg_ranges, arg_types, length(arg_names))
    end
end
"""
    count_runs(ps::ParameterSet)::Int

Total number of parameter combinations: the product of the lengths of all
argument ranges (1 for an empty parameter set).
"""
function count_runs(ps::ParameterSet)::Int
    return mapreduce(length, *, ps.arg_ranges; init=1)
end
"""
    generate_dict(ps::ParameterSet; arg_values::Vector=ps.arg_defaults)::Dict{Symbol,Any}

Map each argument name to the corresponding entry of `arg_values`
(defaults to the paramset's default values).
"""
function generate_dict(ps::ParameterSet; arg_values::Vector=ps.arg_defaults)::Dict{Symbol,Any}
    return Dict{Symbol,Any}(ps.arg_names[j] => arg_values[j] for j in 1:ps.n_args)
end
# Render the parameter set as a numbered list of name/default/range/type.
# NOTE(review): the `β` separators in the output string appear to be mojibake
# of the original display characters — confirm against the upstream repository
# before changing (this is runtime output, left untouched here).
function show(io::IO, ps::ParameterSet)::Nothing
    println(io, "# Parameters:")
    @inbounds for i in 1:ps.n_args
        println(io, TAB, "($i) $(ps.arg_names[i]) β $(ps.arg_defaults[i]) β {$(string(ps.arg_ranges[i]))} :: $(ps.arg_types[i])")
    end
end
# Cartesian product of all argument ranges, returned as a Matrix with one row
# per combination and one (properly typed) column per argument.
function generate_combinations(ps::ParameterSet)
    # full outer product of the ranges, flattened to a vector of tuples
    A = collect(Iterators.product(ntuple(i->ps.arg_ranges[i], length(ps.arg_ranges))...))
    B = A[:]
    # one pre-typed column vector per argument
    T = Tuple(Array{arg_type}(undef, size(B,1)) for arg_type in ps.arg_types)
    for j in 1:length(ps.arg_ranges), i in 1:size(B,1)
        T[j][i] = B[i][j]
    end
    return hcat(T...)
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 750 | #=
type and methods to track the evolution of a strategy's securities portfolio composition
=#
"""
    Portfolio

Time-indexed record of a strategy's holdings — quantities, weights,
entry/close prices and PnL — with one row per date in the universe's overall
index and one column per asset.
"""
mutable struct Portfolio
    quantity :: Matrix{Float64}
    weight :: Matrix{Float64}
    entry_price :: Matrix{Float64}
    close_price :: Matrix{Float64}
    pnl :: Matrix{Float64}
    idx :: Vector{<:TimeType}
    function Portfolio(universe::Universe)
        idx = get_overall_index(universe)
        # allocate a DISTINCT matrix per field: the previous chained assignment
        # (a = b = ... = zeros(...)) aliased all five fields to one array, so
        # mutating any field would silently mutate every other field
        dims = (length(idx), length(universe.assets))
        quantity    = zeros(Float64, dims)
        weight      = zeros(Float64, dims)
        entry_price = zeros(Float64, dims)
        close_price = zeros(Float64, dims)
        pnl         = zeros(Float64, dims)
        return new(quantity, weight, entry_price, close_price, pnl, idx)
    end
end
#function update_portfolio!(portfolio::Portfolio, order::Order, universe::Universe)
# i = findfirst(portfolio.idx .> order.time)
# quantity[i]
#end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 976 | #=
Type and methods facilitating simple but effective syntax interface for defining trading rules
=#
import Base: show
#TODO: figure out how to make this a function that interfaces with the portfolio & account objects
"""
    Rule{S,F,T}

Pairing of a trigger [`Signal`](@ref) with the action to take when it fires
(e.g. `long`, `short`, `liquidate`) plus the action's arguments (e.g. an order
quantity).
"""
struct Rule{S,F,T}
    trigger :: S            # Signal whose `fun` produces the boolean trigger
    action :: F             # order action function
    args :: Tuple{Vararg{T}}  # arguments forwarded to the action
    function Rule(trigger::S, action::F, args::Tuple{Vararg{T}}) where {S<:Signal, F<:Function, T}
        return new{S,F,T}(trigger, action, args)
    end
end
"""
    @rule expr args...

Build a [`Rule`](@ref) from an infix expression whose left operand is a signal
and whose right operand is an action, with any trailing arguments passed along
to the action (e.g. an order quantity).
"""
macro rule(logic::Expr, args...)
    trigger = :($(logic.args[2]))   # left operand: the Signal
    #action = :($(logic.args[3])$((args...)))
    action = :($(logic.args[3]))    # right operand: the action function
    args = :($(args))
    return esc(:(Rule($trigger, $action, $args)))
end
# Infix rule operator: invoke `b` only when `a` is true.
# NOTE(review): `β` appears to be mojibake of the original operator name
# (likely `→`) — confirm against the upstream repository.
β(a,b) = a ? b() : nothing
"""
    show(io::IO, rule::Rule)

Human-readable rendering of a rule, e.g. `Long 100 when <trigger expression>`.
"""
function show(io::IO, rule::Rule)
    # strip any module qualification from the action name, then title-case it
    action_string = titlecase(split(string(rule.action), '.')[end])
    arg_string = titlecase(string(rule.args...))
    trigger_string = string(rule.trigger.switch)
    print(io, "$action_string $arg_string when $trigger_string")
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 1375 | #=
type and methods for handling signals generating by consuming data exhaust from indicators
=#
"""
    Signal

A trading signal built from an infix expression `op(a, b)` where `a` and `b`
are field names (Symbols) of the indicator output and `op` is a comparison
function (e.g. crossover/crossunder or `==`). `fun` evaluates the expression
against a `TS` of indicator data and returns a `BitVector` trigger.
"""
struct Signal
    switch :: Expr       # the original infix expression
    fun :: Function      # TS -> BitVector evaluator derived from `switch`
    function Signal(switch::Expr)
        # the head of the expression must evaluate to a callable comparator
        @assert typeof(eval(switch.args[1])) <: Function
        a::Symbol = switch.args[2]
        b::Symbol = switch.args[3]
        function fun(x::TS)::BitVector
            vec1::Vector = x[a].values[:]
            vec2::Vector = x[b].values[:]
            comp = eval(switch.args[1])
            # `==` on two Vectors yields a single Bool; we need the
            # element-wise comparison, so substitute a broadcasting closure.
            # (The previous version tried `comp(x,y) = broadcast(==, x, y)`,
            # which is an invalid local method definition shadowing the
            # existing local variable `comp`.)
            if comp === Base.:(==)
                comp = (u, v) -> broadcast(==, u, v)
            end
            out::BitVector = comp(vec1, vec2)
            return out
        end
        return new(switch, fun)
    end
end
"""
    prep_signal(signal::Signal, indicator_data::TS)::Expr

Return a copy of the signal's expression with each field symbol replaced by
the corresponding column of `indicator_data`, ready for evaluation.
"""
function prep_signal(signal::Signal, indicator_data::TS)::Expr
    local switch = copy(signal.switch)
    # args[1] is the comparator; args[2:end] are the field symbols to splice
    for i in 2:length(switch.args)
        switch.args[i] = indicator_data[switch.args[i]]
    end
    return switch
end
"""
    @signal expr

Construct a [`Signal`](@ref) from an infix comparison expression over
indicator output fields.
"""
macro signal(logic::Expr)
    return Signal(logic)
end
# NOTE(review): the two definitions below use the same identifier, so the
# second (crossunder) silently overwrites the first (crossover). This looks
# like mojibake of two distinct operators (likely `↑` and `↓`) — confirm
# against the upstream repository.
β(x, y) = Indicators.crossover(x, y)
β(x, y) = Indicators.crossunder(x, y)
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 2198 | #=
Type definition and methods containing the overarching backtesting object fueling the engine
=#
import Base: show
const TABWIDTH = 4
const TAB = ' ' ^ TABWIDTH
"""
    Strategy

Top-level container binding a [`Universe`](@ref), an [`Indicator`](@ref) and a
tuple of [`Rule`](@ref)s, plus the mutable [`Portfolio`](@ref) and
[`Backtest`](@ref) state populated while running.
"""
mutable struct Strategy
    universe :: Universe
    indicator :: Indicator
    rules :: Tuple{Vararg{Rule}}
    portfolio :: Portfolio
    backtest :: Backtest
    function Strategy(universe::Universe,
                      indicator::Indicator,
                      rules::Tuple{Vararg{Rule}},
                      portfolio::Portfolio=Portfolio(universe))
        # a fresh (empty) Backtest is always created for a new Strategy
        return new(universe, indicator, rules, portfolio, Backtest())
    end
end
"""
    show(io::IO, strat::Strategy)

Pretty-print a strategy: its parameter set, its trading rules, and its
universe.
"""
function show(io::IO, strat::Strategy)
    println(io, strat.indicator.paramset)
    println(io, "# Rules:")
    for rule in strat.rules
        println(io, TAB, rule)
    end
    # blank separator line; previously a bare `println()`, which wrote to
    # stdout instead of the supplied `io`
    println(io)
    show(io, strat.universe)
end
"""
    summarize_results(strat::Strategy)

Aggregate the per-asset backtest output into portfolio-level time series.
Returns `(weights, holdings, values, profits)` where `weights` includes
`Exposure` (net) and `Leverage` (gross) columns, and `values`/`profits` carry
a `Total` column summed across assets.
"""
function summarize_results(strat::Strategy)
    holdings = TS()
    values = TS()
    profits = TS()
    for asset in strat.universe.assets
        asset_result = strat.backtest.backtest[asset]
        holding = asset_result[:Pos]
        holdings = [holdings holding]
        # market value of the holding: closing price times position
        values = [values cl(asset_result) * holding]
        profits = [profits asset_result[:PNL]]
    end
    # data cleaning - field assignment and missing value replacement
    holdings.fields = Symbol.(strat.universe.assets)
    profits.fields = Symbol.(strat.universe.assets)
    values.fields = Symbol.(strat.universe.assets)
    holdings.values[isnan.(holdings.values)] .= 0.0
    values.values[isnan.(values.values)] .= 0.0
    profits.values[isnan.(profits.values)] .= 0.0
    # portfolio weights, net exposure, and leverage calculations
    weights = values / apply(values, 1, fun=sum)
    weights.fields = Symbol.(strat.universe.assets)
    exposure = apply(weights, 1, fun=sum)               # net exposure (signed sum)
    leverage = apply(weights, 1, fun=x->(sum(abs.(x))))  # gross exposure (absolute sum)
    weights = [weights exposure leverage]
    weights.fields[end-1:end] = [:Exposure, :Leverage]
    # compute other temporal totals and return
    profits = [profits apply(profits, 1, fun=sum)]
    profits.fields[end] = :Total
    values = [values apply(values, 1, fun=sum)]
    values.fields[end] = :Total
    return weights, holdings, values, profits
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 2412 | #=
Type and methods to simplify data sourcing and management of the universe of tradable assets
=#
using ProgressMeter
import Base: show
const SEPARATORS = ['/', '_', '.']
# function guess_tickers(assets::Vector{String})::Vector{Symbol}
# tickers = Symbol.([Temporal.namefix(split(asset, SEPARATORS)[end]) for asset in assets])
# @assert tickers == unique(tickers) "Non-unique ticker symbols found in universe"
# return tickers
# end
"""
    Universe

The collection of tradable assets for a strategy: asset identifiers, their
gathered `TS` data (empty placeholders until `gather!` runs), and the overall
date span `from`/`thru`.
"""
mutable struct Universe
    assets :: Vector{String}
    # tickers :: Vector{Symbol}
    data :: Dict{String,TS}
    from :: TimeType
    thru :: TimeType
    function Universe(assets::Vector{String}, from::TimeType=Dates.Date(0), thru::TimeType=Dates.today())
        # asset identifiers must be unique
        @assert assets == unique(assets)
        # tickers = guess_tickers(assets)
        data = Dict{String,TS}()
        @inbounds for asset in assets
            data[asset] = TS()   # empty placeholder until data is gathered
        end
        return new(assets, data, from, thru)
    end
end
#TODO: ensure type compatibility across variables (specifically with regard to TimeTypes)
#TODO: ensure type compatibility across variables (specifically with regard to TimeTypes)
"""
    gather!(universe::Universe; source=Temporal.quandl, verbose=true)::Nothing

Fill `universe.data` by calling `source(asset)` for each asset, then tighten
the universe's `from`/`thru` bounds to the dates actually available.
"""
function gather!(universe::Universe; source::Function=Temporal.quandl, verbose::Bool=true)::Nothing
    t0 = Vector{Dates.Date}()   # first available date per asset
    tN = Vector{Dates.Date}()   # last available date per asset
    verbose ? progress = Progress(length(universe.assets), 1, "Gathering Universe Data") : nothing
    @inbounds for asset in universe.assets
        verbose ? next!(progress) : nothing
        indata = source(asset)
        push!(t0, indata.index[1])
        push!(tN, indata.index[end])
        universe.data[asset] = indata
    end
    # clamp to the intersection of the requested and available date ranges
    universe.from = max(minimum(t0), universe.from)
    universe.thru = min(maximum(tN), universe.thru)
    return nothing
end
#FIXME: make robust to other time types
"""
    get_overall_index(universe::Universe)::Vector{Date}

Union of the date indexes of every gathered asset, in first-seen order.
"""
function get_overall_index(universe::Universe)::Vector{Date}
    return mapreduce(asset -> universe.data[asset].index, union, universe.assets;
                     init=Vector{Date}())
end
"""
    show(io::IO, universe::Universe)

List each asset with, when gathered, its date range and available fields.
"""
function show(io::IO, universe::Universe)
    println(io, "# Universe:")
    for (i, asset) in enumerate(universe.assets )
        println(io, TAB, "Asset $i:", TAB, asset)
        data = universe.data[asset]
        if isempty(data)
            println(io, TAB, TAB, "(No Data Gathered)")
        else
            println(io, TAB, TAB, "Range:", TAB, data.index[1], " to ", data.index[end])
            println(io, TAB, TAB, "Fields:", TAB, join(String.(data.fields), " "))
        end
    end
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | code | 2499 | using Strategems, Temporal, Indicators
using Dates
using Test
# define universe and gather data
assets = ["Corn"]
@testset "Universe" begin
@testset "Construct" begin
global universe = Universe(assets)
@test universe.assets == assets
end
@testset "Gather" begin
gather!(universe, source=(asset)->Temporal.tsread(joinpath(dirname(pathof(Temporal)), "..", "data/$asset.csv")))
@test length(setdiff(assets, collect(keys(universe.data)))) == 0
end
end
# define indicators and parameter space
@testset "Parameter Set" begin
global arg_names = [:fastlimit, :slowlimit]
global arg_defaults = [0.5, 0.05]
global arg_ranges = [0.01:0.01:0.99, 0.01:0.01:0.99]
global paramset = ParameterSet(arg_names, arg_defaults, arg_ranges)
@test paramset.arg_names == arg_names
end
@testset "Indicator" begin
global f(x; args...) = Indicators.mama(x; args...)
global indicator = Indicator(f, paramset)
@test indicator.fun == f
@test indicator.paramset == paramset
end
# define signals that will trigger trading decisions
@testset "Signal" begin
@testset "Construct" begin
global siglong = @signal MAMA β FAMA
global sigshort = @signal MAMA β FAMA
global sigexit = @signal MAMA == FAMA
@test siglong.fun.a == sigshort.fun.a == sigexit.fun.a == :MAMA
@test siglong.fun.b == sigshort.fun.b == sigexit.fun.b == :FAMA
end
end
# define the trading rules
@testset "Rule" begin
@testset "Construct" begin
global longrule = @rule siglong β long 100
global shortrule = @rule sigshort β short 100
global exitrule = @rule sigexit β liquidate 1.0
global rules = (longrule, shortrule, exitrule)
@test longrule.action == long
@test shortrule.action == short
@test exitrule.action == liquidate
end
end
# run strategy
@testset "Strategy" begin
@testset "Construct" begin
global strat = Strategy(universe, indicator, rules)
end
@testset "Backtest" begin
backtest!(strat)
end
@testset "Optimize" begin
optimize!(strat, samples=10)
@test size(strat.backtest.optimization,1) == 10
@test size(strat.backtest.optimization,2) == length(arg_names)+1
end
end
# test example(s)
@testset "Examples" begin
include("$(joinpath(dirname(pathof(Strategems)), "..", "examples", "mama.jl"))")
@test assets == ["CME_CL1", "CME_RB1"]
end
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.3.0 | 5c09f5af273dcee88449ab5a08c5aaa2102cd251 | docs | 6897 | [](https://travis-ci.org/dysonance/Strategems.jl)
[](https://coveralls.io/github/dysonance/Strategems.jl?branch=master)
[](http://codecov.io/github/dysonance/Strategems.jl?branch=master)
# Strategems
**Strategems** is a [Julia](https://julialang.org/) package aimed at simplifying and streamlining the process of developing, testing, and optimizing algorithmic/systematic trading strategies. This package is inspired in large part by the quantstrat<sup>[1](http://past.rinfinance.com/agenda/2013/workshop/Humme+Peterson.pdf)</sup><sup>,</sup><sup>[2](https://github.com/braverock/quantstrat)</sup> package in [R](https://www.r-project.org/), adopting a similar general structure to the building blocks that make up a *strategy*.
Given the highly iterative nature of event-driven trading strategy development, Julia's high-performance design (particularly in the context of loops) and straightforward syntax would seem to make it a natural fit as a language for systematic strategy research and development. While this package remains early in development, with time the hope is to be able to rapidly implement a trading idea, construct a historical backtest, analyze its results, optimize over a given parameter set, and visualize all of this with great detail.
## Dependencies
This package makes heavy use of the [**Temporal**](https://github.com/dysonance/Temporal.jl) package's `TS` time series type to facilitate the underlying computations involved in cleaning & preprocessing the data used when testing a `Strategy`. Additionally, the [**Indicators**](https://github.com/dysonance/Indicators.jl/) package offers many technical analysis functions that have been written/designed with the goal of a highly generalized systematic trading strategy research engine in mind, and should thus should simplify the process of working with this data quite a bit.
## Install
The Strategems package can be installed using the standard Julia package manager functions.
```julia
# Option A:
Pkg.add("Strategems")
# Option B:
Pkg.clone("https://github.com/dysonance/Strategems.jl")
```
# Anatomy of a Strategy
Below are the basic building blocks making up the general anatomy of a *Strategy* with respect to the `Strategems.jl` package design and the type definitions used to facilitate the research workflow.
- `Universe`: encapsulation of the assets/securities the strategy is to be allowed to trade
- `Indicator`: calculation done on each asset in the universe whose results we think have predictive potential for future price movement
- `ParameterSet`: inputs/arguments to the indicator calculations
- `Signal`: boolean flag sending messages to the trading logic/rules to be interpreted and acted upon
- `Rule`: applications of trading logic derived from interpretations of prior calculations & signals at each time step
- `Strategy`: overarching object encapsulating and directing all of the above logic and data to power the backtesting engine
# Example Usage
Below is a quick example demonstrating a simple use-case that one might use to get acquainted with how the package works. Note that the custom infix operators denoted by the uparrow and downarrow below are defined in this package as another way of expressing that one variable crosses over another. The intention of this infix operator definition is to hopefully make the definition of a strategy more syntactically expressive and intuitive.
The key indicator used in this strategy is John Ehlers's MESA Adaptive Moving Average (or *MAMA* for short). This functionality is implemented in the `Indicators.jl` package described above, and outputs a `Matrix` (or `TS` object if one is passed as an input) of two columns, the first being the *MAMA* itself and the second being the *FAMA*, or following adaptive moving average.
This strategy simply goes long when the *MAMA* crosses over the *FAMA*, and goes short when the *FAMA* crosses over the *MAMA*. Below is an implementation that shows how to set default arguments to the `Indicators.mama` function and run a simple backtest using those parameters, and also define specified ranges over which we might like to see how the strategy behaves under different parameter sets.
```julia
using Strategems, Indicators, Temporal, Dates
# define universe and gather data
assets = ["CHRIS/CME_CL1", "CHRIS/CME_RB1"]
universe = Universe(assets)
function datasource(asset::String; save_downloads::Bool=true)::TS
savedata_path = joinpath(dirname(pathof(Strategems)), "..", "data", "$asset.csv")
if isfile(savedata_path)
return Temporal.tsread(savedata_path)
else
X = quandl(asset)
if save_downloads
if !isdir(dirname(savedata_path))
mkdir(dirname(savedata_path))
end
Temporal.tswrite(X, savedata_path)
end
return X
end
end
gather!(universe, source=datasource)
# define indicators and parameter space
arg_names = [:fastlimit, :slowlimit]
arg_defaults = [0.5, 0.05]
arg_ranges = [0.01:0.01:0.99, 0.01:0.01:0.99]
paramset = ParameterSet(arg_names, arg_defaults, arg_ranges)
f(x; args...) = Indicators.mama(x; args...)
indicator = Indicator(f, paramset)
# define signals that will trigger trading decisions
# note the uparrow infix operator is defined to simplify one variable crossing over another
# (similarly for the downarrow infix operator for crossing under)
siglong = @signal MAMA ↑ FAMA
sigshort = @signal MAMA ↓ FAMA
sigexit = @signal MAMA == FAMA
# define the trading rules
longrule = @rule siglong → long 100
shortrule = @rule sigshort → short 100
exitrule = @rule sigexit → liquidate 1.0
rules = (longrule, shortrule, exitrule)
# run strategy
strat = Strategy(universe, indicator, rules)
backtest!(strat)
optimize!(strat, samples=0) # randomly sample the parameter space (0 -> use all combinations)
# cumulative pnl for each combination of the parameter space
strat.backtest.optimization
# visualizing results with the Plots.jl package
using Plots
gr()
(x, y, z) = (strat.backtest.optimization[:,i] for i in 1:3)
surface(x, y, z)
```

# Roadmap / Wish List
* Get a sufficiently full-featured type system established to facilitate easy construction of simple strategies
* Allow more intelligent logic for trading rules
- Adjust order sizing based on portfolio/account at time *t*
- Portfolio optimization logic
- Risk limits
* Stop loss rules
* Define a more diverse set of order types
- Limit orders
* Stop orders
| Strategems | https://github.com/dysonance/Strategems.jl.git |
|
[
"MIT"
] | 0.1.0 | 06a6480e0547122eccc20646a32cece3dbee08e2 | code | 2556 | module PercolationModel
using Judycon
import Judycon: connect!
# Percolation system on a dim×dim grid backed by two union-find structures:
# wuf1 has virtual top AND bottom nodes (fast percolation test); wuf2 has only
# the top node, so `isfull` queries are unaffected by connections made through
# the bottom node (avoids the classic "backwash" artifact).
struct Percolation
    grid :: Matrix{Bool}   # true = open site
    li :: LinearIndices    # maps (i,j) -> linear union-find index
    dim :: Int
    wuf1 :: QuickUnion     # dim^2 sites + top + bottom virtual nodes
    wuf2 :: QuickUnion     # dim^2 sites + top virtual node only
    topnode :: Int
    bottomnode :: Int      # exists only in wuf1
end

# Construct a fully-blocked dim×dim percolation system.
function Percolation(dim)
    grid = zeros(Bool, dim, dim)
    li = LinearIndices(grid)
    wuf1 = QuickUnion(dim^2+2)   # +2: top and bottom virtual nodes
    wuf2 = QuickUnion(dim^2+1)   # +1: top virtual node only
    topnode = dim^2 + 1
    bottomnode = topnode + 1
    return Percolation(grid, li, dim, wuf1, wuf2, topnode, bottomnode)
end
# Union sites i and j in both underlying union-find structures.
function connect!(p::Percolation, i, j)
    for wuf in (p.wuf1, p.wuf2)
        connect!(wuf, i, j)
    end
end
# True if site (i,j) has been opened.
function isopen(p::Percolation, i, j)
    return p.grid[i,j]
end

# Number of currently open sites.
function number_of_open(p::Percolation)
    return sum(p.grid)
end

# A site is "full" when it is open and connected to the top through open sites.
# Uses wuf2 (which has no bottom node) so bottom-connected clusters do not
# produce false positives ("backwash").
function isfull(p::Percolation, i, j)
    !isopen(p, i, j) && return false
    return isconnected(p.wuf2, p.topnode, p.li[i,j])
end

# The system percolates when the virtual top and bottom nodes are connected.
function percolates(p::Percolation)
    return isconnected(p.wuf1, p.topnode, p.bottomnode)
end
"""
    open!(p::Percolation, i, j)

Open site `(i, j)` and union it with its open orthogonal neighbors; sites in
the top/bottom rows are additionally connected to the virtual top/bottom
nodes. No-op if the site is already open.
"""
function open!(p::Percolation, i, j)
    isopen(p, i, j) && return
    p.grid[i, j] = true
    i == 1 && connect!(p, p.topnode, p.li[i, j])
    # bottom row connects only in wuf1: wuf2 deliberately has no bottom node
    i == p.dim && connect!(p.wuf1, p.bottomnode, p.li[i, j])
    # neighbor bounds are `i < p.dim` / `j < p.dim`; the previous version used
    # `< p.dim-1`, which wrongly skipped connections in row/column dim-1
    i > 1     && isopen(p, i-1, j) && connect!(p, p.li[i,j], p.li[i-1, j])
    i < p.dim && isopen(p, i+1, j) && connect!(p, p.li[i,j], p.li[i+1, j])
    j > 1     && isopen(p, i, j-1) && connect!(p, p.li[i,j], p.li[i, j-1])
    j < p.dim && isopen(p, i, j+1) && connect!(p, p.li[i,j], p.li[i, j+1])
end
using Statistics
# Aggregate of Monte-Carlo percolation-threshold samples plus summary
# statistics (mean, standard deviation, 95% confidence interval).
struct Result{T}
    samples :: Vector{T}
    mean :: Float64
    stddev :: Float64
    confidence_lo :: Float64
    confidence_hi :: Float64
end

# z-value for a two-sided 95% confidence interval
const CONFIDENCE_95 = 1.96

"""
    Result(samples)

Compute the mean, standard deviation and 95% confidence interval of `samples`
and bundle them with the raw data.
"""
function Result(samples)
    count = length(samples)
    μ = mean(samples)
    σ = std(samples)
    halfwidth = CONFIDENCE_95 * σ / sqrt(count)
    return Result(samples, μ, σ, μ - halfwidth, μ + halfwidth)
end
using Printf
"""
    show(io::IO, r::Result)

Print a small aligned table of sample count, mean, standard deviation and the
95% confidence interval.
"""
function Base.show(io::IO, r::Result)
    # write to `io`; previously the @printf calls defaulted to stdout,
    # ignoring the supplied IO argument
    @printf(io, "% 25s = %d\n", "samples", length(r.samples))
    @printf(io, "% 25s = %0.3f\n", "mean", r.mean)
    @printf(io, "% 25s = %0.3f\n", "stddev", r.stddev)
    @printf(io, "% 25s = [%0.3f, %0.3f]\n", "95% confidence interval", r.confidence_lo, r.confidence_hi)
end
# Open uniformly random sites on an n×n grid until the system percolates,
# returning the final Percolation state.
function run_one(n)
    p = Percolation(n)
    while !percolates(p)
        open!(p, rand(1:n), rand(1:n))
    end
    return p
end

# Monte-Carlo estimate of the percolation threshold: run `trials` independent
# simulations on n×n grids and summarize the open-site fractions as a Result.
function run(n, trials)
    thresholds = zeros(trials)
    for i=1:trials
        p = run_one(n)
        thresholds[i] = number_of_open(p) / n^2
    end
    return Result(thresholds)
end
end | Judycon | https://github.com/ahojukka5/Judycon.jl.git |
|
[
"MIT"
] | 0.1.0 | 06a6480e0547122eccc20646a32cece3dbee08e2 | code | 1719 | module Judycon
"""
    QuickFind

Eager union-find ("quick find"): `id[p]` stores the component label of site
`p`. `find` and `isconnected` are O(1); `connect!` relabels the whole array,
so it is O(n).
"""
struct QuickFind
    id :: Vector{Int}
end

# Each of the n sites starts in its own singleton component.
QuickFind(n) = QuickFind(collect(1:n))

# Component label of site p.
find(qf::QuickFind, p::Int) = qf.id[p]

# True when p and q carry the same component label.
isconnected(qf::QuickFind, p::Int, q::Int) = find(qf, p) == find(qf, q)

"""
    connect!(qf::QuickFind, p::Int, q::Int)

Merge the components of `p` and `q` by relabeling every site in `p`'s
component with `q`'s label.
"""
function connect!(qf::QuickFind, p::Int, q::Int)
    labels = qf.id
    from, to = labels[p], labels[q]
    for k in eachindex(labels)
        if labels[k] == from
            labels[k] = to
        end
    end
    return
end
"""
    QuickUnion

Union-find ("quick union") with optional union-by-size weighting and path
compression. `id[p]` is the parent of `p`; roots satisfy `id[p] == p`.
"""
struct QuickUnion
    id :: Vector{Int}
    sz :: Vector{Int}    # subtree sizes (meaningful only when `weighted`)
    weighted :: Bool
    compress :: Bool
end

function QuickUnion(n::Int, weighted=true, compress=true)
    id = collect(1:n)
    # every singleton tree has size 1; the previous zero initialization made
    # every `sz[i] < sz[j]` comparison false and sizes never grew, silently
    # disabling the weighting heuristic
    sz = ones(Int, n)
    return QuickUnion(id, sz, weighted, compress)
end

"""
    find(qu::QuickUnion, p::Int)

Return the root of `p`, halving the path along the way when `compress` is set.
"""
function find(qu::QuickUnion, p::Int)
    id = qu.id
    while p != id[p]
        if qu.compress
            id[p] = id[id[p]]   # path halving: point p at its grandparent
        end
        p = id[p]
    end
    return p
end

# True when p and q share a root.
function isconnected(qu::QuickUnion, p::Int, q::Int)
    return find(qu, p) == find(qu, q)
end

"""
    connect(G, p, q)

Connect p and q in G.

If weighting is used, connect such a way that the depth of the tree is minimized.
"""
function connect!(qu::QuickUnion, p::Int, q::Int)
    id = qu.id
    i = find(qu, p)
    j = find(qu, q)
    i == j && return   # already connected; also avoids inflating sizes
    if qu.weighted
        sz = qu.sz
        # attach the smaller tree under the larger one
        if (sz[i] < sz[j])
            id[i] = j
            sz[j] += sz[i]
        else
            id[j] = i
            sz[i] += sz[j]
        end
    else
        id[i] = j
    end
end
export QuickFind, QuickUnion, connect!, isconnected, find
end
| Judycon | https://github.com/ahojukka5/Judycon.jl.git |
|
[
"MIT"
] | 0.1.0 | 06a6480e0547122eccc20646a32cece3dbee08e2 | code | 1298 | using Judycon, Test
#
# 1 2---3 4---5
# | | | | |
# 6---7 8 9 10
#
G1 = QuickFind(10);
G2 = QuickUnion(10, false, false);
G3 = QuickUnion(10, true, false);
G4 = QuickUnion(10, false, true);
G5 = QuickUnion(10, true, true);
# Exercise a union-find implementation G against the fixed topology shown at
# the top of the file:
#
#   1   2---3   4---5
#   |   |   |   |   |
#   6---7   8   9  10
#
# After the unions below, {1,2,3,6,7,8} and {4,5,9,10} must each form a single
# connected component, with no connectivity across the two sets.
function test(G)
    connect!(G, 1, 6)
    connect!(G, 6, 7)
    connect!(G, 7, 2)
    connect!(G, 2, 3)
    connect!(G, 3, 8)
    connect!(G, 9, 4)
    connect!(G, 4, 5)
    connect!(G, 5, 10)
    # `pts1` should form one set of connected components and `pts2` another, respectively.
    pts1 = [1, 2, 3, 6, 7, 8]
    pts2 = [4, 5, 9, 10]
    for p in pts1
        for q in pts1
            # connectivity must be symmetric within a component
            @test isconnected(G, p, q)
            @test isconnected(G, q, p)
        end
        for q in pts2
            # and absent (in both directions) across components
            @test !isconnected(G, p, q)
            @test !isconnected(G, q, p)
        end
    end
end
@testset "Test Judycon.jl" begin
@testset "Test QuickFind" begin test(G1) end
@testset "Test QuickUnion without weighting and path compression" begin test(G2) end
@testset "Test QuickUnion with weighting and without path compression" begin test(G3) end
@testset "Test QuickUnion without weighting and with path compression" begin test(G4) end
@testset "Test QuickUnion with weighting and path compression" begin test(G5) end
end
| Judycon | https://github.com/ahojukka5/Judycon.jl.git |
|
[
"MIT"
] | 0.1.0 | 06a6480e0547122eccc20646a32cece3dbee08e2 | docs | 5969 | # Judycon.jl
[![][travis-img]][travis-url]
[![][coveralls-img]][coveralls-url]
Package author: Jukka Aho (@ahojukka5)
Judycon.jl implements dynamic connectivity algorithms for the Julia programming
language. In computing and graph theory, a [dynamic connectivity structure][1]
is a data structure that dynamically maintains information about the connected
components of a graph. Dynamic connectivity has a lot of applications. For
example, dynamic connectivity [can be used][2] to determine functional
connectivity change points in fMRI data. In below, percolation model is solved
using the functions provided this package. For more information about the model,
scroll down to the bottom of this readme file.

## Implemented algorithms:
- QuickFind
- QuickUnion
Both of the algorithms have same API, but the internal data structure is
different. Typical use case is:
```julia
using Judycon: QuickUnion, connect!, isconnected
wuf = QuickUnion(10)
connect!(wuf, 1, 2)
connect!(wuf, 2, 3)
connect!(wuf, 3, 4)
isconnected(wuf, 1, 4)
# output
true
```
QuickFind is a simple data structure that makes it very fast to query whether
points p and q belong to the same connected component, but connecting the points
is slow, up to ~ N^2 operations in the worst case.
QuickUnion makes it fast to connect points. Finding points is not as fast as
with QuickFind, but with some common modifications, i.e. weighting and path
compression, it gives good performance.
Weighted quick union with path compression makes it possible to solve problems
that could not otherwise be addressed. In case of doubt which suits for your
need, use that.
The performance of the algorithms (M union-find operations on a set of N object)
is given below.
| algorithm | worst-case time |
| ------------------------------ | --------------- |
| quick-find | M N |
| quick-union | M N |
| weighted QU | M + N log N |
| QU + path compression | M + N log N |
| weighted QU + path compression | N + M lg N |
## Dynamic connectivity application: percolation
Source: <https://en.wikipedia.org/wiki/Percolation>
In physics, chemistry and materials science, percolation (from Latin percōlāre,
"to filter" or "trickle through") refers to the movement and filtering of fluids
through porous materials. It is described by Darcy's law. Broader applications
have since been developed that cover connectivity of many systems modeled as
lattices or graphs, analogous to connectivity of lattice components in the
filtration problem that modulates capacity for percolation.
During the last decades, percolation theory, the mathematical study of
percolation, has brought new understanding and techniques to a broad range of
topics in physics, materials science, complex networks, epidemiology, and other
fields. For example, in geology, percolation refers to filtration of water
through soil and permeable rocks. The water flows to recharge the groundwater in
the water table and aquifers. In places where infiltration basins or septic
drain fields are planned to dispose of substantial amounts of water, a
percolation test is needed beforehand to determine whether the intended
structure is likely to succeed or fail.
Percolation typically exhibits universality. Statistical physics concepts such
as scaling theory, renormalization, phase transition, critical phenomena and
fractals are used to characterize percolation properties. Percolation is the
downward movement of water through pores and other spaces in the soil due to
gravity. Combinatorics is commonly employed to study percolation thresholds.
Due to the complexity involved in obtaining exact results from analytical models
of percolation, computer simulations are typically used. The current fastest
algorithm for percolation was published in 2000 by Mark Newman and Robert
Ziff.
### Use cases of percolation model
- Coffee percolation, where the solvent is water, the permeable substance is
the coffee grounds, and the soluble constituents are the chemical compounds
that give coffee its color, taste, and aroma.
- Movement of weathered material down on a slope under the earth's surface.
- Cracking of trees with the presence of two conditions, sunlight and under
the influence of pressure.
- Collapse and robustness of biological virus shells to random subunit removal
(experimentally verified fragmentation and disassembly of viruses).
- Robustness of networks to random and targeted attacks.
- Transport in porous media.
- Epidemic spreading.
- Surface roughening.
- Dental percolation, increase rate of decay under crowns because of a
conducive environment for strep mutants and lactobacillus
- Potential sites for septic systems are tested by the "perk test".
Example/theory: A hole (usually 6β10 inches in diameter) is dug in the ground
surface (usually 12β24" deep). Water is filled in to the hole, and the time is
measured for a drop of one inch in the water surface. If the water surface
quickly drops, as usually seen in poorly-graded sands, then it is a potentially
good place for a septic "leach field". If the hydraulic conductivity of the
site is low (usually in clayey and loamy soils), then the site is undesirable.
- Traffic percolation.
From `demos`, you find a percolation model implemented using Judycon.jl The
development of system from initial state to percolation is animated in the top
of this file.
[1]: https://en.wikipedia.org/wiki/Dynamic_connectivity
[2]: https://www.frontiersin.org/articles/10.3389/fnins.2015.00285/full
[travis-img]: https://travis-ci.org/ahojukka5/Judycon.jl.svg?branch=master
[travis-url]: https://travis-ci.org/ahojukka5/Judycon.jl
[coveralls-img]: https://coveralls.io/repos/github/ahojukka5/Judycon.jl/badge.svg?branch=master
[coveralls-url]: https://coveralls.io/github/ahojukka5/Judycon.jl?branch=master
| Judycon | https://github.com/ahojukka5/Judycon.jl.git |
|
[
"MIT"
] | 0.1.0 | a537ef077e3b83cefbf0cfe6e8d73523e8c17cb7 | code | 255 | module LeafletPluto
include("leaflet.jl")
export TileLayer, osm_tile_layer, osm_humanitarian_tile_layer, osm_tile_layers, stadia_tile_layers, LatLng, Path, MapOption, staticMapOption, to_dict, Polyline, Marker, Circle, Polygon, Map, build, leaflet
end
| LeafletPluto | https://github.com/florianfmmartin/LeafletPluto.jl.git |
|
[
"MIT"
] | 0.1.0 | a537ef077e3b83cefbf0cfe6e8d73523e8c17cb7 | code | 15509 | ### A Pluto.jl notebook ###
# v0.19.46
using Markdown
using InteractiveUtils
# βββ‘ 7c52b98c-3617-4ad4-bf12-930468793f89
using HypertextLiteral
# βββ‘ baef2610-79dc-11ef-12f2-ad52aa82fd14
md"""
# LeafletPluto.jl
### Displaying Leaflet maps in Pluto
"""
# βββ‘ e51f2cf3-7e90-4489-a5eb-59aefbdfc3db
md"""
Highly inspired by [PlutoMapPicker.jl](https://github.com/lukavdplas/PlutoMapPicker.jl/blob/main/src/map-picker.jl)
Used [this LeafletJS page](https://leafletjs.com/reference.html) as a reference
"""
# βββ‘ 8db92502-e6cf-493b-9ade-a9f7bcf4ba70
md"""
## Tile layers
!!! warning "Thanks to PlutoMapPicker.jl"
This content was copied over from [PlutoMapPicker.jl](https://github.com/lukavdplas/PlutoMapPicker.jl/blob/main/src/map-picker.jl)
"""
# βββ‘ 2ed6631f-d845-48fb-be56-e4cffab14295
begin
"""
A tile layer that can be used in a Leaflet map.
The configuration includes:
- `url`: a url template to request tiles
- `options`: a `Dict` with extra configurations, such as a minimum and maximum zoom level of the tiles. This is interpolated to a Javascript object using HypertextLiteral.
The configuration is used to create a TileLayer in leaflet; see [leaflet's TileLayer documentation](https://leafletjs.com/reference.html#tilelayer) to read more about URL templates and the available options.
"""
struct TileLayer
    url::String
    options::Dict{String,Any}
end
# Attribution snippets required by each tile provider's terms of use.
attribution_stadia = "© <a href='https://stadiamaps.com/'>Stadia Maps</a>"
attribution_stamen = "© <a href='https://stamen.com/'>Stamen Design</a>"
attribution_openmaptiles = "© <a href='https://openmaptiles.org/'>OpenMapTiles</a>"
attribution_osm = "© <a href='https://www.openstreetmap.org/copyright'>OpenStreetMap</a>"
"""
TileLayer for open street map. Please read OSM's [tile usage policy](https://operations.osmfoundation.org/policies/tiles/) to decide if your usage complies with it.
"""
osm_tile_layer = TileLayer(
    "https://tile.openstreetmap.org/{z}/{x}/{y}.png",
    Dict(
        "maxZoom" => 19,
        "attribution" => attribution_osm
    )
)
"""
TileLayer for the OSM Humanitarian style (tile.openstreetmap.fr/hot). The same OSM [tile usage policy](https://operations.osmfoundation.org/policies/tiles/) applies.
"""
osm_humanitarian_tile_layer = TileLayer(
    "https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png",
    Dict(
        "maxZoom" => 19,
        "subdomains" => "ab",
        "attribution" => attribution_osm
    )
)
"""
TileLayers for Open Street Map. Please read OSM's [tile usage policy](https://operations.osmfoundation.org/policies/tiles/) to decide if your usage complies with it.
"""
osm_tile_layers = (
    standard = osm_tile_layer,
    humanitarian = osm_humanitarian_tile_layer,
)
stadia_osm_bright_tile_layer = TileLayer(
    "https://tiles.stadiamaps.com/tiles/osm_bright/{z}/{x}/{y}{r}.png",
    Dict(
        "maxZoom" => 20,
        "attribution" => "$attribution_stadia $attribution_openmaptiles $attribution_osm",
        "referrerPolicy" => "origin",
    )
)
stadia_outdoors_tile_layer = TileLayer(
    "https://tiles.stadiamaps.com/tiles/outdoors/{z}/{x}/{y}{r}.png",
    Dict(
        "maxZoom" => 20,
        "attribution" => "$attribution_stadia $attribution_openmaptiles $attribution_osm",
        "referrerPolicy" => "origin",
    )
)
stadia_stamen_toner_tile_layer = TileLayer(
    "https://tiles.stadiamaps.com/tiles/stamen_toner/{z}/{x}/{y}{r}.png",
    Dict(
        "maxZoom" => 20,
        "attribution" => "$attribution_stadia $attribution_stamen $attribution_openmaptiles $attribution_osm",
        "referrerPolicy" => "origin",
    )
)
# NOTE(review): watercolor tiles are .jpg and top out at zoom 16, unlike the
# other Stadia styles — keep these differences when editing.
stadia_stamen_watercolor_tile_layer = TileLayer(
    "https://tiles.stadiamaps.com/tiles/stamen_watercolor/{z}/{x}/{y}.jpg",
    Dict(
        "maxZoom" => 16,
        "attribution" => "$attribution_stadia $attribution_stamen $attribution_openmaptiles $attribution_osm",
        "referrerPolicy" => "origin",
    )
)
"""
Tile layers that retrieve tiles from Stadia Maps.
See [the documentation of Stadia Maps](https://docs.stadiamaps.com/) for more information about their terms of service.
## Styles
- `osm_bright`: similar to the OpenStreetMap layout.
- `outdoors`: similar to `osm_bright`, but puts more focus on things like parks, hiking trails, mountains, etc.
- `stamen_toner`: a high-contrast, black and white style.
- `stamen_watercolor`: looks like a watercolour painting.
## Authentication
Requests to Stadia Maps are not authenticated and do not contain an API key.
At the time of writing, Stadia Maps allows unauthenticated requests from `localhost`, such as those from a local Pluto notebook. If you want to host your notebook online, you should request an API key from Stadia Maps and create a `TileLayer` that uses your API key.
"""
stadia_tile_layers = (
    osm_bright = stadia_osm_bright_tile_layer,
    outdoors = stadia_outdoors_tile_layer,
    stamen_toner = stadia_stamen_toner_tile_layer,
    stamen_watercolor = stadia_stamen_watercolor_tile_layer,
)
md"""
Click the eye icon to see this content.
"""
end
# βββ‘ e377c1ff-e618-4192-b351-9aad766d3e69
md"""
## Map
"""
# βββ‘ 87b5ce4e-756e-44b2-a725-451788c234bd
md"### Some useful types"
# βββ‘ 0f7abed3-56e3-4492-8592-5df441652314
"""
`LatLng` is a type alias for `Tuple{Number, Number}`.
Example:
`(0, 0)`
"""
LatLng = Tuple{Number, Number}
# βββ‘ 9c8ec369-8e24-4a9e-af45-0bd73946302d
"""
A `Path` contains some of the available options to render elements.
In JS, it is a class that other types inherit from.
Here in Julia, we use composition of this types inside the elements.
It contains:
- `stroke`: `true` to see the outline
- `color`: any `String` that is a valid CSS color will color the stroke
- `weight`: a `Number` for the width of the stroke
- `fill`: `true` to fill in the shape
- `fillColor`: any `String` that is a valid CSS color will color the fill
"""
@kwdef struct Path
stroke::Bool = true
color::String = "#3388ff"
weight::Number = 3
fill::Bool = true
fillColor::String = "#3388ff"
end
# βββ‘ a9e6e3d6-c607-4c3c-851d-8c1acf06322a
md"""
### Elements
These are things that can be put on to the map.
"""
# βββ‘ 50926132-f8e5-446c-ba18-ad0c275b1b94
"""
A `Marker` is a location to put a pin on the `Map`
"""
struct Marker
center::LatLng
end
# βββ‘ 8b3724ce-a96f-4aec-ab8b-4ac811ff0ce6
"""
A `Polyline` is a line to display on the `Map`
"""
@kwdef struct Polyline
latlngs::Vector{LatLng}
path::Path = Path()
end
# βββ‘ 708af5fa-86b9-4f48-ab3b-bd58ee9f8c0f
"""
A `Polygon` is a polygon or shape to display on the `Map`
"""
@kwdef struct Polygon
latlngs::Vector{LatLng}
path::Path = Path()
end
# βββ‘ e4583488-4266-4ecd-87c0-6cb07b7908d2
"""
A `Circle` is a circle to display on the `Map`
"""
@kwdef struct Circle
center::LatLng
radius::Number
path::Path = Path()
end
# βββ‘ a258ecde-61ee-4e21-8afa-e55c45bcad77
"""
A `MapOption` contains some of the available options to customize the `Map`.
It contains:
- `zoomControl`: `true` displays the zoom control buttons
- `doubleClickZoom`: `true` allows to double-click to zoom
- `scrollWheelZoom`: `true` allows the scroll-wheel to zoom
- `dragging`: `true` allows the mouse to drag the map around
"""
@kwdef struct MapOption
zoomControl::Bool = true
doubleClickZoom::Bool = true
scrollWheelZoom::Bool = true
dragging::Bool = true
end
# βββ‘ 28e0f702-2791-49f1-9620-caa126ffded6
"""
The `staticMapOption` is a function that returns a `MapOption` which creates a completly static `Map` rendering.
"""
function staticMapOption()::MapOption
MapOption(false, false, false, false)
end
# βββ‘ eb8bcc8a-1125-40e9-be98-8a4a4a3cf847
"""
A `Map` is a representation of map to be rendered.
It contains:
- `center`: a `LatLng` to center the map
- `zoom`: a `Number` for the zoom amount `1` is wide and `10+` is zoomed in
- `tile`: a `TileLayer` to change the look
- `height`: an `Integer` to set the height
- `lines`: a `Vector{Polyline}` for lines to display
- `polygons`: a `Vector{Polygon}` for polygons to display
- `circles`: a `Vector{Circle}` for circles to display
- `markers`: a `Vector{Marker}` for markers to display
- `option`: a `MapOption` to configure
"""
@kwdef mutable struct Map
center::LatLng
zoom::Number = 4
tile::TileLayer= osm_tile_layers.standard
height::Integer = 500
lines::Vector{Polyline} = []
polygons::Vector{Polygon} = []
circles::Vector{Circle} = []
markers::Vector{Marker} = []
option::MapOption = MapOption()
end
# βββ‘ 7b64daba-0701-4981-b0d0-015b4126d16a
md"""
### Custom @htl rendering
The following cells define custom rendering methods for the map rendering to work.
We extend `Base.show` and `HypertextLiteral.print_script`
"""
# βββ‘ 704362e3-be7f-43b4-8d45-80177668b86b
"""
The function `to_dict` is a little helper that is used for rendering struct correctly in JS.
"""
function to_dict(mo)::Dict{String, Any}
Dict(String(key) => getfield(mo, key) for key in propertynames(mo))
end
# βββ‘ af83beb6-1f07-4ef6-9fca-d6fa2ff2d2e1
# Emit the Javascript that adds this polyline to the (in-scope) `zeMap`
# variable created by `leaflet`.
function Base.show(io::IO, m::MIME"text/javascript", p::Polyline)
    tio1, tio2 = (IOBuffer(), IOBuffer())
    HypertextLiteral.print_script(tio1, p.latlngs)        # JS array of [lat, lng] pairs
    HypertextLiteral.print_script(tio2, to_dict(p.path))  # JS options object
    print(io, "L.polyline($(String(take!(tio1))), $(String(take!(tio2)))).addTo(zeMap);")
end
# βββ‘ 62bfb5e4-5a87-4391-90bd-137ff2d8d1ad
# Render each marker's Javascript snippet in sequence.
function HypertextLiteral.print_script(io::IO, value::Vector{Marker})
    for marker in value
        Base.show(io, MIME("text/javascript"), marker)
    end
end
# βββ‘ 29defb6b-1f55-4e94-a481-d735e2a882e3
# Emit the Javascript that adds this marker to the (in-scope) `zeMap` map.
function Base.show(io::IO, m::MIME"text/javascript", p::Marker)
    tio1 = IOBuffer()
    HypertextLiteral.print_script(tio1, p.center)  # JS [lat, lng] pair
    print(io, "L.marker($(String(take!(tio1)))).addTo(zeMap);")
end
# βββ‘ 8d278e04-ffec-4730-b0f6-c01c91bd06dd
# Render each polyline's Javascript snippet in sequence.
function HypertextLiteral.print_script(io::IO, value::Vector{Polyline})
    for line in value
        Base.show(io, MIME("text/javascript"), line)
    end
end
# βββ‘ 8e7197d6-1432-4302-ac47-75d04652a5b2
# Emit the Javascript that adds this circle to the (in-scope) `zeMap` map.
# `radius` is spliced alongside the spread Path options in one object literal.
function Base.show(io::IO, m::MIME"text/javascript", p::Circle)
    tio1, tio2 = (IOBuffer(), IOBuffer())
    HypertextLiteral.print_script(tio1, p.center)         # JS [lat, lng] pair
    HypertextLiteral.print_script(tio2, to_dict(p.path))  # JS options object
    print(io, "L.circle($(String(take!(tio1))), { radius: $(p.radius), ...$(String(take!(tio2))) }).addTo(zeMap);")
end
# βββ‘ 4bf06d7d-3705-4c7d-bae6-aa3cb92c6b99
# Render each circle's Javascript snippet in sequence.
function HypertextLiteral.print_script(io::IO, value::Vector{Circle})
    for circle in value
        Base.show(io, MIME("text/javascript"), circle)
    end
end
# βββ‘ 91802110-663e-4fb4-9167-c68af3561dbf
"""
"""
function Base.show(io::IO, m::MIME"text/javascript", p::Polygon)
tio1, tio2 = (IOBuffer(), IOBuffer())
HypertextLiteral.print_script(tio1, p.latlngs)
HypertextLiteral.print_script(tio2, to_dict(p.path))
print(io, "L.polygon($(String(take!(tio1))), $(String(take!(tio2)))).addTo(zeMap);")
end
# βββ‘ 7a563f43-4a2e-4e27-b153-ca33e126dc90
"""
"""
function HypertextLiteral.print_script(io::IO, value::Vector{Polygon})
foreach(p -> Base.show(io, MIME("text/javascript"), p), value)
end
# βββ‘ 54ab5a53-b023-427f-8918-cbd2abccc2da
md"## Map building"
# βββ‘ b10f75b0-b3be-4950-9644-127190284512
begin
"""
The `build` function takes a `Map` and an element and adds the element to its
list of elements of the corresponding type.
"""
function build end
"""
Use this `build` to add a `Polyline` to your `Map`'s lines.
"""
function build(m::Map, l::Polyline)
    # push! appends in place instead of rebuilding the vector ([v ; x]).
    push!(m.lines, l)
end
"""
Use this `build` to add a `Polygon` to your `Map`'s polygons.
"""
function build(m::Map, p::Polygon)
    push!(m.polygons, p)
end
"""
Use this `build` to add a `Circle` to your `Map`'s circles.
"""
function build(m::Map, c::Circle)
    push!(m.circles, c)
end
"""
Use this `build` to add a `Marker` to your `Map`'s markers.
"""
function build(m::Map, k::Marker)
    push!(m.markers, k)
end
end
# βββ‘ 2cc5f89f-042d-436c-bbbc-6be1a1d61e6c
md"## Render a map"
# βββ‘ 095c821d-60ee-401c-a51d-705a9ddd8e68
"""
The `leaflet` function takes a `Map` and renders it to the cell in Pluto.
It returns the `HypertextLiteral.Result` types.
"""
function leaflet(m::Map)
@htl("""
<div>
<link rel="stylesheet" href="https://unpkg.com/[email protected]/dist/leaflet.css"
integrity="sha256-p4NxAoJBhIIN+hmNHrzRCf9tD/miZyoHS5obTRR9BMY="
crossorigin=""/>
<script src="https://unpkg.com/[email protected]/dist/leaflet.js"
integrity="sha256-20nQCchB9co0qIjJZRGuk2/Z9VM+kNiyxNV1lvTlZBo="
crossorigin=""></script>
<div id="map"></div>
<style>
#map { height: $(m.height)px; }
</style>
<script>
var parent = currentScript.parentElement;
var mapElement = parent.querySelector("#map");
var zeMap = L.map(mapElement, $(to_dict(m.option)))
.setView([$(m.center[1]), $(m.center[2])], $(m.zoom));
L.tileLayer($(m.tile.url), $(m.tile.options)).addTo(zeMap);
$(m.lines)
$(m.polygons)
$(m.circles)
$(m.markers)
</script>
</div>
""")
end
# βββ‘ 96fbe075-8dfd-44cf-88a0-489bc361e62c
md"## Demo"
# βββ‘ 0edd50fd-e72f-4b36-869c-31d5465ac29a
begin
    # Demo: one element of every kind on a fully static world map.
    m = Map(center = (0, 0), option = staticMapOption())
    build(m, Polyline(latlngs = [(0, 5), (0, 10), (0, 15)]))
    build(m, Polyline(latlngs = [(5, 0), (10, 0), (15, 0)], path = Path(color = "#88ff33")))
    build(m, Polygon(latlngs = [(2, 6), (6, 6), (6, 10), (2, 10)]))
    build(m, Circle(center = (-4, -4), radius = 500_000, path = Path(color = "red")))
    build(m, Marker((-4, -4)))
    leaflet(m)
end
# βββ‘ 7a84c7e1-8d2d-4742-81bf-a35be540b517
begin
    # Demo: a single marker (coordinates are presumably Québec City) on a
    # Stadia "outdoors" tile style.
    m2 = Map(center = (46.81411097653479, -71.20089272116749), option = staticMapOption(), zoom = 12, tile=stadia_tile_layers.outdoors)
    build(m2, Marker((46.81411097653479, -71.20089272116749)))
    leaflet(m2)
end
# βββ‘ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
HypertextLiteral = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
[compat]
HypertextLiteral = "~0.9.5"
"""
# βββ‘ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.10.5"
manifest_format = "2.0"
project_hash = "5b37abdf7398dc5da4cd347d0609990238d895bb"
[[deps.HypertextLiteral]]
deps = ["Tricks"]
git-tree-sha1 = "7134810b1afce04bbc1045ca1985fbe81ce17653"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.5"
[[deps.Tricks]]
git-tree-sha1 = "7822b97e99a1672bfb1b49b668a6d46d58d8cbcb"
uuid = "410a4b4d-49e4-4fbc-ab6d-cb71b17b3775"
version = "0.1.9"
"""
# βββ‘ Cell order:
# ββbaef2610-79dc-11ef-12f2-ad52aa82fd14
# ββe51f2cf3-7e90-4489-a5eb-59aefbdfc3db
# β β7c52b98c-3617-4ad4-bf12-930468793f89
# ββ8db92502-e6cf-493b-9ade-a9f7bcf4ba70
# ββ2ed6631f-d845-48fb-be56-e4cffab14295
# ββe377c1ff-e618-4192-b351-9aad766d3e69
# ββ87b5ce4e-756e-44b2-a725-451788c234bd
# β β0f7abed3-56e3-4492-8592-5df441652314
# β β9c8ec369-8e24-4a9e-af45-0bd73946302d
# ββa9e6e3d6-c607-4c3c-851d-8c1acf06322a
# β β50926132-f8e5-446c-ba18-ad0c275b1b94
# β β8b3724ce-a96f-4aec-ab8b-4ac811ff0ce6
# β β708af5fa-86b9-4f48-ab3b-bd58ee9f8c0f
# β βe4583488-4266-4ecd-87c0-6cb07b7908d2
# β βa258ecde-61ee-4e21-8afa-e55c45bcad77
# β β28e0f702-2791-49f1-9620-caa126ffded6
# β βeb8bcc8a-1125-40e9-be98-8a4a4a3cf847
# ββ7b64daba-0701-4981-b0d0-015b4126d16a
# β β91802110-663e-4fb4-9167-c68af3561dbf
# β β7a563f43-4a2e-4e27-b153-ca33e126dc90
# β β8e7197d6-1432-4302-ac47-75d04652a5b2
# β β4bf06d7d-3705-4c7d-bae6-aa3cb92c6b99
# β βaf83beb6-1f07-4ef6-9fca-d6fa2ff2d2e1
# β β8d278e04-ffec-4730-b0f6-c01c91bd06dd
# β β29defb6b-1f55-4e94-a481-d735e2a882e3
# β β62bfb5e4-5a87-4391-90bd-137ff2d8d1ad
# β β704362e3-be7f-43b4-8d45-80177668b86b
# ββ54ab5a53-b023-427f-8918-cbd2abccc2da
# β βb10f75b0-b3be-4950-9644-127190284512
# ββ2cc5f89f-042d-436c-bbbc-6be1a1d61e6c
# β β095c821d-60ee-401c-a51d-705a9ddd8e68
# ββ96fbe075-8dfd-44cf-88a0-489bc361e62c
# β β0edd50fd-e72f-4b36-869c-31d5465ac29a
# β β7a84c7e1-8d2d-4742-81bf-a35be540b517
# ββ00000000-0000-0000-0000-000000000001
# ββ00000000-0000-0000-0000-000000000002
| LeafletPluto | https://github.com/florianfmmartin/LeafletPluto.jl.git |
|
[
"MIT"
] | 0.1.0 | a537ef077e3b83cefbf0cfe6e8d73523e8c17cb7 | code | 617 | using LeafletPluto
using Test
using HypertextLiteral
@testset "LeafletPluto.jl" begin
@testset "rendering works" begin
m = Map(center = (0, 0), option = staticMapOption())
build(m, Polyline(latlngs = [(0, 5), (0, 10), (0, 15)]))
build(m, Polyline(latlngs = [(5, 0), (10, 0), (15, 0)], path = Path(color = "#88ff33")))
build(m, Polygon(latlngs = [(2, 6), (6, 6), (6, 10), (2, 10)]))
build(m, Circle(center = (-4, -4), radius = 500_000, path = Path(color = "red")))
build(m, Marker((-4, -4)))
@test typeof(leaflet(m)) == HypertextLiteral.Result
end
end
| LeafletPluto | https://github.com/florianfmmartin/LeafletPluto.jl.git |
|
[
"MIT"
] | 0.1.0 | a537ef077e3b83cefbf0cfe6e8d73523e8c17cb7 | docs | 2423 | # LeafletPluto.jl
[](https://github.com/florianfmmartin/LeafletPluto.jl/actions/workflows/CI.yml?query=branch%3Amain)
A simple map widget for Pluto.jl notebooks. It creates a map using [Leaflet](https://leafletjs.com/).

## Prerequisites
LeafletPluto is a package for [Julia](https://julialang.org/). It is is designed to be used in [Pluto notebooks](https://github.com/fonsp/Pluto.jl). If you are using Pluto, you're ready to use this package!
## Usage
Basic usage looks like this:
```julia
using LeafletPluto
# create a map
m = Map(center = (0, 0), option = staticMapOption())
# add a few elements to it
build(m, Polyline(latlngs = [(0, 5), (0, 10), (0, 15)]))
build(m, Polyline(latlngs = [(5, 0), (10, 0), (15, 0)], path = Path(color = "#88ff33")))
build(m, Polygon(latlngs = [(2, 6), (6, 6), (6, 10), (2, 10)]))
build(m, Circle(center = (-4, -4), radius = 500_000, path = Path(color = "red")))
build(m, Marker((-4, -4)))
# display it
leaflet(m)
```
### Tile layer
*This is inspired from PlutoMapPicker.jl*
Maps use a _raster tile layer_ to show the actual map. This layer is built of images of the world map. To load in these tiles as needed, the map must request the tiles from an API.
The default setting will request tiles from [Open Street Map](https://openstreetmap.org), but you can change this setting. The package also includes some ready-to-go configurations for [Stadia Maps](https://stadiamaps.com/). For example:
```julia
Map(center=(0, 0), tile=stadia_tile_layers.outdoors)
```
You can also create a custom `TileLayer` to use a different server or make requests with an API key.
Please note that PlutoMapPicker & LeafletPluto are not affiliated with Open Street Map or Stadia Maps. The `TileLayer` configurations for these services are provided for convenience, but it is up to you whether the way you're using these services complies with their usage policy. See [Open Street Map's usage policy](https://operations.osmfoundation.org/policies/tiles/) and [Stadia Map's documentation](https://docs.stadiamaps.com/) for more information.
## Licence
This package is shared under an MIT licence. See [LICENSE](./LICENSE) for more information.
| LeafletPluto | https://github.com/florianfmmartin/LeafletPluto.jl.git |
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 133 | module ArrayRotations
include("utils.jl")
include("auxrot.jl")
include("rev.jl")
include("bridge.jl")
include("gries-mills.jl")
end
| ArrayRotations | https://github.com/sadit/ArrayRotations.jl.git |
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 715 | struct AuxRotation{T}
    buff::Vector{T} # reusable scratch space; each Thread must have an AuxRotation struct
end
export AuxRotation
export rotate!
"""
rotate!(::AuxRotation, A::AbstractVector, tailpos::Integer)
Rotates `A` on `tailpos` using at most `n / 2` extra memory, 3n/2 copy-write ops.
"""
function rotate!(aux::AuxRotation, A::AbstractVector, tailpos::Integer)
m = tailpos - 1
n = length(A) - m
if m <= n
resize!(aux.buff, m)
_copy!(aux.buff, 1, A, 1, m)
_left_shift!(A, tailpos)
_copy!(A, n+1, aux.buff, 1, m)
else
resize!(aux.buff, n)
_copy!(aux.buff, 1, A, tailpos, n)
_right_shift!(A, m)
_copy!(A, 1, aux.buff, 1, n)
end
A
end
| ArrayRotations | https://github.com/sadit/ArrayRotations.jl.git |
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 2195 | struct BridgeRotation{T}
    buff::Vector{T} # reusable scratch ("bridge") buffer, resized on each rotate!
end
export BridgeRotation
"""
rotate!(bridge::BridgeRotation, A::AbstractVector, tailpos::Integer)
Rotates `A` on `tailpos` using a small extra memory. β€ a third of A and β€ than n+n/3 copy-write ops.
"""
function rotate!(bridge::BridgeRotation, A::AbstractVector, tailpos::Integer)
m = tailpos - 1
n = length(A) - m
if m <= n
_interchange_right_large!(A, bridge.buff, tailpos)
else
_interchange_left_large!(A, bridge.buff, tailpos)
end
A
end
"""
_interchange_left_large!(A::AbstractVector, buff::AbstractVector, tail::Integer)
Interchange and shift two subarrays
```
buffer
-----
4 5 6 7 8 9 1 2 3
----- |
head tail
```
must produce
```
prev head
-----
1 2 3 4 5 6 1 2 3
----- -----
prev tail untouched
```
and finally, the buffer should be copied-back
```
prev head
-----
1 2 3 4 5 6 7 8 9
----- -----
prev tail from buffer
```
"""
@inline function _interchange_left_large!(A::AbstractVector, buff::AbstractVector, tail::Integer)
    m = length(A)
    n = m - tail + 1 # tail block length; the head is assumed at least this long
    p = 2n
    resize!(buff, m - p)
    # Stash the middle of the head (A[n+1:m-n]); the loop below overwrites it.
    _copy!(buff, 1, A, n + 1, length(buff))
    @inbounds for i in 1:n
        n += 1
        A[n] = A[i]     # move a head-prefix element one block to the right
        A[i] = A[tail]  # pull the next tail element to the front
        tail += 1
    end
    # Restore the stashed middle section after the relocated prefix (A[2n+1:...]).
    _copy!(A, p + 1, buff, 1, length(buff))
    A
end
"""
_interchange_right_large!(A::AbstractVector, buff::AbstractVector, tail::Integer)
Interchange and shift two subarrays
```
head buffer
----- -----
7 8 9 1 2 3 4 5 6
|
tail
```
must produce
```
prev tail prev head
----- -----
1 2 3 1 2 3 7 8 9
-----
untouched
```
The centering block is then filled with the buffer.
"""
@inline function _interchange_right_large!(A::AbstractVector, buff::AbstractVector, tail::Integer)
    m = length(A)
    n = tail - 1 # head block length; the tail is assumed at least this long
    p = 2n
    resize!(buff, m - p)
    # Stash everything past the first 2n elements (A[2n+1:end]); the loop
    # below only needs the first 2n slots plus the final n slots.
    _copy!(buff, 1, A, p + 1, length(buff))
    @inbounds for i in n:-1:1
        A[m] = A[i]  # move a head element to its final slot at the end
        A[i] = A[p]  # pull a tail-prefix element to the front
        p -= 1
        m -= 1
    end
    # Restore the stashed section right after the relocated tail prefix.
    _copy!(A, n + 1, buff, 1, length(buff))
    A
end
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 943 | struct GriesMillsRotation end
export GriesMillsRotation
"""
rotate!(aux::GriesMillsRotation, A::AbstractVector, tailpos::Integer)
Rotates `A` on `tailpos` using a O(1) extra memory
"""
function rotate!(::GriesMillsRotation, A::AbstractVector, tailpos::Integer)
sp = 1
ep = length(A)
while sp < tailpos #sp < ep
mid = (ep+sp) >> 1
if tailpos > mid # right is shorter
n = ep - tailpos + 1
@inbounds for i in 0:n-1
tmp = A[sp+i]
A[sp+i] = A[tailpos+i]
A[tailpos+i] = tmp
end
sp = sp + n
else # left is shorter
n = tailpos - sp
p = ep - n + 1
@inbounds for i in 0:n-1
tmp = A[sp+i]
A[sp+i] = A[p+i]
A[p+i] = tmp
end
ep = p - 1
tailpos = sp + n
end
end
A
end
| ArrayRotations | https://github.com/sadit/ArrayRotations.jl.git |
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 389 | struct RevRotation end
export RevRotation
"""
rotate!(::RevRotation, A::AbstractVector, tailpos::Integer)
Rotates `A` on `tailpos` using ``O(1)`` extra memory, yet using several 2n copy-write ops (triple reversing)
"""
function rotate!(::RevRotation, A::AbstractVector, tailpos::Integer)
reverse!(A, 1, tailpos - 1)
reverse!(A, tailpos, length(A))
reverse!(A)
A
end
| ArrayRotations | https://github.com/sadit/ArrayRotations.jl.git |
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 1159 |
"""
_copy!(dst::AbstractVector, dst_sp::Integer, src::AbstractVector, src_sp::Integer, n::Integer)
Copies `n` contiguous elements from `src` (starting at `src_sp`) into `dst` (starting at `dst_sp`).
"""
@inline function _copy!(dst::AbstractVector, dst_sp::Integer, src::AbstractVector, src_sp::Integer, n::Integer)
@inbounds for i in 1:n
dst[dst_sp] = src[src_sp]
dst_sp += 1
src_sp += 1
end
dst
end
"""
_left_shift!(A::AbstractVector, sp)
Shifts `A[sp:end]` to the beggining of `A`
"""
@inline function _left_shift!(A::AbstractVector, sp)
i = 1
@inbounds for j in sp:length(A)
A[i] = A[j]
i += 1
end
A
end
"""
_right_shift!(A::AbstractVector, ep)
Shifts `A[1:ep]` to the end of `A`
"""
@inline function _right_shift!(A::AbstractVector, ep)
i = length(A)
@inbounds for j in ep:-1:1
A[i] = A[j]
i -= 1
end
A
end
#=
@inline function _interchange!(A::AbstractVector, head, tail, n)
@inbounds for i in 1:n
tmp = A[head]
A[head] = A[tail]
A[tail] = tmp
head += 1
tail += 1
end
A
end
=# | ArrayRotations | https://github.com/sadit/ArrayRotations.jl.git |
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | code | 2259 | using ArrayRotations
using ArrayRotations: _copy!, _left_shift!, _right_shift!, _interchange_left_large!, _interchange_right_large!
using Test
@testset "_copy!" begin
A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
B = zeros(Int, 20)
_copy!(B, 1, A, 1, 10)
@test B == [7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
reverse!(A)
_copy!(B, 11, A, 1, 10)
@test B == [7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1, 10, 9, 8, 7]
B = zeros(Int, 100)
A = collect(1:100)
for i in 1:96
_copy!(B, i, A, i, 5)
end
@test A == B
end
@testset "_*_shift!" begin
A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
@test _left_shift!(A, 5) == [1, 2, 3, 4, 5, 6, 3, 4, 5, 6]
A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
_right_shift!(A, 4) == [7, 8, 9, 10, 1, 2, 7, 8, 9, 10]
end
# Each rotation algorithm is checked on a 10-element and a 15-element vector
# whose rotated form is a simple ascending range.
@testset "AuxRotation" begin
    aux = AuxRotation(Int[])
    A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
    @test rotate!(aux, A, 5) == collect(1:10)
    A = [7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6]
    @test rotate!(aux, A, 10) == collect(1:15)
end
@testset "RevRotation" begin
    aux = RevRotation()
    A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
    @test rotate!(aux, A, 5) == collect(1:10)
    A = [7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6]
    @test rotate!(aux, A, 10) == collect(1:15)
end
#=
@testset "Interchange_*_large" begin
    A = [4, 5, 6, 7, 8, 9, 1, 2, 3]
    @test _interchange_left_large!(copy(A), 7) == [1, 2, 3, 4, 5, 6, 1, 2, 3]
    A = [7, 8, 9, 1, 2, 3, 4, 5, 6]
    @test _interchange_right_large!(copy(A), 4) == [1, 2, 3, 1, 2, 3, 7, 8, 9]
end
=#
@testset "BridgeRotation" begin
    bridge = BridgeRotation(Int[])
    A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
    @test rotate!(bridge, A, 5) == collect(1:10)
    A = [7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6]
    @test rotate!(bridge, A, 10) == collect(1:15)
end
@testset "GriesMillsRotation" begin
    gm = GriesMillsRotation()
    # Additional small cases exercise the block-swap base cases.
    A = [3, 1, 2]
    @test rotate!(gm, A, 2) == collect(1:3)
    A = [2, 3, 1]
    @test rotate!(gm, A, 3) == collect(1:3)
    A = [7, 8, 9, 10, 1, 2, 3, 4, 5, 6]
    @test rotate!(gm, A, 5) == collect(1:10)
    A = [7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6]
    @test rotate!(gm, A, 10) == collect(1:15)
end
|
[
"MIT"
] | 0.1.0 | 7331fdf3771d283784487c57d10dc224138105b0 | docs | 508 | # Array rotations
A small package with array rotation algorithms (block swap). Only 1D arrays (vectors).
## Usage:
Call `rotate!(rot::Rotation, A::AbstractVector, tailpos::Integer)` to rotate the array `A` on the given tail position.
A few algorithms are implemented:
- AuxRotation: rotates using at most n/2 extra memory
- BridgeRotation: rotates using at most n/3 extra memory
- RevRotation: rotates using O(1) extra memory
- GriesMillsRotation: Gries Mills algorithm, rotates using O(1) extra memory | ArrayRotations | https://github.com/sadit/ArrayRotations.jl.git |
|
[
"MIT"
] | 0.1.0 | 73ff501c057bda1c12679545d85ecf11b2765b19 | code | 618 | using ElementarySymmetricFunctions
using Documenter
# Build the HTML documentation; pretty URLs are enabled only on CI.
makedocs(;
    modules=[ElementarySymmetricFunctions],
    authors="Benjamin Deonovic",
    repo="https://github.com/bdeonovic/ElementarySymmetricFunctions.jl/blob/{commit}{path}#L{line}",
    sitename="ElementarySymmetricFunctions.jl",
    format=Documenter.HTML(;
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://bdeonovic.github.io/ElementarySymmetricFunctions.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
# Push the built docs to the gh-pages branch of the repository.
deploydocs(;
    repo="github.com/bdeonovic/ElementarySymmetricFunctions.jl",
)
| ElementarySymmetricFunctions | https://github.com/bdeonovic/ElementarySymmetricFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 73ff501c057bda1c12679545d85ecf11b2765b19 | code | 372 | module ElementarySymmetricFunctions
import DSP.filt!
import FFTW.fft!
import SpecialFunctions.logbeta
include("util.jl")
include("esf.jl")
include("poisbin.jl")
export esf_sum, esf_sum_reg, esf_dc_fft, esf_dc_group, esf_dc_group_reg
export poisbin_sum_taub, poisbin_fft, poisbin_chen, poisbin_dc_fft, poisbin_dc_group, poisbin_fft_cf
end
| ElementarySymmetricFunctions | https://github.com/bdeonovic/ElementarySymmetricFunctions.jl.git |
|
[
"MIT"
function esf_sum!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
    # In-place elementary-symmetric-function recurrence: after absorbing
    # x[1:col], S[k+1] holds the order-k ESF of those entries. Rows are swept
    # from high order down to 1 so that each S[row] read still refers to the
    # previous column's value.
    fill!(S, zero(T))
    S[1] = one(T)
    @inbounds for col in eachindex(x)
        for row in col:-1:1
            S[row+1] += x[col] * S[row]
        end
    end
end
"""
    esf_sum(x)

Return the elementary symmetric functions of `x` as a vector `S` of length
`length(x) + 1`, where `S[k+1]` holds the order-`k` function for
`k = 0, ..., n` (so `S[1] == 1`).

# Examples
```julia-repl
julia> esf_sum([3.5118, .6219, .2905, .8450, 1.8648])
6-element Array{Float64,1}:
  1.0
  7.134
 16.9493
 16.7781
  7.05289
  0.999736
```
"""
function esf_sum(x::AbstractArray{T,1}) where T <: Real
    S = Vector{T}(undef, length(x) + 1)
    esf_sum!(S, x)
    return S
end
function esf_sum_reg!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
    # Same recurrence as `esf_sum!`, except each S[k+1] is maintained divided
    # by binomial(col, k) while column `col` is absorbed; the two weights
    # below update that normalization incrementally to avoid over/under-flow.
    fill!(S, zero(T))
    S[1] = one(T)
    @inbounds for col in eachindex(x)
        for row in col:-1:1
            S[row+1] = (row/col) * x[col] * S[row] + ((col-row)/col) * S[row+1]
        end
    end
end
"""
    esf_sum_reg(x)

Return the elementary symmetric functions of `x` (orders `k = 0, ..., n`,
`n = length(x)`), each divided by the binomial coefficient `binomial(n, k)`.
The regularization keeps intermediate values well-scaled, preventing
over/under-flow for long inputs.

# Examples
```julia-repl
julia> esf_sum_reg([3.5118, .6219, .2905, .8450, 1.8648])
6-element Array{Float64,1}:
 1.0
 1.4268
 1.69493
 1.67781
 1.41058
 0.999736
```
"""
function esf_sum_reg(x::AbstractArray{T,1}) where T <: Real
    S = Vector{T}(undef, length(x) + 1)
    esf_sum_reg!(S, x)
    return S
end
#Regularized summation algorithm where one input is zeroed out (for computing derivatives)
# `adj` counts the skipped (zeroed) entries so the binomial-style weights are
# taken with respect to the number of *active* entries. The denominators use
# `col-adj+1` rather than `col-adj` — presumably to keep the normalization
# aligned with the full-length problem when one entry is removed; TODO confirm
# against the derivative identities used by `esf_sum_dervs_1_reg!`.
function esf_sum_reg2!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
    n = length(x)
    fill!(S,zero(T))
    S[1] = one(T)
    adj = 0
    @inbounds for col in 1:n
        # Zero entries are treated as "removed" rather than contributing a
        # zero factor.
        if x[col] == 0.0
            adj += 1
            continue
        end
        for r in 1:(col-adj)
            row = (col-adj) - r + 1
            S[row+1] = (col-adj-row)/(col-adj+1) * S[row+1] + (row+1)/(col-adj+1) * x[col] * S[row]
        end
        S[1] *= (col-adj)/(col-adj+1)
    end
end
#Regularized summation algorithm where two inputs are zeroed out (for computing derivatives)
# Identical structure to `esf_sum_reg2!` but with denominators `col-adj+2`
# and numerator offset `row+2`, matching the removal of *two* entries
# (used for second derivatives in `esf_sum_dervs_2_reg!`).
function esf_sum_reg3!(S::AbstractArray{T,1}, x::AbstractArray{T,1}) where T <: Real
    n = length(x)
    fill!(S,zero(T))
    S[1] = one(T)
    adj = 0
    @inbounds for col in 1:n
        # Zero entries are treated as "removed" rather than contributing a
        # zero factor.
        if x[col] == 0.0
            adj += 1
            continue
        end
        for r in 1:(col-adj)
            row = (col-adj) - r + 1
            S[row+1] = (col-adj-row)/(col-adj+2) * S[row+1] + (row+2)/(col-adj+2) * x[col] * S[row]
        end
        S[1] *= (col-adj) / (col-adj+2)
    end
end
# Compute the ESF table together with its first partial derivatives.
# Returns (S, P) where S is the ESF of `x` and P[j, :] is the ESF of `x`
# with entry j removed (obtained by temporarily zeroing x[j]).
function esf_sum_dervs_1(x::AbstractVector{T}) where T <: Real
    n = length(x)
    S = Vector{T}(undef,n+1)
    P = Array{T,2}(undef,n,n+1)
    esf_sum!(S, x)
    esf_sum_dervs_1!(P, x)
    return S, P
end
# Fill P[j, :] with the ESF of `x` after zeroing x[j].
# NOTE: `x` is mutated in place and restored after each iteration, so this
# is not safe to call concurrently on a shared `x`.
function esf_sum_dervs_1!(P::AbstractArray{T,2}, x::AbstractVector{T}) where T <: Real
    n = length(x)
    xj=zero(T)
    @inbounds for j in 1:n
        xj = x[j]
        x[j] = zero(T)
        @views esf_sum!(P[j,:], x)
        x[j] = xj
    end
end
# Regularized variant of `esf_sum_dervs_1` (values scaled by binomial
# coefficients; see `esf_sum_reg!` / `esf_sum_reg2!`).
function esf_sum_dervs_1_reg(x::AbstractVector{T}) where T <: Real
    n = length(x)
    S = Vector{T}(undef,n+1)
    P = Array{T,2}(undef,n,n+1)
    esf_sum_reg!(S, x)
    esf_sum_dervs_1_reg!(P, x)
    return S, P
end
# Regularized fill of the derivative table; uses `esf_sum_reg2!`, which
# skips the zeroed entry in its normalization.
function esf_sum_dervs_1_reg!(P::AbstractArray{T,2}, x::AbstractVector{T}) where T <: Real
    n = length(x)
    xj=zero(T)
    @inbounds for j in 1:n
        xj = x[j]
        x[j] = zero(T)
        @views esf_sum_reg2!(P[j,:], x)
        x[j] = xj
    end
end
# Compute the ESF table together with its second partial derivatives.
# Returns (S, H) where H[j, k, :] is the ESF of `x` with entries j and k
# removed (entries are temporarily zeroed and then restored).
function esf_sum_dervs_2(x::AbstractVector{T}) where T <: Real
    n = length(x)
    S = Vector{T}(undef,n+1)
    H = Array{T,3}(undef,n,n,n+1)
    esf_sum!(S, x)
    esf_sum_dervs_2!(H, x)
    return S, H
end
# Fill the symmetric table H by zeroing pairs (j, k); only j <= k is computed
# and the result is mirrored. When k == j, x[k] is already zero, so the
# diagonal holds the single-removal ESF.
# NOTE: `x` is mutated in place and restored, so not concurrency-safe.
function esf_sum_dervs_2!(H::AbstractArray{T,3}, x::AbstractVector{T}) where T <: Real
    n = length(x)
    xj=zero(T)
    @inbounds for j in 1:n
        xj = x[j]
        x[j] = zero(T)
        for k in j:n
            xk = x[k]
            x[k] = zero(T)
            @views esf_sum!(H[j,k,:], x)
            H[k,j,:] .= H[j,k,:]
            x[k] = xk
        end
        x[j] = xj
    end
end
# Regularized variant of `esf_sum_dervs_2`.
function esf_sum_dervs_2_reg(x::AbstractVector{T}) where T <: Real
    n = length(x)
    S = Vector{T}(undef,n+1)
    H = Array{T,3}(undef,n,n,n+1)
    esf_sum_reg!(S, x)
    esf_sum_dervs_2_reg!(H, x)
    return S, H
end
# Regularized fill: the diagonal (one removed entry) uses `esf_sum_reg2!`,
# off-diagonal pairs (two removed entries) use `esf_sum_reg3!`, and the
# table is mirrored for k < j.
function esf_sum_dervs_2_reg!(H::AbstractArray{T,3}, x::AbstractVector{T}) where T <: Real
    n = length(x)
    xj=zero(T)
    @inbounds for j in 1:n
        xj = x[j]
        x[j] = zero(T)
        @views esf_sum_reg2!(H[j,j,:], x)
        for k in j+1:n
            xk = x[k]
            x[k] = zero(T)
            @views esf_sum_reg3!(H[j,k,:], x)
            H[k,j,:] .= H[j,k,:]
            x[k] = xk
        end
        x[j] = xj
    end
end
# Divide-and-conquer ESF: compute the ESF of M = 2^k contiguous subsets of
# `x` directly, then combine pairs of subset polynomials via `DSP.filt!`
# (FIR filtering here acts as polynomial multiplication / convolution,
# truncated to the combined length m). Surviving results are compacted into
# the leading columns of `tempS` each round, halving M until one remains.
# NOTE: `group_sizes` is mutated (incremented and overwritten) during the
# reduction.
function esf_dc_fft!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
                     x::AbstractArray{T,1}, k::D,
                     group_sizes::AbstractArray{D,1},
                     group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
    n = length(x)
    M = size(tempS)[2]
    tempS .= zero(T)
    #convolve initial subsets
    @inbounds for g in 1:M
        @views esf_sum!(tempS[1:(group_sizes[g]+1),g],
                        x[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
        # A subset of size s yields s+1 ESF coefficients.
        group_sizes[g] += 1
    end
    while M > 1
        next_avail_col = 1
        @inbounds for g in 1:2:M
            m = group_sizes[g] + group_sizes[g+1] - 1
            @views filt!(S[1:m], tempS[1:group_sizes[g],g], one(T), tempS[1:m,g+1])
            @views copyto!(tempS[1:m,next_avail_col], S[1:m])
            group_sizes[next_avail_col] = m
            next_avail_col += 1
        end
        M = div(M,2)
    end
end
# Allocating wrapper: splits `x` into M = 2^k groups of near-equal size
# (k is clamped so that every group is non-empty) and runs the reduction.
function esf_dc_fft(x::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
    n = length(x)
    k = min(floor(D, log2(n)), k)
    M = 2^k
    L = n/M
    r = rem(n,M) / M
    # M*(1-r) groups of floor(L) elements followed by M*r groups of ceil(L).
    group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
    group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
    S = Vector{T}(undef,n+1)
    tempS = zeros(T, n+1,M)
    esf_dc_fft!(S, tempS, x, k, group_sizes, group_start_idx)
    return S
end
# Divide-and-conquer ESF identical in structure to `esf_dc_fft!`, but pairs of
# subset polynomials are combined with the explicit convolution
# `join_groups!` instead of `DSP.filt!`.
# NOTE: unlike `esf_dc_fft!`, `tempS` is not zeroed here; the caller
# (`esf_dc_group`) allocates it with `zeros`.
function esf_dc_group!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
                       x::AbstractArray{T,1}, k::D,
                       group_sizes::AbstractArray{D,1},
                       group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
    n = length(x)
    M = size(tempS)[2]
    #convolve initial subsets
    @inbounds for g in 1:M
        @views esf_sum!(tempS[1:(group_sizes[g]+1),g],
                        x[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
        group_sizes[g] += 1
    end
    while M > 1
        next_avail_col = 1
        @inbounds for g in 1:2:M
            m = group_sizes[g] + group_sizes[g+1] - 1
            @views join_groups!(S[1:m], tempS[1:group_sizes[g+1],g+1], tempS[1:group_sizes[g],g])
            @views copyto!(tempS[1:m,next_avail_col], S[1:m])
            group_sizes[next_avail_col] = m
            next_avail_col += 1
        end
        M = div(M,2)
    end
end
# Allocating wrapper; grouping scheme matches `esf_dc_fft`.
function esf_dc_group(x::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
    n = length(x)
    k = min(floor(D, log2(n)), k)
    M = 2^k
    L = n/M
    r = rem(n,M) / M
    group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
    group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
    S = Vector{T}(undef,n+1)
    tempS = zeros(T, n+1,M)
    esf_dc_group!(S, tempS, x, k, group_sizes, group_start_idx)
    return S
end
# Regularized divide-and-conquer ESF: subsets are summed with `esf_sum_reg!`
# (binomial-scaled coefficients) and pairs are merged with
# `join_groups_reg!`, which re-weights terms so that the combined table stays
# binomial-regularized.
function esf_dc_group_reg!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
                           x::AbstractArray{T,1}, k::D,
                           group_sizes::AbstractArray{D,1},
                           group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
    n = length(x)
    M = size(tempS)[2]
    tempS .= zero(T)
    #convolve initial subsets
    @inbounds for g in 1:M
        @views esf_sum_reg!(tempS[1:(group_sizes[g]+1),g],
                            x[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
        group_sizes[g] += 1
    end
    while M > 1
        next_avail_col = 1
        @inbounds for g in 1:2:M
            m = group_sizes[g] + group_sizes[g+1] - 1
            @views join_groups_reg!(S[1:m], tempS[1:group_sizes[g+1],g+1], tempS[1:group_sizes[g],g])
            @views copyto!(tempS[1:m,next_avail_col], S[1:m])
            group_sizes[next_avail_col] = m
            next_avail_col += 1
        end
        M = div(M,2)
    end
end
# Allocating wrapper; grouping scheme matches `esf_dc_fft`.
function esf_dc_group_reg(x::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
    n = length(x)
    k = min(floor(D, log2(n)), k)
    M = 2^k
    L = n/M
    r = rem(n,M) / M
    group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
    group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
    S = Vector{T}(undef,n+1)
    tempS = zeros(T, n+1,M)
    esf_dc_group_reg!(S, tempS, x, k, group_sizes, group_start_idx)
    return S
end
| ElementarySymmetricFunctions | https://github.com/bdeonovic/ElementarySymmetricFunctions.jl.git |
|
[
"MIT"
# Fill `y` with the Poisson-binomial characteristic function evaluated at the
# n+1 Fourier frequencies, invert it with an in-place FFT, and store the
# (scaled) real part — the pmf — in `S`. `y` must arrive filled with ones.
function poisbin_fft!(S::AbstractArray{T,1}, y::AbstractArray{Complex{T},1},
                      p::AbstractArray{T,1}) where T <: Real
    n = length(p)
    omega = 2pi/(n+1)
    @inbounds for k in 0:n
        # The phase factor depends only on k; hoisted out of the inner loop
        # (the original recomputed exp(im*omega*k) for every m).
        z = exp(im * omega * k)
        for m in 1:n
            y[k+1] *= p[m] * z + (1-p[m])
        end
    end
    fft!(y)
    S .= real.(y) / (n+1)
end
# Allocating wrapper around `poisbin_fft!`. The workspace `y` starts at one
# so the characteristic-function factors can be multiplied in directly.
function poisbin_fft(p::AbstractArray{T,1}) where T <: Real
    n = length(p)
    y = ones(Complex{T}, n + 1)
    S = Vector{T}(undef, n + 1)
    poisbin_fft!(S, y, p)
    return S
end
#implementation from Distributions.jl to compare
# Characteristic-function evaluation in log-magnitude/argument form (for
# numerical stability), followed by a direct DFT. Only the first
# ceil(n/2) frequencies are computed explicitly; the remainder follow from
# conjugate symmetry of the characteristic function of a real distribution.
function poisbin_fft_cf!(S::AbstractArray{T,1}, y::AbstractArray{Complex{T},1},
                         z::AbstractArray{Complex{T},1},
                         p::AbstractArray{T,1}) where T<: Real
    n = length(p)
    y[1] = one(Complex{T}) / (n+1)
    # Frequencies are multiples of pi*omega (cospi/sinpi take the argument
    # in units of pi).
    omega = 2 * one(T) / (n+1)
    kmax = ceil(Int, n/2)
    @inbounds for k in 1:kmax
        # Accumulate the product of per-trial factors as log|z| and arg(z)
        # to avoid over/under-flow for large n.
        logz = zero(T)
        argz = zero(T)
        for j in 1:n
            zjl = 1 - p[j] + p[j] * cospi(omega*k) + im * p[j] * sinpi(omega*k)
            logz += log(abs(zjl))
            argz += atan(imag(zjl), real(zjl))
        end
        dl = exp(logz)
        y[k+1] = dl * cos(argz) / (n+1) + im * dl * sin(argz) / (n+1)
        # Mirror into the conjugate-symmetric half, when distinct.
        if n + 1 - k > k
            y[n + 1 - k + 1] = conj(y[k+1])
        end
    end
    _dft!(z, y)
    S .= real.(z)
end
# A simple implementation of a DFT to avoid introducing a dependency
# on an external FFT package just for this one distribution
# Direct O(n^2) discrete Fourier transform of `x`, written into `y`
# (overwritten). `mod(j * k, n)` keeps the phase argument small before
# scaling, improving accuracy of the trigonometric evaluation.
function _dft!(y::Vector{T}, x::Vector{T}) where T
    n = length(x)
    y .= zero(T)
    @inbounds for j = 0:n-1, k = 0:n-1
        # Bug fix: the constant here was mojibake (`Ο`, an undefined
        # identifier); restored to `π`, matching the DFT kernel
        # exp(-2πi * j * k / n).
        y[k+1] += x[j+1] * cis(-π * T(2 * mod(j * k, n)) / n)
    end
end
# Allocating wrapper: set up the characteristic-function buffer `y`, the DFT
# output buffer `z`, and the result vector `S`, then delegate to
# `poisbin_fft_cf!`.
function poisbin_fft_cf(p::AbstractArray{T,1}) where T <: Real
    n = length(p)
    S = Vector{T}(undef, n + 1)
    y = ones(Complex{T}, n + 1)
    z = Vector{Complex{T}}(undef, n + 1)
    poisbin_fft_cf!(S, y, z, p)
    return S
end
function poisbin_sum_taub!(S::AbstractArray{T}, p::AbstractArray{T}) where T <: Real
    # In-place recursive evaluation of the Poisson-binomial pmf: after
    # absorbing p[1:col], S[k+1] = P(k successes among those trials).
    # Rows are swept from high to low so each S[row] read is still the
    # previous column's value.
    fill!(S, zero(T))
    S[1] = one(T) - p[1]
    S[2] = p[1]
    @inbounds for col in 2:length(p)
        q = one(T) - p[col]
        for row in col:-1:1
            S[row+1] = q * S[row+1] + p[col] * S[row]
        end
        S[1] *= q
    end
end
# Log-space variant of `poisbin_sum_taub!`: S[k+1] holds log P(k successes).
# The recurrence combines terms with log-sum-exp style updates.
# NOTE(review): the `> zero(T)` guards mean the stabilized log1p branches are
# only taken for *positive* log values; for the usual case of negative log
# probabilities the plain `log(exp + exp)` branch is used, which can
# underflow — confirm whether the guards were meant to be `> -Inf` or similar.
function poisbin_sum_taub_log!(S::AbstractArray{T}, p::AbstractArray{T}) where T <: Real
    n = length(p)
    fill!(S,-Inf)
    S[1] = log(1-p[1])
    S[2] = log(p[1])
    @inbounds for col in 2:n
        for r in 1:col
            row = col - r + 1
            # Sr  = log(p[col] * S_prev[row]); Sr1 = log((1-p[col]) * S_prev[row+1])
            Sr = S[row] + log(p[col])
            Sr1 = S[row+1] + log(1-p[col])
            if (Sr1 > Sr) && (Sr1 > zero(T))
                S[row+1] = Sr1 + log1p(exp(Sr - Sr1))
            elseif (Sr >= Sr1) && (Sr > zero(T))
                S[row+1] = Sr + log1p(exp(Sr1-Sr))
            else
                S[row+1] = log(exp(Sr1) + exp(Sr))
            end
        end
        S[1] += log(1-p[col])
    end
end
# Allocating wrapper around `poisbin_sum_taub!`; returns the pmf table of
# length `length(p) + 1`.
function poisbin_sum_taub(p::AbstractArray{T,1}) where T <: Real
    out = Vector{T}(undef, length(p) + 1)
    poisbin_sum_taub!(out, p)
    return out
end
# Compute the Poisson-binomial pmf together with the table H, where
# H[j, k, :] is the pmf of the problem with trials j and k forced to
# probability zero (used for second derivatives).
function poisbin_sum_taub_dervs_2(p::AbstractVector{T}) where T <: Real
    n = length(p)
    S = Vector{T}(undef,n+1)
    H = Array{T,3}(undef,n,n,n+1)
    poisbin_sum_taub!(S, p)
    poisbin_sum_taub_dervs_2!(H, p)
    return S, H
end
# Fill the symmetric table H by temporarily zeroing pairs (j, k) of `p`;
# only j <= k is computed and mirrored. When k == j, p[k] is already zero.
# NOTE: `p` is mutated in place and restored, so not concurrency-safe.
function poisbin_sum_taub_dervs_2!(H::AbstractArray{T,3}, p::AbstractVector{T}) where T <: Real
    n = length(p)
    pj=zero(T)
    @inbounds for j in 1:n
        pj = p[j]
        p[j] = zero(T)
        for k in j:n
            pk = p[k]
            p[k] = zero(T)
            @views poisbin_sum_taub!(H[j,k,:], p)
            H[k,j,:] .= H[j,k,:]
            p[k] = pk
        end
        p[j] = pj
    end
end
# Divide-and-conquer Poisson-binomial pmf: compute the pmf of M = 2^k
# contiguous subsets of trials directly, then combine pairs via `DSP.filt!`
# (FIR filtering acts as polynomial multiplication / convolution here,
# truncated to the combined length m), halving M each round.
# NOTE(review): the filter gain is the literal `1` rather than `one(T)` as in
# `esf_dc_fft!` — harmless for the usual float types, but inconsistent.
function poisbin_dc_fft!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
                         p::AbstractArray{T,1}, k::D,
                         group_sizes::AbstractArray{D,1},
                         group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
    n = length(p)
    M = size(tempS)[2]
    #convolve initial subsets
    @inbounds for g in 1:M
        @views poisbin_sum_taub!(tempS[1:(group_sizes[g]+1),g],
                                 p[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
        # A subset of size s yields a pmf of s+1 values.
        group_sizes[g] += 1
    end
    while M > 1
        next_avail_col = 1
        @inbounds for g in 1:2:M
            m = group_sizes[g] + group_sizes[g+1] - 1
            @views filt!(S[1:m], tempS[1:group_sizes[g],g], 1, tempS[1:m,g+1])
            @views copyto!(tempS[1:m,next_avail_col], S[1:m])
            group_sizes[next_avail_col] = m
            next_avail_col += 1
        end
        M = div(M,2)
    end
end
# Allocating wrapper: splits `p` into M = 2^k near-equal groups (k clamped so
# every group is non-empty) and runs the reduction.
function poisbin_dc_fft(p::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
    n = length(p)
    k = min(floor(D, log2(n)), k)
    M = 2^k
    L = n/M
    r = rem(n,M) / M
    group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
    group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
    S = Vector{T}(undef,n+1)
    tempS = zeros(T, n+1,M)
    poisbin_dc_fft!(S, tempS, p, k, group_sizes, group_start_idx)
    return S
end
# Divide-and-conquer Poisson-binomial pmf, identical in structure to
# `poisbin_dc_fft!` but combining subset pmfs with the explicit convolution
# `join_groups!` instead of `DSP.filt!`.
function poisbin_dc_group!(S::AbstractArray{T,1}, tempS::AbstractArray{T,2},
                           p::AbstractArray{T,1}, k::D,
                           group_sizes::AbstractArray{D,1},
                           group_start_idx::AbstractArray{D,1}) where {T <: Real, D <: Integer}
    n = length(p)
    M = size(tempS)[2]
    tempS .= zero(T)
    #convolve initial subsets
    @inbounds for g in 1:M
        @views poisbin_sum_taub!(tempS[1:(group_sizes[g]+1),g],
                                 p[group_start_idx[g]:(group_start_idx[g]+group_sizes[g]-1)])
        group_sizes[g] += 1
    end
    while M > 1
        next_avail_col = 1
        @inbounds for g in 1:2:M
            m = group_sizes[g] + group_sizes[g+1] - 1
            @views join_groups!(S[1:m], tempS[1:group_sizes[g+1],g+1], tempS[1:group_sizes[g],g])
            @views copyto!(tempS[1:m,next_avail_col], S[1:m])
            group_sizes[next_avail_col] = m
            next_avail_col += 1
        end
        M = div(M,2)
    end
end
# Allocating wrapper; grouping scheme matches `poisbin_dc_fft`.
function poisbin_dc_group(p::AbstractArray{T,1}, k::D=2) where {T <: Real, D <: Integer}
    n = length(p)
    k = min(floor(D, log2(n)), k)
    M = 2^k
    L = n/M
    r = rem(n,M) / M
    group_sizes = [fill(floor(D, L), D(M*(1-r))); fill(ceil(D, L), D(M*r))]
    group_start_idx = cumsum(group_sizes) .- (group_sizes .- 1)
    S = Vector{T}(undef,n+1)
    tempS = zeros(T, n+1,M)
    poisbin_dc_group!(S, tempS, p, k, group_sizes, group_start_idx)
    return S
end
function poisbin_chen!(S::AbstractArray{T,1}, P::AbstractArray{T,1},
                       p::AbstractArray{T,1}) where T <: Real
    # Chen's recursive method: S[1] = prod(1 - p_i); P[j] accumulates the
    # power sums of the odds ratios p_i / (1 - p_i); the alternating
    # recurrence below then yields the remaining pmf entries. `S` and `P`
    # are accumulated into, so the caller must pass zero-filled arrays
    # (see `poisbin_chen`).
    n = length(p)
    S[1] = one(T)
    @inbounds for i in 1:n
        odds = p[i] / (1 - p[i])
        S[1] *= 1 - p[i]
        for j in 1:n
            P[j] += odds^j
        end
    end
    @inbounds for k in 1:n
        sign = one(T)
        for j in 1:k
            S[k+1] += sign * S[k-j+1] * P[j]
            sign = -sign
        end
        S[k+1] /= k
    end
end
# Allocating wrapper: provide the zero-filled workspaces required by
# `poisbin_chen!` and return the pmf table of length `length(p) + 1`.
function poisbin_chen(p::AbstractArray{T,1}) where T <: Real
    n = length(p)
    P = zeros(T, n)
    S = zeros(T, n + 1)
    poisbin_chen!(S, P, p)
    return S
end
|
[
"MIT"
function join_groups!(S::AbstractArray{T,1}, gamma1::AbstractArray{T,1},
                      gamma2::AbstractArray{T,1}) where T <: Real
    # Discrete convolution of two ESF coefficient tables:
    # S[g] = sum_i gamma1[i] * gamma2[g - i + 1], restricted to index pairs
    # that fall inside both tables.
    n1 = length(gamma1)
    n2 = length(gamma2)
    fill!(S, zero(T))
    @inbounds for g in 1:(n1 + n2 - 1)
        for i in max(1, g - n2 + 1):min(g, n1)
            S[g] += gamma1[i] * gamma2[g-i+1]
        end
    end
end
# Convolution of two binomial-regularized ESF tables of orders k1 and k2.
# Each term is re-weighted by exp(lbinom(k1,i-1) + lbinom(k2,g-i) -
# lbinom(k1+k2,g-1)) so the output table is regularized with respect to the
# combined order k1+k2. S[1] is fixed at one (the order-0 ESF).
function join_groups_reg!(S::AbstractArray{T,1}, gamma1::AbstractArray{T,1},
                          gamma2::AbstractArray{T,1}) where T <: Real
    k1 = length(gamma1) - 1
    k2 = length(gamma2) - 1
    fill!(S, zero(T))
    S[1] = one(T)
    @inbounds for g in 2:(k1+k2+1)
        for i in 1:g
            # Out-of-range index pairs contribute nothing.
            S[g] += (g-i+1 > length(gamma2)) || (i > length(gamma1)) ?
                0.0 : gamma1[i] * gamma2[g-i+1] *
                exp(lbinom(k1,i-1)+lbinom(k2,g-i)-lbinom(k1+k2,g-1))
        end
    end
end
# Log binomial coefficient log(binomial(n, s)), evaluated via the log-beta
# function (SpecialFunctions.logbeta) so non-integer arguments are accepted.
function lbinom(n::T, s::T) where T <: Real
    -log(n+1.0) - logbeta(n-s+1.0, s+1.0)
end
|
[
"MIT"
] | 0.1.0 | 73ff501c057bda1c12679545d85ecf11b2765b19 | code | 1232 | using ElementarySymmetricFunctions
using Test
# Brute-force reference: enumerate all 2^n subsets of `x` (as rows of a 0/1
# matrix), group them by subset size r, and sum the product of the selected
# entries for each r. Exponential in n — test use only.
# NOTE(review): `prod(x[x .!= 0])` drops zeros introduced by the subset mask,
# but it would also drop genuine zero entries of `x`; the test inputs are
# strictly nonzero, so this is presumably intentional — confirm before
# reusing with zero-valued inputs.
function naive_esf(x::AbstractVector{T}) where T <: Real
    n = length(x)
    S = zeros(T, n+1)
    states = hcat(reverse.(digits.(0:2^n-1,base=2,pad=n))...)'
    r_states = vec(mapslices(sum, states, dims=2))
    for r in 0:n
        idx = findall(r_states .== r)
        S[r+1] = sum(mapslices(x->prod(x[x .!= 0]), states[idx, :] .* x', dims=2))
    end
    return S
end
# Brute-force Poisson-binomial pmf via the ESF of the odds ratios
# x = p / (1 - p), scaled by prod(1 / (1 + x)) = prod(1 - p).
function naive_pb(p::AbstractVector{T}) where T <: Real
    x = p ./ (1 .- p)
    naive_esf(x) * prod(1 ./ (1 .+ x))
end
@testset "ElementarySymmetricFunctions.jl" begin
    # Bug fix: every approximate-equality operator in this testset had been
    # mojibake-corrupted to the identifier `β` (a parse error); restored to
    # `≈` (isapprox), which is required for floating-point comparisons
    # against the brute-force references.
    x = [3.5118, .6219, .2905, .8450, 1.8648]
    n = length(x)
    naive_sol = naive_esf(x)
    naive_sol_reg = naive_sol ./ binomial.(n, 0:n)
    @test esf_sum(x) ≈ naive_sol
    @test esf_sum_reg(x) ≈ naive_sol_reg
    @test esf_dc_group(x) ≈ naive_sol
    @test esf_dc_group_reg(x) ≈ naive_sol_reg
    @test esf_dc_fft(x) ≈ naive_sol
    p = x ./ (1 .+ x)
    naive_sol = naive_pb(p)
    @test poisbin_sum_taub(p) ≈ naive_sol
    @test poisbin_fft(p) ≈ naive_sol
    @test poisbin_fft_cf(p) ≈ naive_sol
    @test poisbin_chen(p) ≈ naive_sol
    @test poisbin_dc_fft(p) ≈ naive_sol
    @test poisbin_dc_group(p) ≈ naive_sol
end
| ElementarySymmetricFunctions | https://github.com/bdeonovic/ElementarySymmetricFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 73ff501c057bda1c12679545d85ecf11b2765b19 | docs | 821 | # ElementarySymmetricFunctions
[](https://bdeonovic.github.io/ElementarySymmetricFunctions.jl/stable)
[](https://bdeonovic.github.io/ElementarySymmetricFunctions.jl/dev)
[](https://travis-ci.com/bdeonovic/ElementarySymmetricFunctions.jl)
[](https://ci.appveyor.com/project/bdeonovic/ElementarySymmetricFunctions-jl)
[](https://codecov.io/gh/bdeonovic/ElementarySymmetricFunctions.jl)
| ElementarySymmetricFunctions | https://github.com/bdeonovic/ElementarySymmetricFunctions.jl.git |
|
[
"MIT"
] | 0.1.0 | 73ff501c057bda1c12679545d85ecf11b2765b19 | docs | 164 | ```@meta
CurrentModule = ElementarySymmetricFunctions
```
# ElementarySymmetricFunctions
```@index
```
```@autodocs
Modules = [ElementarySymmetricFunctions]
```
| ElementarySymmetricFunctions | https://github.com/bdeonovic/ElementarySymmetricFunctions.jl.git |
|
[
"MIT"
module XCrySDenStructureFormat

using AtomsBase
using Unitful
using UnitfulAtomic
using PeriodicTable: PeriodicTable

# XSF stores lengths in Angstrom; forces are written as Hartree per Angstrom.
# Bug fix: both unit strings were mojibake in the source (a stray `Γ` with the
# closing quote spilled onto the next line); restored to the `Å` symbol
# expected by Unitful's `@u_str` macro.
const LENGTH_UNIT = u"Å"
const FORCE_UNIT = u"Eh_au" / u"Å"

export load_xsf
export save_xsf

include("fileio.jl")

end # module XSD
| XCrySDenStructureFormat | https://github.com/azadoks/XCrySDenStructureFormat.jl.git |
|
[
"MIT"
] | 0.1.1 | 2fb3a2fb3e915c882ace77d2acbf2f515f0533c0 | code | 11237 | const KNOWN_PERIODIC_KEYWORDS = ("PRIMVEC", "CONVVEC", "PRIMCOORD", "CONVCOORD")
# Count the number of boundary conditions which are AtomsBase.Periodic
function count_periodic_bcs(bcs::AbstractVector{<:BoundaryCondition})
    return count(Base.Fix2(isa, Periodic), bcs)
end
count_periodic_bcs(system::AbstractSystem) = count_periodic_bcs(boundary_conditions(system))
# Warn about system-level properties that XSF cannot represent; only the
# bounding box and boundary conditions are written out.
function check_system_properties(system::AbstractSystem)
    system_keys = keys(system)
    for key in system_keys
        if !in(key, (:bounding_box, :boundary_conditions,))
            @warn "Ignoring unsupported property $(key)"
        end
    end
end
# Warn about per-atom properties that XSF cannot represent; only the species
# identity, position, and (optionally) force are written out.
function check_atom_properties(atom::Atom)
    atom_keys = keys(atom)
    for key in atom_keys
        if !in(key, (:atomic_symbol, :atomic_number, :atomic_mass, :position, :force,))
            @warn "Ignoring unsupported atomic property $(key)"
        end
    end
end
# Warn if an atom carries a non-standard mass: XSF stores only the atomic
# number, so a custom mass would be lost on a write/read round trip.
function check_atomic_mass(atom::Atom)
    if haskey(PeriodicTable.elements, atomic_symbol(atom))
        if atomic_mass(atom) != PeriodicTable.elements[atomic_symbol(atom)].atomic_mass
            @warn "Atom atomic_mass in XSF cannot be mutated"
        end
    end
end
# Warn if an atom's symbol disagrees with the element implied by its atomic
# number (only the number is written to XSF).
# NOTE(review): the warning text says "atomic_mass" although the comparison is
# against the atomic_number — likely a copy-paste slip in the message; confirm.
function check_atomic_symbol(atom::Atom)
    if atomic_symbol(atom) != Symbol(PeriodicTable.elements[atomic_number(atom)].symbol)
        @warn "Atom atomic_symbol in XSF must agree with atomic_mass"
    end
end
# Run all of the above checks on a system before writing it out.
function check_system(system::AbstractSystem)
    check_system_properties(system)
    check_atom_properties.(system)
    check_atomic_mass.(system)
    check_atomic_symbol.(system)
    return nothing
end
# Custom version of `iterate(::Base.EachLine)` which cleans lines for parsing
# and skips comment / empty lines
# Returns the next non-empty, comment-stripped line as a String, or the value
# of `itr.ondone()` at end of file. Recurses past blank/comment-only lines.
# NOTE: relies on the internal fields of `Base.EachLine`
# (`stream`, `keep`, `ondone`), which are not public API.
function iterate_xsf(itr::Base.EachLine)
    eof(itr.stream) && return itr.ondone()
    line = readline(itr.stream; keep=itr.keep)
    # Everything after a '#' is a comment in XSF.
    clean_line = strip(first(split(line, "#")))
    isempty(clean_line) && return iterate_xsf(itr)
    return string(clean_line)
end
# Get boundary conditions in the XSF convention from the system type keyword
# NOTE: the XSF documentation talks about a keyword `MOLECULE`, but it isn't
# mentioned in the ase.io parser nor in any of the examples, so it is not
# implemented here
function parse_boundary_conditions(line)
    occursin("ATOMS", line) && return [DirichletZero(), DirichletZero(), DirichletZero()]
    occursin("POLYMER", line) && return [Periodic(), DirichletZero(), DirichletZero()]
    occursin("SLAB", line) && return [Periodic(), Periodic(), DirichletZero()]
    occursin("CRYSTAL", line) && return [Periodic(), Periodic(), Periodic()]
    return error("Unknown structure type $(line)")
end
# Parse blocks like PRIMVEC and CONVVEC
# Reads three lines, each holding one lattice vector; values are parsed as
# `T` and tagged with the Angstrom length unit.
function parse_vec_block(T::Type{<:Real}, lines)
    return map(1:3) do _
        return parse.(T, split(iterate_xsf(lines))) .* LENGTH_UNIT
    end
end
# Parse an atomic position line, which is formed as either:
#    atomic_number x y z
# or
#    atomic_number x y z Fx Fy Fz
# The atomic number is mapped to its element symbol via PeriodicTable; a
# 7-field line additionally carries the force, stored under the `:force` key.
function parse_coord_line(T::Type{<:Real}, line)
    words = split(line)
    number = parse(Int, words[1])
    atomic_symbol = Symbol(PeriodicTable.elements[number].symbol)
    position = parse.(T, words[2:4]) .* LENGTH_UNIT
    if length(words) == 7
        force = parse.(T, words[5:7]) .* FORCE_UNIT
        return Atom(; atomic_symbol, position, force=force)
    else
        return Atom(; atomic_symbol, position)
    end
end
# Parse an entire block of atomic coordinates in a periodic system,
# i.e. when the number of atoms is provided explicitly
# The first line of the block carries the atom count (its second field, if
# present, is ignored here).
function parse_coord_block(T::Type{<:Real}, lines)
    line = iterate_xsf(lines)
    n_atoms = parse(Int, first(split(line)))
    return map(1:n_atoms) do _
        return parse_coord_line(T, iterate_xsf(lines))
    end
end
# Parse an arbitrary block from a periodic file
# Supported keywords are PRIMVEC, CONVVEC, PRIMCOORD, and CONVCOORD, i.e.
# DATAGRID_[2,3]D blocks are not (yet) supported
function parse_xsf_block(T::Type{<:Real}, keyword, lines)
    if keyword in ("PRIMVEC", "CONVVEC")
        return parse_vec_block(T, lines)
    elseif keyword in ("PRIMCOORD", "CONVCOORD")
        return parse_coord_block(T, lines)
    else
        error("Unknown keyword $(keyword)")
    end
end
# Parse a frame (ANIMSTEP in XSF lingo) from a periodic file
# The first frame will have at least PRIMVEC and PRIMCOORD and possibly CONVVEC
# and CONVCOORD blocks
# Following frames will have at least PRIMCOORD (PRIMVEC and others are optional)
# If a frame doesn't have PRIMVEC, the bounding box (PRIMVEC) from the previous frame needs
# to be brought forward
# The stream position is checkpointed (`io_pos`) before each keyword read so
# that the cursor can be rewound when a line belonging to the *next* frame is
# encountered.
# NOTE(review): `i` is initialized but never incremented, so the `i <= 4` cap
# on the block count is inert — loop termination relies entirely on
# `should_parse`; confirm whether `i += 1` was intended.
function parse_periodic_frame(T::Type{<:Real}, lines, bcs, previous_frame)
    should_parse = true
    i = 0
    io_pos = position(lines.stream)
    blocks = Dict()
    # Maximum 4 blocks (PRIMVEC, PRIMCOORD, CONVVEC, CONVCOORD)
    while should_parse && i <= 4
        line = iterate_xsf(lines)
        if isnothing(line) # We've reached the end of the file
            should_parse = false
        elseif (keyword = first(split(line))) in KNOWN_PERIODIC_KEYWORDS
            if haskey(blocks, keyword)
                # If we've already seen this keyword, we're in the next block
                should_parse = false
                seek(lines.stream, io_pos) # Go back to the keyword line
            else
                blocks[keyword] = parse_xsf_block(T, keyword, lines)
                io_pos = position(lines.stream)
            end
        else # We've reached the end of the frame
            should_parse = false
            seek(lines.stream, io_pos)
        end
    end
    @assert haskey(blocks, "PRIMCOORD") "Found no PRIMCOORD block in the current frame"
    if !haskey(blocks, "PRIMVEC")
        # Fixed-cell trajectories only write PRIMVEC in the first frame.
        if !isnothing(previous_frame)
            blocks["PRIMVEC"] = bounding_box(previous_frame)
        else
            error("Found no PRIMVEC block in the current frame and have no previous frame")
        end
    end
    return atomic_system(blocks["PRIMCOORD"], blocks["PRIMVEC"], bcs)
end
# Parse a frame (ANIMSTEP in XSF lingo) from a non-periodic file (ATOMS)
function parse_atoms_frame(T::Type{<:Real}, lines)
    line = iterate_xsf(lines)
    atoms = []
    # Go until the end of the file or until we see another frame (ATOMS) or data grid (BEGIN)
    while !isnothing(line) && !startswith(line, "ATOMS") && !startswith(line, "BEGIN")
        atom = parse_coord_line(T, line)
        push!(atoms, atom)
        line = iterate_xsf(lines)
    end
    return isolated_system(atoms)
end
# Load all the frames from an XSF file
# `T` selects the floating-point type used for parsed coordinates and forces.
# Returns a vector of AtomsBase systems (one per frame/ANIMSTEP).
function load_xsf(T::Type{<:Real}, file::Union{AbstractString,IOStream})
    lines = eachline(file; keep=false)
    # The first line should be "ANIMSTEPS [N]" for a trajectory (in which case, the system
    # type is in the second line) or one of the system type keywords (ATOMS, POLYMER, SLAB,
    # CRYSTAL), in which case the number of frames is 1
    line = iterate_xsf(lines)
    if occursin("ANIMSTEPS", line)
        n_frames = parse(Int, last(split(line)))
        line = iterate_xsf(lines)
    else
        n_frames = 1
    end
    bcs = BoundaryCondition[]
    # NOTE(review): the bare `catch` silently swallows any parsing error and
    # returns an untyped empty `Vector{Any}`, inconsistent with the
    # `AbstractSystem{3}[]` returned below; confirm whether this is intended
    # (e.g. to skip band-grid `.bxsf` files) or should rethrow.
    try
        bcs = parse_boundary_conditions(line)
    catch
        return []
    end
    frames = AbstractSystem{3}[]
    for _ in 1:n_frames
        # Check how many boundary conditions are Periodic to determine whether we have
        # ATOMS or one of POLYMER, SLAB, CRYSTAL
        if count(Base.Fix2(isa, AtomsBase.Periodic), bcs) == 0
            push!(frames, parse_atoms_frame(T, lines))
        else
            # We need to pass the previous frame because if the unit cell is fixed,
            # it is written only in the first frame and we need to pass it through
            # frame-by-frame
            previous_frame = isempty(frames) ? nothing : last(frames)
            push!(frames, parse_periodic_frame(T, lines, bcs, previous_frame))
        end
    end
    return frames
end
# Set a default floating-point type of Float64
load_xsf(file::Union{AbstractString,IOStream}) = load_xsf(Float64, file)
# Emit the XSF periodicity keyword for a system with `n_periodic_bcs`
# periodic directions (1 => POLYMER, 2 => SLAB, 3 => CRYSTAL); any other
# count raises a `KeyError`.
function write_system_type(io::IO, n_periodic_bcs::Int)
    keyword = Dict{Int,String}(1 => "POLYMER", 2 => "SLAB", 3 => "CRYSTAL")[n_periodic_bcs]
    println(io, keyword)
    return nothing
end
# Write one atom line: atomic number, position (Angstrom), and — when the
# atom carries a `:force` property — the force components (Hartree/Angstrom).
function write_atom(io::IO, atom)
    n = atomic_number(atom)
    x, y, z = ustrip.(uconvert.(LENGTH_UNIT, position(atom)))
    if haskey(atom, :force)
        fx, fy, fz = ustrip.(uconvert.(FORCE_UNIT, get(atom, :force, nothing)))
        println(io, "$(n) $(x) $(y) $(z) $(fx) $(fy) $(fz)")
    else
        println(io, "$(n) $(x) $(y) $(z)")
    end
    return nothing
end
# Write the atomic numbers, positions[, forces] of an ATOMS block or [PRIM,CONV]COORD block
function write_atoms(io::IO, system::AbstractSystem; header="")
    !isempty(header) && println(io, strip(header))
    return map(Base.Fix1(write_atom, io), system[:])
end
# Write a bounding box block (optionally with a header, i.e. PRIMVEC or CONVVEC)
# The three lattice vectors are converted to Angstrom, one per line.
function write_bounding_box(io::IO, system::AbstractSystem; header="")
    !isempty(header) && println(io, strip(header))
    for i in 1:3
        x, y, z = ustrip.(uconvert.(LENGTH_UNIT, bounding_box(system)[i]))
        println(io, "$(x) $(y) $(z)")
    end
    return nothing
end
# Write the PRIMVEC, CONVVEC, and PRIMCOORD blocks which make up a frame of a
# periodic trajectory
# The same bounding box is written for both PRIMVEC and CONVVEC; the
# PRIMCOORD header carries the atom count followed by the (fixed) literal 1.
function write_periodic_frame(io::IO, system::AbstractSystem; frame="")
    n_atoms = length(system)
    write_bounding_box(io, system; header="PRIMVEC $(frame)")
    write_bounding_box(io, system; header="CONVVEC $(frame)")
    write_atoms(io, system; header="PRIMCOORD $(frame)\n$(n_atoms) 1")
    return nothing
end
# Write an ATOMS frame or periodic frame depending on the periodicity of the system
# This function is used for writing the frames of animated files (not for single
# structures).
function write_frame(io::IO, system::AbstractSystem; frame="")
    if count_periodic_bcs(system) == 0
        write_atoms(io, system; header="ATOMS $(frame)")
    else
        write_periodic_frame(io, system; frame)
    end
    return nothing
end
# Write a (possibly multi-frame) XSF file to `io`. All frames must share the
# same boundary conditions (`only` throws otherwise).
function save_xsf(io::IO, frames::AbstractVector{<:AbstractSystem})
    # Check the frames and warn about unsupported properties.
    check_system.(frames)
    # Make sure all structures have the same boundary conditions and get those boundary
    # conditions.
    bcs = only(unique(boundary_conditions.(frames)))
    # Count the number of periodic boundary conditions.
    n_periodic_bcs = count(Base.Fix2(isa, Periodic), bcs)
    # Write the animation line if more than one frame is provided.
    is_animated = length(frames) > 1
    is_animated && println(io, "ANIMSTEPS $(length(frames))")
    # For periodic systems, write the system type line.
    n_periodic_bcs > 0 && write_system_type(io, n_periodic_bcs)
    # Write the structural and force information for each frame.
    for i in eachindex(frames)
        write_frame(io, frames[i], frame=is_animated ? i : "")
    end
    return nothing
end
# Convenience method: open `file` for writing and delegate to the IO method.
function save_xsf(file::AbstractString, frames::AbstractVector{<:AbstractSystem})
    open(file, "w") do io
        return save_xsf(io, frames)
    end
end
# Convenience method: wrap a single frame in a one-element vector.
function save_xsf(file_or_io::Union{IO, AbstractString}, frame::AbstractSystem)
    return save_xsf(file_or_io, [frame])
end
| XCrySDenStructureFormat | https://github.com/azadoks/XCrySDenStructureFormat.jl.git |
|
[
"MIT"
] | 0.1.1 | 2fb3a2fb3e915c882ace77d2acbf2f515f0533c0 | docs | 1211 | # XCrySDenStructureFormat.jl
This package provides read / write functionality for [XCrySDen XSF](http://www.xcrysden.org/doc/XSF.html) atomic structure files.
It is **strongly** recommended **not** to use this package directly, but rather through [AtomsIO.jl](https://github.com/mfherbst/AtomsIO.jl), which provides a uniform interface (based on [AtomsBase](https://github.com/JuliaMolSim/AtomsBase.jl)) for reading and writing a large range of atomistic structure files.
## Feature support
Currently supports
- r/w of molecular structures (`ATOMS`)
- r/w of periodic structures (`POLYMER` 1D, `SLAB` 2D, `CRYSTAL` 3D)
- r/w of molecular/periodic trajectories (`.axsf` / `ANIMSTEPS`)
- r/w of forces, using the data key `:force` in parsed `AtomsBase.Atom` instances
Currently does _not_ support
- Data grids (2D and 3D)
- Band grids (`.bxsf`)
## Installation
This package is registered in the General registry, so installation of the latest stable release is as simple as pressing `]` to enter `pkg>` mode in the Julia REPL, and then entering:
```julia
pkg> add XCrySDenStructureFormat
```
or for the development version:
```julia
pkg> dev https://github.com/azadoks/XCrySDenStructureFormat.jl
```
| XCrySDenStructureFormat | https://github.com/azadoks/XCrySDenStructureFormat.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 1318 | using Pkg
# Detect a CI environment; only then activate/instantiate the docs project
# and deploy the built documentation.
CI = get(ENV, "CI", nothing) == "true" || get(ENV, "GITHUB_TOKEN", nothing) !== nothing
CI && Pkg.activate(@__DIR__)
CI && Pkg.instantiate()
using Documenter, Neighborhood
using DocumenterTools: Themes
# Concatenate the shared style header with the light/dark theme definitions
# and compile both Documenter CSS themes.
for w in ("light", "dark")
    header = read(joinpath(@__DIR__, "style.scss"), String)
    theme = read(joinpath(@__DIR__, "$(w)defs.scss"), String)
    write(joinpath(@__DIR__, "$(w).scss"), header*"\n"*theme)
end
Themes.compile(joinpath(@__DIR__, "light.scss"), joinpath(@__DIR__, "src/assets/themes/documenter-light.css"))
Themes.compile(joinpath(@__DIR__, "dark.scss"), joinpath(@__DIR__, "src/assets/themes/documenter-dark.css"))
# %% actually make the docs
makedocs(
    modules=[Neighborhood],
    sitename= "Neighborhood.jl",
    authors = "George Datseris.",
    format = Documenter.HTML(
        prettyurls = CI,
        assets = [
            "assets/logo.ico",
            asset("https://fonts.googleapis.com/css?family=Montserrat|Source+Code+Pro&display=swap", class=:css),
        ],
        collapselevel = 2,
    ),
    doctest=false,
    pages = [
        "Public API" => "index.md",
        "Dev Docs" => "dev.md",
    ]
)
if CI
    deploydocs(
        repo = "github.com/JuliaNeighbors/Neighborhood.jl.git",
        target = "build",
        push_preview = true
    )
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 316 | module Neighborhood
using Distances
# Re-export the most common Distances.jl metrics for user convenience.
export Euclidean, Chebyshev, Cityblock, Minkowski
# Core definitions first (utilities and the generic API), then concrete
# search-structure implementations, then the testing helpers submodule.
include("util.jl")
include("api.jl")
include("theiler.jl")
include("bruteforce.jl")
include("kdtree.jl")
include("Testing.jl")
"Currently supported search structures"
const SSS = [BruteForce, KDTree]
end # module Neighborhood
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 4742 | """Utilities for testing search structures."""
module Testing

using Test
using Neighborhood
using Neighborhood: bruteforcesearch

export cmp_search_results, cmp_bruteforce, search_allfuncs, check_search_results,
    test_bulksearch

"""
Get arguments tuple to `search`, using the 3-argument version if `skip=nothing`.
"""
get_search_args(ss, query, t, skip) = isnothing(skip) ? (ss, query, t) : (ss, query, t, skip)

"""
    cmp_search_results(results...)::Bool

Compare two or more sets of search results (`(idxs, ds)` tuples) and check that
they are identical up to ordering.
"""
function cmp_search_results(results::Tuple{Vector, Vector}...)
    length(results) < 2 && error("Expected at least two sets of results")
    idxs1, ds1 = results[1]
    rest = results[2:end]
    idxset = Set(idxs1)
    # Pair every index with its distance so permuted result sets compare equal.
    dist_map = Dict(i => d for (i, d) in zip(idxs1, ds1))
    for (idxs_i, ds_i) in rest
        Set(idxs_i) == idxset || return false
        all(dist_map[i] == d for (i, d) in zip(idxs_i, ds_i)) || return false
    end
    return true
end

"""
    cmp_bruteforce(results, data, metric, query, t[, skip])::Bool

Check whether `results` returned from [`search`](@ref) match those computed
with [`Neighborhood.bruteforcesearch`](@ref)`(data, metric, query, t[, skip])`
(up to order). `skip` may be `nothing`, which calls the 4-argument method.

**Caution:** results of a `NeighborNumber` search are only expected to match if
the distances from `query` to each point in `data` are all distinct, otherwise
there may be some ambiguity in which data points are included.
"""
function cmp_bruteforce(results, data, metric, query, t, skip=nothing)
    bf = bruteforcesearch(data, metric, query, t, skip)
    return cmp_search_results(results, bf)
end

"""
    search_allfuncs(ss, query, t[, skip])

Call [`search`](@ref)`(ss, query, t[, skip])` and check that the result matches
those for [`isearch`](@ref) and [`knn`](@ref)/[`inrange`](@ref) (depending on
search type) for the equivalent arguments.

`skip` may be `nothing`, in which case the 3-argument methods of all functions
will be called. Uses `Test.@test` internally.
"""
function search_allfuncs(ss, query, t, skip=nothing)
    args = get_search_args(ss, query, t, skip)
    idxs, ds = result = search(args...)
    @test Set(isearch(args...)) == Set(idxs)
    # BUGFIX: this comparison used to be computed and its Bool result silently
    # discarded; it must be wrapped in @test so a mismatch actually fails.
    @test cmp_search_results(result, _alt_search_func(args...))
    return result
end

# Call inrange() or knn() given arguments to search()
function _alt_search_func(ss, query, t::WithinRange, args...)
    inrange(ss, query, t.r, args...)
end
function _alt_search_func(ss, query, t::NeighborNumber, args...)
    knn(ss, query, t.k, args...)
end

"""
    check_search_results(data, metric, results, query, t[, skip])

Check that `results = search(ss, query, t[, skip])` make sense for a search
structure `ss` with data `data` and metric `metric`.

Note that this does not calculate the known correct value to compare to (which
may be expensive for large data sets), just that the results have the
expected properties. Use [`cmp_bruteforce`] for a more exact test.

`skip` may be `nothing`, in which case the 3-argument methods of all functions
will be called. Uses `Test.@test` internally.

Checks the following:
* `results` is a 2-tuple of `(idxs, ds)`.
* `ds[i] == metric(query, data[i])`.
* `skip(i)` is false for all `i` in `idxs`.
* For `t::NeighborNumber`:
  * `length(idxs) <= t.k`.
* For `t::WithinRange`:
  * `d <= t.r` for all `d` in `ds`.
"""
function check_search_results(data, metric, results, query, t, skip=nothing)
    idxs, ds = results
    @test ds == [metric(query, data[i]) for i in idxs]
    !isnothing(skip) && @test !any(map(skip, idxs))
    # Dispatch on the search type for the type-specific property checks.
    _check_search_results(data, metric, results, query, t, skip)
end

function _check_search_results(data, metric, (idxs, ds), query, t::NeighborNumber, skip)
    @test length(idxs) <= t.k
end
function _check_search_results(data, metric, (idxs, ds), query, t::WithinRange, skip)
    @test all(<=(t.r), ds)
end

"""
    test_bulksearch(ss, queries, t[, skip=nothing])

Test that [`bulksearch`](@ref) gives the same results as individual applications
of [`search`](@ref).

`skip` may be `nothing`, in which case the 3-argument methods of both functions
will be called. Uses `Test.@test` internally.
"""
function test_bulksearch(ss, queries, t, skip=nothing)
    args = get_search_args(ss, queries, t, skip)
    bidxs, bds = bulksearch(args...)
    @test bulkisearch(args...) == bidxs
    for (i, query) in enumerate(queries)
        result = if isnothing(skip)
            search(ss, query, t)
        else
            # BUGFIX: bulk skip predicates have the signature
            # `skip(neighbor_index, query_index)`, so the single-search wrapper
            # must pass the found-neighbor index first. It previously called
            # `skip(i, j)` with `i` being the query index, i.e. with the
            # arguments swapped relative to the documented contract.
            iskip = j -> skip(j, i)
            search(ss, query, t, iskip)
        end
        @test result == (bidxs[i], bds[i])
    end
end

end # module
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 5277 | "`alwaysfalse(ags...; kwargs...) = false`"
# Default skip predicate: accepts any arguments and never skips anything.
alwaysfalse(args...; kwargs...) = false
# Public names of the common search API.
export WithinRange, NeighborNumber, SearchType
export searchstructure
export search, isearch, inrange, knn, inrangecount
export bulksearch, bulkisearch
"""
Supertype of all possible search types of the Neighborhood.jl common API.
"""
abstract type SearchType end
"""
searchstructure(S, data, metric; kwargs...) β ss
Create a search structure `ss` of type `S` (e.g. `KDTree, BKTree, VPTree` etc.) based on the
given `data` and `metric`. The data types and supported metric types are package-specific,
but typical choices are subtypes of `<:Metric` from Distances.jl.
Some common metrics are re-exported by Neighborhood.jl.
"""
function searchstructure(::Type{S}, data::D, metric::M; kwargs...) where
{S, D, M}
error("Given type $(S) has not implemented the Neighborhood.jl public API "*
"for data type $(D) and metric type $(M).")
end
"""
WithinRange(r::Real) <: SearchType
Search type representing all neighbors with distance `β€ r` from the query
(according to the search structure's metric).
"""
struct WithinRange{R} <: SearchType; r::R; end
"""
NeighborNumber(k::Int) <: SearchType
Search type representing the `k` nearest neighbors of the query (or approximate
neighbors, depending on the search structure).
"""
struct NeighborNumber <: SearchType; k::Int; end
"""
datatype(::Type{S}) :: Type
Get the type of the data points (arguments to the metric function) of a search
struture of type `S`.
"""
datatype(::Type) = Any
datatype(ss) = datatype(typeof(ss))
"""
getmetric(ss)
Get the metric function used by the search structure `ss`.
"""
function getmetric end
"""
search(ss, query, t::SearchType [, skip]; kwargs... ) β idxs, ds
Perform a neighbor search in the search structure `ss` for the given
`query` with search type `t` (see [`SearchType`](@ref)).
Return the indices of the neighbors (in the original data)
and the distances from the query. Available search types are:
- [`NeighborNumber`](@ref)
- [`WithinRange`](@ref)
Optional `skip` function takes as input the index of the found neighbor
(in the original data) `skip(i)` and returns `true` if this neighbor should be skipped.
Package-specific keywords are possible.
"""
function search(ss::S, query::Q, t::T; kwargs...) where {S, Q, T<: SearchType}
error("Given type $(S) has not implemented the Neighborhood.jl public API "*
"for data type $(Q) and search type $(T).")
end
function search(ss::S, query::Q, t::T, skip; kwargs...) where {S, Q, T<: SearchType}
error("Given type $(S) has not implemented the Neighborhood.jl public API "*
"for data type $(Q), search type $(T) and skip function.")
end
"""
isearch(args...; kwargs... ) β idxs
Same as [`search`](@ref) but only return the neighbor indices.
"""
isearch(args...; kwargs...) = search(args...; kwargs...)[1]
"""
inrange(ss, query, r::Real [, skip]; kwargs...)
[`search`](@ref) for `WithinRange(r)` search type.
"""
inrange(a, b, r, args...; kwargs...) = search(a, b, WithinRange(r), args...; kwargs...)
"""
inrangecount(ss, query, r::Real; kwargs....) β n::Int
Count the amount of points `n` in the search structure `ss` that are witnin range `r`
of the `query`. Typically provided that performs better than just getting the
`length` of [`search`](@ref) with `WithinRange(r)`.
"""
function inrangecount(a, b, r; kwargs...)
idxs = isearch(a, b, WithinRange(r); kwargs...)
return length(idxs)
end
"""
knn(ss, query, k::Int [, skip]; kwargs...)
[`search`](@ref) for `NeighborNumber(k)` search type.
"""
knn(a, b, k::Integer, args...; kwargs...) =
search(a, b, NeighborNumber(k), args...; kwargs...)
###########################################################################################
# Bulk
###########################################################################################
"""
bulksearch(ss, queries, t::SearchType [, skip]; kwargs... ) → vec_of_idxs, vec_of_ds
Same as [`search`](@ref) but many searches are done for many input query points.
In this case `skip` takes two arguments `skip(i, j)` where now `j` is simply
the index of the query that we are currently searching for (`j` is the index in
`queries` and goes from 1 to `length(queries)`).
"""
# Generic fallback without skip: one `search` per query, accumulating results.
# NOTE(review): `queries[1]` below throws for an empty `queries` collection —
# confirm callers always pass at least one query.
function bulksearch(ss, queries, t; kwargs...)
i1, d1 = search(ss, queries[1], t; kwargs...)
idxs, ds = [i1], [d1]
sizehint!(idxs, length(queries))
sizehint!(ds, length(queries))
for j in 2:length(queries)
i, d = search(ss, queries[j], t; kwargs...)
push!(idxs, i); push!(ds, d)
end
return idxs, ds
end
# Bulk search with a two-argument skip predicate `skip(i, j)`, where `i` is a
# candidate neighbor's data index and `j` the query's index in `queries`.
function bulksearch(ss, queries, t, skip; kwargs...)
    i1, d1 = search(ss, queries[1], t, i -> skip(i, 1); kwargs...)
    idxs, ds = [i1], [d1]
    sizehint!(idxs, length(queries))
    sizehint!(ds, length(queries))
    for j in 2:length(queries)
        # BUGFIX: this closure previously called `skip(j, k)`, i.e. with the
        # query index first — the opposite argument order from the call made
        # for query 1 above and from the documented `skip(i, j)` contract.
        sk = k -> skip(k, j)
        i, d = search(ss, queries[j], t, sk; kwargs...)
        push!(idxs, i); push!(ds, d)
    end
    return idxs, ds
end
"""
bulkisearch(ss, queries, t::SearchType [, skip]; kwargs... ) β vec_of_idxs
Same as [`bulksearch`](@ref) but return only the indices.
"""
bulkisearch(args...; kwargs...) = bulksearch(args...; kwargs...)[1]
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 1531 | export BruteForce
"""
bruteforcesearch(data, metric, query, t::SearchType[, skip])
Perform a brute-force search of type `t` against data array `data`
(by calculating the metric for `query` and and every point in `data`).
"""
function bruteforcesearch end
# k-nearest-neighbors by brute force: compute every distance, rank all
# candidates, drop skipped ones, and keep at most the `k` best.
function bruteforcesearch(data, metric, query, t::NeighborNumber, skip=nothing)
    alldists = map(p -> metric(query, p), data)
    order = sortperm(alldists)
    if !isnothing(skip)
        filter!(i -> !skip(i), order)
    end
    nkeep = min(t.k, length(order))
    keep = order[1:nkeep]
    return keep, alldists[keep]
end
# Range search by brute force: keep every non-skipped point whose distance to
# the query does not exceed the radius `t.r`.
function bruteforcesearch(data, metric, query, t::WithinRange, skip=nothing)
    found = Int[]
    # Element type of the distance vector follows the metric's return type.
    founddists = metricreturntype(metric, first(data))[]
    for (idx, point) in enumerate(data)
        if !isnothing(skip) && skip(idx)
            continue
        end
        dist = metric(query, point)
        if dist <= t.r
            push!(found, idx)
            push!(founddists, dist)
        end
    end
    return found, founddists
end
"""
BruteForce
A "search structure" which simply performs a brute-force search through the
entire data array.
"""
struct BruteForce{T, M}
data::Vector{T}
metric::M
BruteForce(data::Vector, metric) = new{eltype(data), typeof(metric)}(data, metric)
end
# Common-API hooks for BruteForce: construction, data type, and metric access.
searchstructure(::Type{BruteForce}, data, metric) = BruteForce(data, metric)
datatype(::Type{<:BruteForce{T}}) where T = T
getmetric(bf::BruteForce) = bf.metric
# `search` entry point for BruteForce: delegate to the plain function form.
search(bf::BruteForce, query, t::SearchType, skip=alwaysfalse) =
    bruteforcesearch(bf.data, bf.metric, query, t, skip)
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 4120 | import NearestNeighbors
import NearestNeighbors: KDTree
export KDTree
###########################################################################################
# Standard API
###########################################################################################
# Construction: forward data, metric and keywords to NearestNeighbors' KDTree.
function Neighborhood.searchstructure(::Type{KDTree}, data, metric; kwargs...)
return KDTree(data, metric; kwargs...)
end
# Data type and metric accessors for the common API.
datatype(::Type{<:KDTree{V}}) where V = V
getmetric(tree::KDTree) = tree.metric
# kNN search: NearestNeighbors.knn natively supports distance sorting and a
# single-argument skip predicate, so just forward everything.
function Neighborhood.search(tree::KDTree, query, t::NeighborNumber, skip=alwaysfalse; sortds=true)
return NearestNeighbors.knn(tree, query, t.k, sortds, skip)
end
# Range search: NearestNeighbors.inrange returns only indices, so distances
# are recomputed afterwards via `_NN_get_ds`.
function Neighborhood.search(tree::KDTree, query, t::WithinRange, skip=alwaysfalse; sortds=true)
idxs = NearestNeighbors.inrange(tree, query, t.r)
# Only filter when a real skip predicate was given (avoids a needless pass).
skip ≠ alwaysfalse && filter!(!skip, idxs)
ds = _NN_get_ds(tree, query, idxs)
if sortds # sort according to distances
sp = sortperm(ds)
sort!(ds)
idxs = idxs[sp]
end
return idxs, ds
end
# Native in-range count: delegate to NearestNeighbors.inrangecount rather
# than using the generic isearch-and-length fallback.
function Neighborhood.inrangecount(tree::KDTree, query, r::Real)
return NearestNeighbors.inrangecount(tree, query, r)
end
# Compute the distances from `query` to the tree points with original indices
# `idxs`. If the tree was built with `reorder = true`, `tree.data` is stored
# permuted, so each original index is mapped back through `tree.indices`.
# The `if` expression's value (`ds`) is the implicit return value.
# NOTE(review): the `findfirst` lookup is O(length(tree.indices)) per index,
# so this can be slow for large reordered trees — confirm acceptable.
function _NN_get_ds(tree::KDTree, query, idxs)
if tree.reordered
ds = [
evaluate(tree.metric, query, tree.data[
findfirst(isequal(i), tree.indices)
]) for i in idxs]
else
ds = [evaluate(tree.metric, query, tree.data[i]) for i in idxs]
end
end
# Performance method when distances are not required (can't sort then)
# NOTE(review): `sortds` is forwarded as NearestNeighbors' sort flag for the
# returned indices; distances are never computed here — confirm semantics.
function Neighborhood.isearch(tree::KDTree, query, t::WithinRange, skip=alwaysfalse; sortds=false)
idxs = NearestNeighbors.inrange(tree, query, t.r, sortds)
skip ≠ alwaysfalse && filter!(!skip, idxs)
return idxs
end
###########################################################################################
# Bulk and skip predicates
###########################################################################################
# Bulk kNN without skip: NearestNeighbors handles query vectors natively.
function Neighborhood.bulksearch(tree::KDTree, queries, t::NeighborNumber; sortds=true)
return NearestNeighbors.knn(tree, queries, t.k, sortds)
end
# no easy skip version for knn bulk search
# Pre-allocate per-query result buffers and run `knn_point!` query by query,
# closing over the query number `j` to adapt the two-argument bulk skip
# `skip(i, j)` to the single-argument predicate NearestNeighbors expects.
function Neighborhood.bulksearch(tree::KDTree, queries, t::NeighborNumber, skip; sortds=true)
k, N = t.k, length(queries)
dists = [Vector{eltype(queries[1])}(undef, k) for _ in 1:N]
idxs = [Vector{Int}(undef, k) for _ in 1:N]
for j in 1:N
# Notice that this `_skip` definition matches our API definition!
_skip = i -> skip(i, j)
@inbounds NearestNeighbors.knn_point!(tree, queries[j], sortds, dists[j], idxs[j], _skip)
end
return idxs, dists
end
# but there is an easy skip version for inrange bulk isearch!
# Indices-only bulk range search; skip filtering happens after the fact.
function Neighborhood.bulkisearch(tree::KDTree, queries, t::WithinRange, skip=alwaysfalse)
vec_of_idxs = NearestNeighbors.inrange(tree, queries, t.r)
skip ≠ alwaysfalse && vecskipfilter!(vec_of_idxs, skip)
return vec_of_idxs
end
# Bulk range search with distances: indices from NearestNeighbors, distances
# recomputed per query, then skip filtering and optional per-query sorting.
function Neighborhood.bulksearch(tree::KDTree, queries, t::WithinRange, skip=alwaysfalse; sortds=true)
vec_of_idxs = NearestNeighbors.inrange(tree, queries, t.r)
vec_of_ds = [ _NN_get_ds(tree, queries[j], vec_of_idxs[j]) for j in 1:length(queries)]
# Filter indices and distances together so the two stay aligned.
skip ≠ alwaysfalse && vecskipfilter!(vec_of_idxs, vec_of_ds, skip)
if sortds # sort according to distances
for i in 1:length(queries)
@inbounds ds = vec_of_ds[i]
length(ds) ≤ 1 && continue
sp = sortperm(ds)
sort!(ds)
vec_of_idxs[i] = vec_of_idxs[i][sp]
end
end
return vec_of_idxs, vec_of_ds
end
# In-place bulk skip filtering: drop from `vec_of_idxs[j]` every index `i`
# for which `skip(i, j)` is true (`j` is the query number).
function vecskipfilter!(vec_of_idxs, skip)
    for (qi, idxs) in enumerate(vec_of_idxs)
        filter!(i -> !skip(i, qi), idxs)
    end
    return vec_of_idxs
end
# As above, but also delete the matching entries from the parallel distance
# vectors in `vec_of_ds` so indices and distances stay aligned.
function vecskipfilter!(vec_of_idxs, vec_of_ds, skip)
    for (qi, (idxs, ds)) in enumerate(zip(vec_of_idxs, vec_of_ds))
        doomed = [p for p in eachindex(idxs) if skip(idxs[p], qi)]
        deleteat!(idxs, doomed)
        deleteat!(ds, doomed)
    end
    return vec_of_idxs
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 1448 | """
Theiler(w::Int, nidxs = nothing)
Struct that generates skip functions representing a Theiler window of size `w β₯ 0`.
This is useful when the query of a search is also part of the data used to create the
search structure, typical in timeseries analysis. In this case, you do not want to
find the query itself as its neighbor, and you typically want to avoid points with
indices very close to the query as well.
Giving `w=0` excludes the query itself as its neighbor, see the boolean expressions below.
Let `theiler = Theiler(w)`. Then, `theiler` by itself can be used as a skip function
in [`bulksearch`](@ref), because `theiler(i, j) β‘ abs(i-j) β€ w`.
In addition, if the given argument `nidxs` is _not_ `nothing`, then
`theiler(i, j) β‘ abs(i-nidxs[j]) β€ w`. (useful to give as `nidxs` the indices of the
queries in the original data)
However `theiler` can also be used in single searches. `theiler(n)` (with one argument)
generates the function `i -> abs(i-n) β€ w`. So `theiler(n)` can be given to
[`search`](@ref) as the `skip` argument. Notice that `theiler` is an instance of `Theiler`,
and in summary you'd have to do `Theiler(w)(n)` and give that to [`search`](@ref).
"""
struct Theiler{R} <: Function
    w::Int
    nidxs::R
end
# Convenience constructors: no query-index mapping, and zero-width window.
Theiler(w) = Theiler(w, nothing)
Theiler() = Theiler(0)
# One-argument form: build a single-search skip predicate around index `n`.
function (t::Theiler)(n)
    return i -> abs(i - n) ≤ t.w
end
# Two-argument (bulk) forms: `i` is the neighbor index, `j` the query index.
(t::Theiler{Nothing})(i, j) = abs(i - j) ≤ t.w
(t::Theiler)(i, j) = abs(i - t.nidxs[j]) ≤ t.w
export Theiler
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 497 | """
metricreturntype(metric, x, y=x)
Get the expected return type of `metric(x, y)`. This is inferred statically if
possible, otherwise the function is actually called with the supplied arguments.
Always returns a concrete type.
"""
function metricreturntype(metric, x, y=x)
    # Ask inference first — this avoids evaluating `metric` at all when it is
    # type stable.
    inferred = Core.Compiler.return_type(metric, Tuple{typeof(x), typeof(y)})
    isconcretetype(inferred) && return inferred
    # Inference produced an abstract/Union type: fall back to an actual call.
    return typeof(metric(x, y))
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 338 | @testset "cmp_search_results" begin
# Random result set: a random subset of 1:100 with random distances, plus a
# permutation used to build a reordered-but-equivalent result set.
idxs = shuffle!(randsubseq(1:100, 0.5))
ds = rand(Float64, length(idxs))
p = randperm(length(idxs))
# Same results in a different order must compare equal...
@test cmp_search_results((idxs, ds), (idxs[p], ds[p]))
# ...but dropping an entry or misaligning distances must not.
@test !cmp_search_results((idxs, ds), (idxs[2:end], ds[2:end]))
@test !cmp_search_results((idxs, ds), (idxs[p], ds))
end # closes the surrounding @testset
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 1823 | metric = Euclidean()
# Reference distances and a BruteForce structure over the shared test data.
dists = [metric(query, d) for d in data]
ss = searchstructure(BruteForce, data, metric)
# Skip predicates: drop every third index; bulk variant combines both indices.
skip3(i) = i % 3 == 0
skip3inv(i) = !skip3(i)
skip3bulk(i, j) = skip3(i + j)
@testset "Search structure attributes" begin
@test datatype(ss) === datatype(typeof(ss)) === eltype(data)
@test getmetric(ss) === ss.metric
end
@testset "NeighborNumber" begin
t = NeighborNumber(k)
# Using bruteforcesearch function
idxs1, ds1 = results1 = bruteforcesearch(data, metric, query, t)
@test idxs1 == sortperm(dists)[1:k]
check_search_results(data, metric, results1, query, t)
# Using BruteForce instance
@test search_allfuncs(ss, query, t) == results1
# Again with skip function
idxs2, ds2 = results2 = bruteforcesearch(data, metric, query, t, skip3)
@test idxs2 == filter(skip3inv, sortperm(dists))[1:k]
check_search_results(data, metric, results2, query, t, skip3)
@test search_allfuncs(ss, query, t, skip3) == results2
end
@testset "WithinRange" begin
t = WithinRange(r)
# Using bruteforcesearch function
idxs1, ds1 = results1 = bruteforcesearch(data, metric, query, t)
@test Set(ds1) == Set(filter(<=(r), dists))
check_search_results(data, metric, results1, query, t)
# Using BruteForce instance
@test search_allfuncs(ss, query, t) == results1
# Again with skip function
idxs2, ds2 = results2 = bruteforcesearch(data, metric, query, t, skip3)
@test idxs2 == filter(skip3inv, idxs1)
check_search_results(data, metric, results2, query, t, skip3)
@test search_allfuncs(ss, query, t, skip3) == results2
end
@testset "Bulk search" begin
# Cross every search type with both "no skip" and the bulk skip predicate.
for t in [NeighborNumber(k), WithinRange(r)]
for skip in [nothing, skip3bulk]
test_bulksearch(ss, queries, t, skip)
end
end
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 3541 | using Distances
@testset "KDTree" begin
tree1 = searchstructure(KDTree, data, Euclidean(); reorder = true)
tree2 = searchstructure(KDTree, data, Euclidean(); reorder = false)
@test datatype(tree1) === datatype(typeof(tree1)) === eltype(data)
@test getmetric(tree1) === tree1.metric
@test datatype(tree2) === datatype(typeof(tree2)) === eltype(data)
@test getmetric(tree2) === tree2.metric
idxs, ds = knn(tree1, query, 5)
@test issorted(ds)
@test isearch(tree1, query, NeighborNumber(5)) == idxs
@test search(tree1, query, NeighborNumber(5)) == (idxs, ds)
ridxs, rds = inrange(tree1, query, maximum(ds))
@test issorted(rds)
@test ridxs == idxs
@test rds == ds
ridxs_srt, rds_srt = inrange(tree2, query, maximum(ds))
@test ridxs_srt == idxs
@test rds_srt == ds
__idxs, = inrange(tree1, queries[1], 0.005)
@test sort!(__idxs) == 48:52
__idxs, = inrange(tree1, queries[1], 0.005, theiler1(nidxs[1]))
@test sort!(__idxs) == 50:52
__idxs, = inrange(tree1, queries[2], 0.005, theiler2(nidxs[2]))
@test isempty(__idxs)
vec_of_idxs, vec_of_ds = bulksearch(tree1, queries, NeighborNumber(k))
@test length(vec_of_idxs) == length(nidxs)
@test vec_of_idxs[1] == 48:52
@test vec_of_idxs[3] == 52:-1:48
@test sort(vec_of_idxs[1]) == sort(vec_of_idxs[2])
@test vec_of_ds[1] == vec_of_ds[3] == [(i-1)*0.001 for i in 1:5] # also tests sorting
vec_of_idxs, vec_of_ds = bulksearch(tree1, queries, NeighborNumber(k), theiler1)
@test 48 β vec_of_idxs[1]
@test 49 β vec_of_idxs[1]
@test 50 β vec_of_idxs[1]
@test 51 β vec_of_idxs[3]
@test 52 β vec_of_idxs[3]
@test 50 β vec_of_idxs[3]
@test 48 β vec_of_idxs[2]
@test 52 β vec_of_idxs[2]
@test 50 β vec_of_idxs[2]
vec_of_idxs, vec_of_ds = bulksearch(tree1, queries, NeighborNumber(k), theiler2)
@test vec_of_idxs[1] == isearch(tree1, queries[1], NeighborNumber(k), theiler2(48))
for j in 48:52
@test j β vec_of_idxs[2]
end
_vec_of_idxs = bulkisearch(tree1, queries, WithinRange(0.002))
@test length.(_vec_of_idxs) == [3, 5, 3]
vec_of_idxs, vec_of_ds = bulksearch(tree1, queries, WithinRange(0.002))
for ds in vec_of_ds
@test issorted(ds)
end
@test vec_of_idxs[1] == [48, 49, 50]
@test sort(vec_of_idxs[2]) == [48, 49, 50, 51, 52]
# final test, theiler window
vec_of_idxs, vec_of_ds = bulksearch(tree1, queries, WithinRange(0.002), theiler1)
@test vec_of_idxs[1] == vec_of_idxs[3] == [50]
@test vec_of_ds[1] == vec_of_ds[3] == [0.002]
@test vec_of_idxs[2] == [48, 52]
@test vec_of_ds[2] == [0.002, 0.002]
vec_of_idxs, vec_of_ds = bulksearch(tree1, queries, WithinRange(0.002), theiler2)
for (x, y) in zip(vec_of_idxs, vec_of_ds)
@test isempty(x)
@test isempty(y)
end
@testset "inrangecount" begin
N = 10
x = [rand(SVector{3}) for i = 1:N]
D = pairwise(Euclidean(), x)
ds = [sort(D[setdiff(1:N, i), i]) for i = 1:N]
min_ds = minimum.(ds)
max_ds = maximum.(ds)
tree = KDTree(x, Euclidean())
# Slightly extend bounds to check for both none and all neighbors.
rmins = [(min_ds[i] * 0.99) for (i, xα΅’) in enumerate(x)]
rmaxs = [(max_ds[i] * 1.01) for (i, xα΅’) in enumerate(x)]
ns_min = [inrangecount(tree, xα΅’, rmins[i]) - 1 for (i, xα΅’) in enumerate(x)]
ns_max = [inrangecount(tree, xα΅’, rmaxs[i]) - 1 for (i, xα΅’) in enumerate(x)]
@test all(ns_min .== 0)
@test all(ns_max .== N - 1)
# For each point, there should be exactly three neighbors within these radii
r3s = [(ds[i][3] * 1.01) for (i, xα΅’) in enumerate(x)]
ns_3s = [inrangecount(tree, xα΅’, r3s[i]) - 1 for (i, xα΅’) in enumerate(x)]
@test all(ns_3s .== 3)
end
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 754 | using Test, Neighborhood, StaticArrays, Random, Distances
using Neighborhood: datatype, getmetric, bruteforcesearch
using Neighborhood.Testing
# Fixed seed so every included test file sees the same random data.
Random.seed!(54525)
data = [rand(SVector{3}) for i in 1:1000]
query = SVector(0.99, 0.99, 0.99)
# Theiler window and skip predicate related
# Plant 5 collinear points at indices 48:52, spaced 0.001 apart, so window
# tests have known neighbors; queries are every second planted point.
data[48:52] .= [SVector(0.0, 0.0, i*0.001) for i in 1:5]
nidxs = 48:2:52
queries = [data[i] for i in nidxs]
theiler1 = Theiler(1, nidxs)
theiler2 = Theiler(2, nidxs)
r = 0.1
k = 5
@testset "Neighborhood" begin
@testset "Utils" begin include("util.jl") end
@testset "Neighborhood.Testing" begin include("Testing.jl") end
@testset "Brute force" begin include("bruteforce.jl") end
@testset "NearestNeighbors" begin include("nearestneighbors.jl") end
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | code | 1128 | using Neighborhood: metricreturntype
const metric = Euclidean()
# Returns same value as metric but compiler cannot infer type based on arguments
function typeunstable_metric(x, y)
isnan(x[1]) && return "foo" # Confuse the compiler
return metric(x, y)
end
# Callable wrapper that remembers whether the wrapped function was ever
# invoked (used to verify that inference-only paths never call the metric).
struct CalledChecker{F}
    f::F
    called::Ref{Bool}
    CalledChecker(f) = new{typeof(f)}(f, Ref(false))
end
# Forward the call, flagging `called` first.
(c::CalledChecker)(args...) = (c.called[] = true; c.f(args...))
@testset "metricreturntype" begin
for T in [Float64, Float32, Int]
x = zeros(T, 3)
metric_checked = CalledChecker(metric)
@test metricreturntype(metric_checked, x) === typeof(metric(x, x))
@test !metric_checked.called[] # Function should never have actually been called
# Return value should itself be inferrable
@inferred metricreturntype(metric, x)
# No inference possible, but should still get the correct result by calling the function
@test metricreturntype(typeunstable_metric, x) === typeof(typeunstable_metric(x, x))
end
end
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | docs | 559 | # Neighborhood.jl
| **Documentation** | **Tests** | Gitter |
|:--------:|:-------------------:|:-----:|
|[](https://JuliaNeighbors.github.io/Neighborhood.jl/dev) | [](https://github.com/JuliaNeighbors/Neighborhood.jl/actions) | [](https://gitter.im/JuliaDynamics/Lobby)
A common API for finding nearest neighbors in Julia.
See the documentation for more!
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | docs | 2840 | # Dev Docs
Here is what you have to do to bring your package into Neighborhood.jl.
* Add your package as a dependency to Neighborhood.jl.
* Add the search type into the constant `SSS` in `src/Neighborhood.jl` and export it.
* Then, proceed through this page and add as many methods as you can.
An example implementation for KDTrees of NearestNeighbors.jl is in `src/kdtree.jl`.
## Mandatory methods
Let `S` be the type of the search structure of your package.
To participate in this common API you should extend the following methods:
```julia
searchstructure(::Type{S}, data, metric; kwargs...) β ss
search(ss::S, query, t::SearchType; kwargs...) β idxs, ds
```
for both types of `t`: `WithinRange, NeighborNumber`.
`search` returns the indices of the neighbors (in the original `data`) and their
distances from the `query`.
Notice that `::Type{S}` only needs the supertype, e.g. `KDTree`, without the type-parameters.
## Performance methods
The following methods are implemented automatically from Neighborhood.jl if you
extend the mandatory methods. However, if there are performance benefits you should
write your own extensions.
```julia
isearch(ss::S, query, t::SearchType; kwargs...) β idxs # only indices
bulksearch(ss::S, queries, ::SearchType; kwargs...) β vec_of_idxs, vec_of_ds
bulkisearch(ss::S, queries, ::SearchType; kwargs...) β vec_of_idxs
```
## Predicate methods
The following methods are **extremely useful** in e.g. timeseries analysis.
```julia
search(ss::S, query, t::SearchType, skip; kwargs...)
bulksearch(ss::S, queries, t::SearchType, skip; kwargs...)
```
(and their "i" complements, `isearch, bulkisearch`).
These methods "skip" found neighbors depending on `skip`. In the first method
`skip` takes one argument: `skip(i)` the index of the found neighbor (in the original data)
and returns `true` if this neighbor should be skipped.
In the second version, `skip` takes two arguments `skip(i, j)` where now `j` is simply
the index of the query that we are currently searching for.
You can kill two birds with one stone and directly implement one method:
```julia
search(ss::S, query, t::SearchType, skip = alwaysfalse; kwargs...)
```
to satisfy both mandatory API as well as this one.
## Insertion/deletion methods
Simply extend `Base.insert!` and `Base.deleteat!` for your search structure.
## Testing
The [`Neighborhood.Testing`](@ref) submodule contains utilities for testing the
return value of [`search`](@ref) and related functions for your search structure.
Most of these functions use `Test.@test` internally, so just call within a `@testset`
in your unit tests.
```@docs
Neighborhood.Testing
Neighborhood.Testing.cmp_search_results
Neighborhood.Testing.cmp_bruteforce
Neighborhood.Testing.search_allfuncs
Neighborhood.Testing.check_search_results
Neighborhood.Testing.test_bulksearch
```
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.2.4 | fdea60ca30d724e76cc3b3d90d7f9d29d3d5cab5 | docs | 1327 | # Public API
Neighborhood.jl is a Julia package that provides a unified interface for performing nearest-neighbor searches.
This interface is described in this page.
## Search Structures
```@docs
searchstructure
```
All currently supported search structures are:
```@example sss
using Neighborhood # hide
for ss in Neighborhood.SSS # hide
println(ss) # hide
end # hide
```
The following functions are defined for search structures:
```@docs
Neighborhood.datatype
Neighborhood.getmetric
```
## Search functions
```@docs
search
isearch
inrange
inrangecount
knn
```
## Search types
```@docs
SearchType
WithinRange
NeighborNumber
```
## Bulk searches
Some packages support higher performance when doing bulk searches (instead of individually calling `search` many times).
```@docs
bulksearch
bulkisearch
```
## Brute force searches
The [`BruteForce`](@ref) "search structure" performs a linear search
through its data array, calculating the distance from the query to each data
point. This is the slowest possible implementation but can be used to check
results from other search structures for correctness. The
[`Neighborhood.bruteforcesearch`](@ref) function can be used instead without
having to create the search structure.
```@docs
Neighborhood.bruteforcesearch
BruteForce
```
## Theiler window
```@docs
Theiler
```
| Neighborhood | https://github.com/JuliaNeighbors/Neighborhood.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 656 | push!(LOAD_PATH,"../src/")
using Documenter
using StatsModels, DataFrames, VegaLite
using LinearRegressionKit
# Build the documentation site with the home page plus tutorial pages.
makedocs(sitename="LinearRegressionKit.jl", modules = [LinearRegressionKit] ,
pages = Any[
"Home" => "index.md",
"Tutorials" => Any[
"Basic" => "basic_tutorial.md",
"Multiple regression" => "multi_tutorial.md",
"Weighted regression" => "weighted_regression_tutorial.md",
"Ridge regression" => "ridge_regression_tutorial.md" ]
])
# Publish the built docs; previews are disabled and dev builds track `main`.
deploydocs(
repo = "github.com/ericqu/LinearRegressionKit.jl.git",
push_preview = false,
devbranch = "main",
devurl = "dev",
)
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 38987 | module LinearRegressionKit
export regress, predict_in_sample, predict_out_of_sample, linRegRes, kfold, ridge, ridgeRegRes, sweep_linreg
using Base: Tuple, Int64, Float64, Bool
using StatsBase:eltype, isapprox, length, coefnames, push!, append!
using Distributions, HypothesisTests, LinearAlgebra
using Printf, NamedArrays, FreqTables # FreqTables for check_cardinality
using StatsBase, Random, StatsModels, DataFrames
using VegaLite
include("sweep_operator.jl")
include("utilities.jl")
include("vl_utilities.jl")
include("newey_west.jl")
include("kfold.jl")
include("ridge.jl")
"""
    struct linRegRes

Container for the results of a linear regression fitted with `regress`.

Only the statistics requested at fit time are populated; every other field is
`nothing`. The robust-covariance fields (`white_*`, `hac_*`) are vectors because
several estimators may be requested in a single `regress` call; entry `i` of each
vector corresponds to entry `i` of `white_types` / `hac_types`.
"""
struct linRegRes
    extended_inverse::Matrix              # Store the extended inverse matrix
    coefs::Vector                         # Store the coefficients of the fitted model
    white_types::Union{Nothing,Vector}    # Store the type of White's covariance estimator(s) used
    hac_types::Union{Nothing,Vector}      # Store the type of Newey-West covariance estimator(s) used
    stderrors::Union{Nothing,Vector}      # Store the standard errors for the fitted model
    white_stderrors::Union{Nothing,Vector} # Store the standard errors modified for the White's covariance estimator
    hac_stderrors::Union{Nothing,Vector}  # Store the standard errors modified for the Newey-West covariance estimator
    t_values::Union{Nothing,Vector}       # Store the t values for the fitted model
    white_t_values::Union{Nothing,Vector} # Store the t values modified for the White's covariance estimator
    hac_t_values::Union{Nothing,Vector}   # Store the t values modified for the Newey-West covariance estimator
    p::Int64                              # Store the number of parameters (including the intercept as a parameter)
    MSE::Union{Nothing,Float64}           # Store the Mean squared error for the fitted model
    intercept::Bool                       # Indicate if the model has an intercept
    R2::Union{Nothing,Float64}            # Store the R-squared value for the fitted model
    ADJR2::Union{Nothing,Float64}         # Store the adjusted R-squared value for the fitted model
    RMSE::Union{Nothing,Float64}          # Store the Root mean square error for the fitted model
    AIC::Union{Nothing,Float64}           # Store the Akaike information criterion for the fitted model
    ΟΜΒ²::Union{Nothing,Float64}          # Store the ΟΜΒ² for the fitted model
    p_values::Union{Nothing,Vector}       # Store the p values for the fitted model
    white_p_values::Union{Nothing,Vector} # Store the p values modified for the White's covariance estimator
    hac_p_values::Union{Nothing,Vector}   # Store the p values modified for the Newey-West covariance estimator
    ci_up::Union{Nothing,Vector}          # Store the upper values confidence interval of the coefficients
    ci_low::Union{Nothing,Vector}         # Store the lower values confidence interval of the coefficients
    white_ci_up::Union{Nothing,Vector}    # Store the upper values confidence interval of the coefficients for White covariance estimators
    white_ci_low::Union{Nothing,Vector}   # Store the lower values confidence interval of the coefficients for White covariance estimators
    hac_ci_up::Union{Nothing,Vector}      # Store the upper values confidence interval of the coefficients for Newey-West covariance estimators
    hac_ci_low::Union{Nothing,Vector}     # Store the lower values confidence interval of the coefficients for Newey-West covariance estimators
    observations                          # Store the number of observations used in the model
    t_statistic::Union{Nothing,Float64}   # Store the t statistic
    VIF::Union{Nothing,Vector}            # Store the Variance inflation factor
    Type1SS::Union{Nothing,Vector}        # Store the Type 1 Sum of Squares
    Type2SS::Union{Nothing,Vector}        # Store the Type 2 Sum of Squares
    pcorr1::Union{Nothing,Vector{Union{Missing, Float64}}} # Store the squared partial correlation coefficients using Type1SS
    pcorr2::Union{Nothing,Vector{Union{Missing, Float64}}} # Store the squared partial correlation coefficients using Type2SS
    scorr1::Union{Nothing,Vector{Union{Missing, Float64}}} # Store the squared semi-partial correlation coefficient using Type1SS
    scorr2::Union{Nothing,Vector{Union{Missing, Float64}}} # Store the squared semi-partial correlation coefficient using Type2SS
    modelformula                          # Store the model formula
    dataschema                            # Store the dataschema
    updformula                            # Store the updated model formula (after the dataschema has been applied)
    alpha                                 # Store the alpha used to compute the confidence interval of the coefficients
    KS_test::Union{Nothing,String}        # Store results of the Kolmogorov-Smirnov test
    AD_test::Union{Nothing,String}        # Store results of the AndersonβDarling test
    JB_test::Union{Nothing,String}        # Store results of the Jarque-Bera test
    White_test::Union{Nothing,String}     # Store results of the White test
    BP_test::Union{Nothing,String}        # Store results of the Breusch-Pagan test
    weighted::Bool                        # Indicates if this is a weighted regression
    weights::Union{Nothing,String}        # Indicates which column of the dataframe contains the analytical weights
    PRESS::Union{Nothing,Float64}         # Store the PRESS statistic
    cond::Union{Nothing,Float64}          # Store Condition number of the design matrix
    f_value::Union{Nothing,Float64}       # Store F Value (also known as F Statistic) of the fitted model
    f_pvalue::Union{Nothing,Float64}      # Store p_value of F Value of the fitted model
    dof_model::Union{Nothing,Float64}     # Store degree of freedom of fitted model
    dof_error::Union{Nothing,Float64}     # Store degree of freedom of the error part
end
"""
    function Base.show(io::IO, lr::linRegRes)

Pretty-print a fitted model to `io`: header (formula, observations, condition
number), scalar model statistics, then one coefficient table per covariance
estimator (classical, each White estimator, each Newey-West estimator), and
finally any requested diagnostic tests. Only statistics that are not `nothing`
are printed, so the output mirrors what was requested from `regress`.
"""
function Base.show(io::IO, lr::linRegRes)
    if lr.weighted
        println(io, "Weighted regression")
    end
    println(io, "Model definition:\t", lr.modelformula)
    println(io, "Used observations:\t", lr.observations)
    if !isnothing(lr.cond)
        println(io, "Condition number:\t", lr.cond)
    end
    println(io, "Model statistics:")
    # Display stats when available; paired stats share a line when both exist.
    if !isnothing(lr.R2) && !isnothing(lr.ADJR2)
        @printf(io, " RΒ²: %g\t\t\tAdjusted RΒ²: %g\n", lr.R2, lr.ADJR2)
    elseif !isnothing(lr.R2)
        @printf(io, " RΒ²: %g\n", lr.R2)
    end
    if !isnothing(lr.MSE) && !isnothing(lr.RMSE)
        @printf(io, " MSE: %g\t\t\tRMSE: %g\n", lr.MSE, lr.RMSE)
    elseif !isnothing(lr.MSE)
        @printf(io, " MSE: %g\n", lr.MSE)
    end
    if !isnothing(lr.PRESS)
        @printf(io, " PRESS: %g\n", lr.PRESS)
    end
    # ΟΜΒ²/AIC are only shown for the classical (non-robust) estimator output.
    if length(lr.white_types) + length(lr.hac_types) == 0
        if !isnothing(lr.ΟΜΒ²) && !isnothing(lr.AIC)
            @printf(io, " ΟΜΒ²: %g\t\t\tAIC: %g\n", lr.ΟΜΒ², lr.AIC)
        elseif !isnothing(lr.ΟΜΒ²)
            @printf(io, " ΟΜΒ²: %g\n", lr.ΟΜΒ²)
        elseif !isnothing(lr.AIC)
            @printf(io, " AIC: %g\n", lr.AIC)
        end
    end
    if !isnothing(lr.f_value)
        @printf(io, " F Value: %g with degrees of freedom %g and %g, Pr > F (p-value): %g\n", lr.f_value, lr.dof_model, lr.dof_error, lr.f_pvalue)
    end
    if !isnothing(lr.ci_low) || !isnothing(lr.ci_up)
        @printf(io, "Confidence interval: %g%%\n", (1 - lr.alpha) * 100 )
    end
    # Column headers for the coefficient table(s); a copy is handed to the
    # helper each time because it mutates the titles to fit the content.
    vec_stats_title = ["Coefs", "Std err", "t", "Pr(>|t|)", "code", "low ci", "high ci", "VIF",
                        "Type1 SS", "Type2 SS", "PCorr1", "PCorr2",
                        "SCorr1", "SCorr2"]
    r_signif_codes::Union{Nothing,Vector{String}} = nothing
    # Classical coefficient table (only when no robust estimator was requested).
    if length(lr.white_types) + length(lr.hac_types) == 0
        r_signif_codes = nothing
        if !isnothing(lr.p_values)
            r_signif_codes = get_r_significance_code.(lr.p_values)
        end
        helper_print_table(io, "Coefficients statistics:",
            [lr.coefs, lr.stderrors, lr.t_values, lr.p_values, r_signif_codes, lr.ci_low, lr.ci_up, lr.VIF,
            lr.Type1SS, lr.Type2SS, lr.pcorr1, lr.pcorr2, lr.scorr1, lr.scorr2],
            deepcopy(vec_stats_title),
            lr.updformula)
        if !isnothing(r_signif_codes)
            @printf(io, "\n\tSignif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1\n")
        end
    end
    # One table per requested White's (HC) covariance estimator.
    if length(lr.white_types) > 0
        for (cur_i, cur_type) in enumerate(lr.white_types)
            r_signif_codes = nothing
            if !isnothing(lr.p_values)
                r_signif_codes = get_r_significance_code.(lr.white_p_values[cur_i])
            end
            helper_print_table(io, "White's covariance estimator ($(Base.Unicode.uppercase(string(cur_type)))):",
                [lr.coefs, lr.white_stderrors[cur_i], lr.white_t_values[cur_i], lr.white_p_values[cur_i],
                r_signif_codes,
                lr.white_ci_low[cur_i], lr.white_ci_up[cur_i], lr.VIF, lr.Type1SS, lr.Type2SS,
                lr.pcorr1, lr.pcorr2, lr.scorr1, lr.scorr2],
                deepcopy(vec_stats_title),
                lr.updformula)
            if !isnothing(r_signif_codes)
                @printf(io, "\n\tSignif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1\n")
            end
        end
    end
    # One table per requested Newey-West (HAC) covariance estimator.
    if length(lr.hac_types) > 0
        for (cur_i, cur_type) in enumerate(lr.hac_types)
            r_signif_codes = nothing
            if !isnothing(lr.p_values)
                r_signif_codes = get_r_significance_code.(lr.hac_p_values[cur_i])
            end
            helper_print_table(io, "Newey-West's covariance estimator:",
                [lr.coefs, lr.hac_stderrors[cur_i], lr.hac_t_values[cur_i], lr.hac_p_values[cur_i],
                r_signif_codes,
                lr.hac_ci_low[cur_i], lr.hac_ci_up[cur_i], lr.VIF, lr.Type1SS, lr.Type2SS,
                lr.pcorr1, lr.pcorr2, lr.scorr1, lr.scorr2],
                deepcopy(vec_stats_title),
                lr.updformula)
            if !isnothing(r_signif_codes)
                @printf(io, "\n\tSignif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1\n")
            end
        end
    end
    # Requested residual/heteroscedasticity diagnostic tests, if any.
    if !isnothing(lr.KS_test) || !isnothing(lr.AD_test) || !isnothing(lr.JB_test) || !isnothing(lr.White_test) || !isnothing(lr.BP_test)
        println(io, "\nDiagnostic Tests:\n")
        !isnothing(lr.KS_test) && print(io, lr.KS_test)
        !isnothing(lr.AD_test) && print(io, lr.AD_test)
        !isnothing(lr.JB_test) && print(io, lr.JB_test)
        !isnothing(lr.White_test) && print(io, lr.White_test)
        !isnothing(lr.BP_test) && print(io, lr.BP_test)
    end
end
"""
    function getVIF(x, intercept, p)

(internal) Compute the variance inflation factor (VIF) for each column of the
design matrix `x`. With an intercept, the intercept column is excluded from the
correlation matrix and a leading 0 is reported for it; without an intercept the
classical formula is applied to all columns. With a single parameter the VIF is
degenerate and is returned directly without forming a correlation matrix.
"""
function getVIF(x, intercept, p)
    if intercept
        p == 1 && return [0., 1.]
        predictors = @view(x[:, 2:end])
        return vcat(0, diag(inv(cor(predictors))))
    end
    p == 1 && return [0.]
    return diag(inv(cor(x)))
end
"""
    function getSST(y, intercept)

(internal) Compute the total sum of squares (SST),
see https://en.wikipedia.org/wiki/Total_sum_of_squares.
With an intercept the responses are centered on their mean; without an
intercept the SST is simply the uncentered sum of squares of `y`.
"""
function getSST(y, intercept)
    if intercept
        centered = y .- mean(y)
        return sum(abs2.(centered))
    end
    return sum(abs2.(y))
end
"""
    function getSST(y, intercept, weights, ridge=false)

(internal) Compute the total sum of squares (SST) for a weighted regression,
see https://en.wikipedia.org/wiki/Total_sum_of_squares.
With an intercept the responses are centered on their analytically weighted
mean; without an intercept the uncentered weighted sum of squares is used.
When called from the ridge path the `y`s are raw; when called from `regress`
they arrive pre-multiplied by βweights and are unweighted here first.
"""
function getSST(y, intercept, weights, ridge=false)
    raw_ys = ridge ? y : y ./ sqrt.(weights)
    if intercept
        weighted_mean = mean(raw_ys, aweights(weights))
        return sum(weights .* abs2.(raw_ys .- weighted_mean))
    end
    return sum(weights .* abs2.(raw_ys))
end
"""
    function lr_predict(xs, coefs, intercept::Bool)

(internal) Predict the responses yΜ for the design matrix `xs` given the
estimated `coefs`. When the model has an intercept, the first coefficient is
the constant term and the first column of `xs` (the constant column) is skipped.
"""
function lr_predict(xs, coefs, intercept::Bool)
    if intercept
        slopes = @view(coefs[2:end])
        return @view(xs[:, 2:end]) * slopes .+ coefs[1]
    end
    return xs * coefs
end
"""
    function hasintercept!(f::StatsModels.FormulaTerm)

(internal) Return a tuple `(intercept::Bool, formula)`. The flag is `true` when
the formula has an intercept term. When the formula contains an explicit
constant term (e.g. `y ~ 0 + x` or `y ~ 1 + x`) its value decides the flag and
the formula is returned unchanged; otherwise an intercept term is added to the
right-hand side and the updated formula is returned.
"""
function hasintercept!(f::StatsModels.FormulaTerm)
    if f.rhs isa ConstantTerm{Int64}
        return convert(Bool, f.rhs.n), f
    end
    if f.rhs isa Tuple
        for term in f.rhs
            term isa ConstantTerm{Int64} && return convert(Bool, term.n), f
        end
    end
    # No explicit constant term: add an intercept.
    return true, FormulaTerm(f.lhs, InterceptTerm{true}() + f.rhs)
end
"""
    function get_pcorr(typess, sse, intercept)

(internal) Compute the squared partial correlation coefficients from a vector
of Type 1 or Type 2 sums of squares and the error sum of squares `sse`.
When the model has an intercept, its entry is reported as `missing`.
"""
function get_pcorr(typess, sse, intercept)
    nterms = length(typess)
    pcorr = Vector{Union{Missing, Float64}}(undef, nterms)
    first_term = 1
    if intercept
        pcorr[1] = missing
        first_term = 2
    end
    for i in first_term:nterms
        pcorr[i] = typess[i] / (typess[i] + sse)
    end
    return pcorr
end
"""
    function get_scorr(typess, sst, intercept)

(internal) Compute the squared semi-partial correlation coefficients from a
vector of Type 1 or Type 2 sums of squares and the total sum of squares `sst`.
When the model has an intercept, its entry is reported as `missing`.
"""
function get_scorr(typess, sst, intercept)
    nterms = length(typess)
    scorr = Vector{Union{Missing, Float64}}(undef, nterms)
    first_term = 1
    if intercept
        scorr[1] = missing
        first_term = 2
    end
    for i in first_term:nterms
        scorr[i] = typess[i] / sst
    end
    return scorr
end
"""
    function design_matrix!(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame;
        weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing, ridge=false)

(Internal) Build the design matrix (with `x` and `y` separated) from a formula
and a dataframe, applying weights and contrasts when given.
Updates the formula to remove ambiguity about the intercept term and may return
a new (copied/filtered) dataframe.

Returns `(x, y, n, p, intercept, f, copieddf, updatedformula, isweighted, dataschema)`.
Note: when weighted and not ridge, `x` and `y` are returned pre-multiplied by
βweights so downstream code can use ordinary least-squares machinery.
"""
function design_matrix!(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame;
    weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing, ridge=false)
    intercept, f = hasintercept!(f)
    copieddf = df
    if remove_missing
        # keep only the columns referenced by the formula before dropping rows
        copieddf = copy(df[: , Symbol.(keys(schema(f, df).schema))])
        dropmissing!(copieddf)
    end
    if isa(weights, String)
        if !in(Symbol(weights), propertynames(copieddf))
            # fall back to an unweighted regression when the column is absent
            println("Weights have been specified being the column $(weights) however such colum does not exist in the dataframe provided. Regression will be done without weights")
            weights = nothing
        else
            if remove_missing
                copieddf[!, weights] = df[!, weights]
            end
            # non-positive weights are treated as missing and their rows dropped
            allowmissing!(copieddf, weights)
            copieddf[!, weights][copieddf[!, weights] .<= 0] .= missing
            dropmissing!(copieddf)
        end
    end
    isweighted = !isnothing(weights)
    if isnothing(contrasts)
        dataschema = schema(f, copieddf)
    else
        dataschema = schema(f, copieddf, contrasts)
    end
    updatedformula = apply_schema(f, dataschema)
    y, x = modelcols(updatedformula, copieddf)
    n, p = size(x)
    # weighted OLS: scale rows by βw (ridge handles weights separately)
    if isweighted && ridge == false
        x = x .* sqrt.(copieddf[!, weights])
        y = y .* sqrt.(copieddf[!, weights])
    end
    return x, y, n, p, intercept, f, copieddf, updatedformula, isweighted, dataschema
end
"""
    function regress(f::StatsModels.FormulaTerm, df::AbstractDataFrame, req_plots; Ξ±::Float64=0.05, req_stats=["default"], weights::Union{Nothing,String}=nothing, remove_missing=false, cov=[:none], contrasts=nothing, plot_args=Dict("plot_width" => 400, "loess_bw" => 0.6, "residuals_with_density" => false))

Estimate the coefficients of the regression, given a dataset and a formula, and provide the requested plot(s).
Returns a tuple `(fitted_model, plots)` where `plots` is a dictionary of the generated plots indexed by the description of the plots.
It is possible to indicate the width of the plots, and the bandwidth of the Loess smoother, through `plot_args`.
"""
function regress(f::StatsModels.FormulaTerm, df::AbstractDataFrame, req_plots; Ξ±::Float64=0.05, req_stats=["default"], weights::Union{Nothing,String}=nothing, remove_missing=false, cov=[:none], contrasts=nothing, plot_args=Dict("plot_width" => 400, "loess_bw" => 0.6, "residuals_with_density" => false))
    all_plots = Dict{String,VegaLite.VLSpec}()
    neededplots = get_needed_plots(req_plots)
    # fit the model first, then compute all in-sample statistics the plots need
    lm = regress(f, df, Ξ±=Ξ±, req_stats=req_stats, remove_missing=remove_missing, cov=cov,
                    contrasts=contrasts, weights=weights)
    results = predict_in_sample(lm, df, req_stats="all")
    # each helper adds its plot(s) to `all_plots` keyed by description
    if :fit in neededplots
        fitplot!(all_plots, results, lm, plot_args)
    end
    if :residuals in neededplots
        residuals_plots!(all_plots, results, lm, plot_args)
    end
    if :normal_checks in neededplots
        normality_plots!(all_plots, results, lm, plot_args)
    end
    if :homoscedasticity in neededplots
        scalelocation_plot!(all_plots, results, lm, plot_args)
    end
    if :cooksd in neededplots
        cooksd_plot!(all_plots, results, lm, plot_args)
    end
    if :leverage in neededplots
        leverage_plot!(all_plots, results, lm, plot_args)
    end
    return (lm, all_plots)
end
"""
    function regress(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame; Ξ±::Float64=0.05, req_stats=["default"], weights::Union{Nothing,String}=nothing,
        remove_missing=false, cov=[:none], contrasts=nothing)

Estimate the coefficients of the regression, given a dataset and a formula; returns a `linRegRes`.
The formula details are provided in the StatsModels package and the behaviour aims to be similar as what the Julia GLM package provides.
The data shall be provided as a DataFrame without missing data.
If remove_missing is set to true a copy of the dataframe will be made and the row with missing data will be removed.
Some robust covariance estimator(s) can be requested through the `cov` argument (White/HC and Newey-West).
Default contrast is dummy coding, other contrasts can be requested through the `contrasts` argument.
For a weighted regression, the name of column containing the analytical weights shall be identified by the `weights` argument.
Only the statistics named in `req_stats` are computed; all others are `nothing` in the result.
"""
function regress(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame; Ξ±::Float64=0.05, req_stats=["default"], weights::Union{Nothing,String}=nothing,
    remove_missing=false, cov=[:none], contrasts=nothing)
    (Ξ± > 0. && Ξ± < 1.) || throw(ArgumentError("Ξ± must be between 0 and 1"))
    needed_stats = get_needed_model_stats(req_stats)
    # stats initialization: dictionaries hold only the requested statistics
    total_scalar_stats = Set([:sse, :mse, :sst, :r2, :adjr2, :rmse, :aic, :sigma, :t_statistic, :press, :cond ])
    total_vector_stats = Set([:coefs, :stderror, :t_values, :p_values, :ci, :vif, :t1ss, :t2ss, :pcorr1, :pcorr2, :scorr1, :scorr2])
    total_diag_stats = Set([:diag_ks, :diag_ad, :diag_jb, :diag_white, :diag_bp])
    scalar_stats = Dict{Symbol,Union{Nothing,Float64}}(intersect(total_scalar_stats, needed_stats) .=> nothing)
    vector_stats = Dict{Symbol,Union{Nothing,Vector}}(intersect(total_vector_stats, needed_stats) .=> nothing)
    diag_stats = Dict{Symbol,Union{Nothing,String}}(intersect(total_diag_stats, needed_stats) .=> nothing)
    sse = nothing
    x, y, n, p, intercept, f, copieddf, updatedformula, isweighted, dataschema = design_matrix!(f, df, weights=weights, remove_missing=remove_missing, contrasts=contrasts)
    xy = [x y]
    if :cond in needed_stats
        scalar_stats[:cond] = cond(x)
    end
    # fit via the sweep operator on the augmented cross-product matrix [X y]'[X y]
    xytxy = xy' * xy
    try
        if :t1ss in needed_stats
            sse, vector_stats[:t1ss] = sweep_op_fullT1SS!(xytxy)
        else
            sse = sweep_op_full!(xytxy)
        end
    catch ae
        throw(ae)
    finally
        # diagnose categorical columns with too many levels on failure or success
        check_cardinality(copieddf, updatedformula)
    end
    coefs = xytxy'[1:p, end]
    mse = xytxy'[p + 1, p + 1] / (n - p)
    # ---- scalar model statistics (each guarded by the request set) ----
    if :sst in needed_stats
        if isweighted
            scalar_stats[:sst] = getSST(y, intercept, copieddf[!, weights])
        else
            scalar_stats[:sst] = getSST(y, intercept)
        end
    end
    if :r2 in needed_stats
        scalar_stats[:r2] = 1. - (sse / scalar_stats[:sst])
    end
    if :adjr2 in needed_stats
        scalar_stats[:adjr2] = 1. - ((n - convert(Int64, intercept)) * (1. - scalar_stats[:r2])) / (n - p)
    end
    if :rmse in needed_stats
        scalar_stats[:rmse] = real_sqrt(mse)
    end
    if :aic in needed_stats
        scalar_stats[:aic] = n * log(sse / n) + 2p
    end
    if :sigma in needed_stats
        scalar_stats[:sigma] = mse
    end
    if :t_statistic in needed_stats
        scalar_stats[:t_statistic] = quantile(TDist(n - p), 1 - Ξ± / 2)
    end
    # ---- per-coefficient statistics ----
    if :t2ss in needed_stats
        vector_stats[:t2ss] = get_TypeIISS(xytxy)
    end
    if :pcorr1 in needed_stats
        vector_stats[:pcorr1] = get_pcorr(vector_stats[:t1ss], sse, intercept)
    end
    if :pcorr2 in needed_stats
        vector_stats[:pcorr2] = get_pcorr(vector_stats[:t2ss], sse, intercept)
    end
    if :scorr1 in needed_stats
        vector_stats[:scorr1] = get_scorr(vector_stats[:t1ss], scalar_stats[:sst], intercept)
    end
    if :scorr2 in needed_stats
        vector_stats[:scorr2] = get_scorr(vector_stats[:t2ss], scalar_stats[:sst], intercept)
    end
    if :stderror in needed_stats
        vector_stats[:stderror] = real_sqrt.(diag(mse * @view(xytxy'[1:end - 1, 1:end - 1])))
    end
    if :t_values in needed_stats
        vector_stats[:t_values] = coefs ./ vector_stats[:stderror]
    end
    if :p_values in needed_stats
        # tΒ² ~ F(1, n-p): two-sided p-values via the F distribution
        vector_stats[:p_values] = ccdf.(Ref(FDist(1., (n - p))), abs2.(vector_stats[:t_values]))
    end
    if :f_stats in needed_stats
        dof_model = p - 1
        dof_error = n - 1 - dof_model
        if !intercept
            dof_model = p
            dof_error = n -dof_model
        end
        ssmodel = scalar_stats[:sst] - sse
        scalar_stats[:f_value] = ssmodel / dof_model / mse
        scalar_stats[:dof_model] = dof_model
        scalar_stats[:dof_error] = dof_error
        scalar_stats[:f_pvalue] = ccdf.(Ref(FDist(dof_model, dof_error)), scalar_stats[:f_value])
    end
    if :ci in needed_stats
        # half-width of the confidence interval; bounds are coefs Β± ci
        vector_stats[:ci] = vector_stats[:stderror] * scalar_stats[:t_statistic]
    end
    if :vif in needed_stats
        vector_stats[:vif] = getVIF(x, intercept, p)
    end
    # ---- residual-based diagnostic tests ----
    if length(intersect(needed_stats, Set([:diag_ks, :diag_ad, :diag_jb, :diag_white, :diag_bp]))) > 0
        residuals = y - lr_predict(x, coefs, intercept)
        if :diag_ks in needed_stats
            diag_stats[:diag_ks] = present_kolmogorov_smirnov_test(residuals, Ξ±)
        end
        if :diag_ad in needed_stats
            diag_stats[:diag_ad] = present_anderson_darling_test(residuals, Ξ±)
        end
        if :diag_jb in needed_stats
            diag_stats[:diag_jb] = present_jarque_bera_test(residuals, Ξ±)
        end
        if :diag_white in needed_stats
            if intercept && !isweighted
                diag_stats[:diag_white] = present_white_test(x, residuals, Ξ±)
            else
                println("White test diagnostic for heteroscedasticity was requested but it requires a non-weighted model with intercept")
            end
        end
        if :diag_bp in needed_stats
            if intercept && !isweighted
                diag_stats[:diag_bp] = present_breusch_pagan_test(x, residuals, Ξ±)
            else
                println("Breusch-Pagan test diagnostic for heteroscedasticity was requested but it requires a non weighted model with intercept")
            end
        end
    end
    needed_white, needed_hac = get_needed_robust_cov_stats(cov)
    # robust estimators stats: parallel vectors indexed by estimator type
    white_types = Vector{Symbol}()
    white_stds = Vector{Vector}()
    white_t_vals = Vector{Vector}()
    white_p_vals = Vector{Vector}()
    white_ci_up = Vector{Vector}()
    white_ci_low = Vector{Vector}()
    hac_types = Vector{Symbol}()
    hac_stds = Vector{Vector}()
    hac_t_vals = Vector{Vector}()
    hac_p_vals = Vector{Vector}()
    hac_ci_up = Vector{Vector}()
    hac_ci_low = Vector{Vector}()
    # statistics requiring predictions (robust estimator and PRESS)
    if length(needed_white) > 0 || length(needed_hac) > 0 || :press in needed_stats
        predict_results = predict_internal(copieddf, f, updatedformula, isweighted, weights, xytxy, coefs, intercept,
                length(needed_white) > 0, length(needed_hac) > 0, mse, scalar_stats[:t_statistic], p, n;
                Ξ±= Ξ±, req_stats=[:residuals, :press], dropmissingvalues = false)
        residuals = predict_results.residuals
        presses = predict_results.press
        scalar_stats[:press] = sum(presses.^2)
        if length(needed_white) > 0
            for t in needed_white
                # skip duplicates (e.g. :white resolving to an already requested HC type)
                if t in white_types
                    continue
                end
                cur_type, cur_std = heteroscedasticity(t, x, y, residuals, n, p, xytxy')
                push!(white_types, cur_type)
                push!(white_stds, cur_std)
                if !isnothing(get(vector_stats, :t_values, nothing))
                    cur_t_vals = coefs ./ cur_std
                    push!(white_t_vals, cur_t_vals)
                else
                    white_t_vals = nothing
                end
                if !isnothing(get(vector_stats, :p_values, nothing))
                    cur_p_vals = ccdf.(Ref(FDist(1., (n - p))), abs2.(cur_t_vals))
                    push!(white_p_vals, cur_p_vals)
                else
                    white_p_vals = nothing
                end
                if !isnothing(get(vector_stats, :ci, nothing))
                    cur_ci = cur_std * scalar_stats[:t_statistic]
                    cur_ci_up = coefs .+ cur_ci
                    cur_ci_low = coefs .- cur_ci
                    push!(white_ci_up, cur_ci_up)
                    push!(white_ci_low, cur_ci_low)
                else
                    white_ci_up = nothing
                    white_ci_low = nothing
                end
            end
        end
        if length(needed_hac) > 0
            for t in needed_hac
                if t in hac_types
                    continue
                end
                cur_type, cur_std = HAC(t, x, y, residuals, n, p)
                push!(hac_types, cur_type)
                push!(hac_stds, cur_std)
                if !isnothing(get(vector_stats, :t_values, nothing))
                    cur_t_vals = coefs ./ cur_std
                    push!(hac_t_vals, cur_t_vals)
                else
                    hac_t_vals = nothing
                end
                if !isnothing(get(vector_stats, :p_values, nothing))
                    cur_p_vals = ccdf.(Ref(FDist(1., (n - p))), abs2.(cur_t_vals))
                    push!(hac_p_vals, cur_p_vals)
                else
                    hac_p_vals = nothing
                end
                if !isnothing(get(vector_stats, :ci, nothing))
                    cur_ci = cur_std * scalar_stats[:t_statistic]
                    cur_ci_up = coefs .+ cur_ci
                    cur_ci_low = coefs .- cur_ci
                    push!(hac_ci_up, cur_ci_up)
                    push!(hac_ci_low, cur_ci_low)
                else
                    hac_ci_up = nothing
                    hac_ci_low = nothing
                end
            end
        end
    end
    # assemble the (immutable) results structure; absent stats default to nothing
    sres = linRegRes(xytxy', coefs,
                    white_types, hac_types,
                    get(vector_stats, :stderror, nothing), white_stds, hac_stds,
                    get(vector_stats, :t_values, nothing), white_t_vals, hac_t_vals,
                    p, mse, intercept, get(scalar_stats, :r2, nothing),
                    get(scalar_stats, :adjr2, nothing), get(scalar_stats, :rmse, nothing), get(scalar_stats, :aic, nothing), get(scalar_stats, :sigma, nothing),
                    get(vector_stats, :p_values, nothing), white_p_vals, hac_p_vals,
                    haskey(vector_stats, :ci) ? coefs .+ vector_stats[:ci] : nothing,
                    haskey(vector_stats, :ci) ? coefs .- vector_stats[:ci] : nothing,
                    white_ci_up, white_ci_low,
                    hac_ci_up, hac_ci_low,
                    n, get(scalar_stats, :t_statistic, nothing), get(vector_stats, :vif, nothing),
                    get(vector_stats, :t1ss, nothing), get(vector_stats, :t2ss, nothing),
                    get(vector_stats, :pcorr1, nothing), get(vector_stats, :pcorr2, nothing),
                    get(vector_stats, :scorr1, nothing), get(vector_stats, :scorr2, nothing),
                    f, dataschema, updatedformula, Ξ±,
                    get(diag_stats, :diag_ks, nothing), get(diag_stats, :diag_ad, nothing), get(diag_stats, :diag_jb, nothing),
                    get(diag_stats, :diag_white, nothing), get(diag_stats, :diag_bp, nothing),
                    isweighted, weights, get(scalar_stats, :press, nothing),
                    get(scalar_stats, :cond, nothing),
                    get(scalar_stats, :f_value, nothing), # F value
                    get(scalar_stats, :f_pvalue, nothing), # p_value of F Value
                    get(scalar_stats, :dof_model, nothing), # degree of freedom (model)
                    get(scalar_stats, :dof_error, nothing), # degree of freedome (error)
                    )
    return sres
end
"""
    function HAC(t::Symbol, x, y, residuals, n, p)

(Internal) Return the relevant HAC (heteroskedasticity and autocorrelation
consistent) standard errors as `(t, stderrors)`. In the current version only
the Newey-West estimator is implemented.
"""
function HAC(t::Symbol, x, y, residuals, n, p)
    bread = inv(x' * x)
    meat = newey_west(x .* residuals)
    sandwich = n * bread * meat * bread
    return (t, sqrt.(diag(sandwich)))
end
"""
    function heteroscedasticity(t::Symbol, x, y, residuals, n, p, xytxy)

(Internal) Compute the standard errors modified for the White's covariance estimator,
returned as `(resolved_type, stderrors)`.
Currently support HC0, HC1, HC2 and HC3. When :white is passed, select HC3 when the
number of observations is at most 250 otherwise select HC0.
Throws an error for any other symbol.
"""
function heteroscedasticity(t::Symbol, x, y, residuals, n, p, xytxy)
    # (X'X)β»ΒΉ as produced by the sweep operator (bread of the sandwich estimator)
    XX = @view(xytxy[1:end - 1, 1:end - 1])
    xe = x .* residuals
    # :white is an alias resolved by sample size
    if t == :white
        t = n <= 250 ? :hc3 : :hc0
    end
    if t == :hc0
        xetxe = xe' * xe
        return (:hc0, real_sqrt.(diag(XX * xetxe * XX)))
    elseif t == :hc1
        # small-sample degrees-of-freedom correction
        scale = (n / (n - p))
        xetxe = xe' * xe
        return (:hc1, real_sqrt.(diag(XX * xetxe * XX .* scale)))
    elseif t == :hc2 || t == :hc3
        # leverage (hat-matrix diagonal) computed once for both estimators;
        # the original code inverted X'X a second time here
        leverage = diag(x * inv(x' * x) * x')
        # HC2 scales residuals by 1/(1-h), HC3 by 1/(1-h)Β²
        power = t == :hc2 ? 1 : 2
        scale = @.( 1. / (1. - leverage)^power)
        xe = @.(xe * real_sqrt(scale))
        xetxe = xe' * xe
        # real_sqrt (as in hc0/hc1) guards against tiny negative diagonal
        # entries from floating-point round-off
        return (t, real_sqrt.(diag(XX * xetxe * XX)))
    else
        throw(error("Unknown symbol ($(t)) used as the White's covariance estimator"))
    end
end
"""
    function predict_internal(df::AbstractDataFrame, modelformula, updatedformula, weighted, weights, extended_inverse,
        coefs, intercept, needed_white, needed_hac, ΟΜΒ², t_statistic, p, n, oos=false;
        Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)

Internal, users should use `predict_in_sample` or `predict_out_of_sample`. This should be used only when the `struct linRegRes` is not constructed yet.
Computes the requested per-observation statistics (predicted, residuals,
leverage, std errors, confidence/prediction intervals, PRESS, Cook's D, ...) and
returns a copy of the relevant dataframe columns with one column per requested
statistic. When `oos` is true no response variable is expected and
response-dependent statistics are skipped.

Fix: the "missing weights column" warning previously called `println(io, ...)`
with an undefined `io`, raising an `UndefVarError`; it now prints to stdout as
`design_matrix!` does.
"""
function predict_internal(df::AbstractDataFrame, modelformula, updatedformula, weighted, weights, extended_inverse,
    coefs, intercept, needed_white, needed_hac, ΟΜΒ², t_statistic, p, n, oos=false;
    Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)
    copieddf = df
    # keep only the columns referenced by the formula (rhs only when out-of-sample)
    if oos
        copieddf = df[: , Symbol.(keys(schema(modelformula.rhs, df).schema))]
    else
        copieddf = df[: , Symbol.(keys(schema(modelformula, df).schema))]
    end
    if dropmissingvalues == true
        dropmissing!(copieddf)
    end
    if weighted
        if !in(Symbol(weights), propertynames(df))
            # bug fix: was `println(io, ...)` with `io` undefined in this scope
            println("Weights have been specified being the column $(weights) however such colum does not exist in the dataframe provided. Regression will be done without weights")
            weights = nothing
        else
            # NOTE(review): if `dropmissingvalues` removed rows above, this full-column
            # assignment from `df` may mismatch lengths β confirm callers' usage
            copieddf[!, weights] = df[!, weights]
            allowmissing!(copieddf, weights)
            copieddf[!, weights][copieddf[!, weights] .<= 0] .= missing
            dropmissing!(copieddf)
        end
    end
    y = nothing
    x = nothing
    if oos
        x = modelcols(updatedformula.rhs, copieddf)
    else
        y, x = modelcols(updatedformula, copieddf)
    end
    needed, present = get_prediction_stats(req_stats)
    needed_stats = Dict{Symbol,Vector}()
    for sym in needed
        # NOTE(review): `n` is a scalar here so `length(n) == 1`; these placeholders
        # are overwritten below for every computed stat β confirm intent
        needed_stats[sym] = zeros(length(n))
    end
    if :leverage in needed
        # hat-matrix diagonal from the swept (X'X)β»ΒΉ block
        pinverse = @view(extended_inverse[1:end - 1, 1:end - 1])
        if weighted
            needed_stats[:leverage] = copieddf[!, weights] .* diag(x * pinverse * x')
        else
            needed_stats[:leverage] = diag(x * pinverse * x')
        end
    end
    if :predicted in needed
        needed_stats[:predicted] = lr_predict(x, coefs, intercept)
    end
    if :residuals in needed && oos == false
        needed_stats[:residuals] = y .- needed_stats[:predicted]
    end
    if :stdp in needed
        # standard error of the mean prediction
        if isnothing(ΟΜΒ²)
            throw(ArgumentError(":stdp requires that the ΟΜΒ² (:sigma) was previously calculated through the regression"))
        end
        warn_sigma(needed_white, needed_hac, :stdp)
        if weighted
            needed_stats[:stdp] = real_sqrt.(needed_stats[:leverage] .* ΟΜΒ² ./ copieddf[!, weights])
        else
            needed_stats[:stdp] = real_sqrt.(needed_stats[:leverage] .* ΟΜΒ²)
        end
    end
    if :stdi in needed
        # standard error of an individual (new) prediction
        if isnothing(ΟΜΒ²)
            throw(ArgumentError(":stdi requires that the ΟΜΒ² (:sigma) was previously calculated through the regression"))
        end
        warn_sigma(needed_white, needed_hac, :stdi)
        if weighted
            needed_stats[:stdi] = real_sqrt.((1. .+ needed_stats[:leverage]) .* ΟΜΒ² ./ copieddf[!, weights])
        else
            needed_stats[:stdi] = real_sqrt.((1. .+ needed_stats[:leverage]) .* ΟΜΒ²)
        end
    end
    if :stdr in needed
        # standard error of the residuals
        if isnothing(ΟΜΒ²)
            throw(ArgumentError(":stdr requires that the ΟΜΒ² (:sigma) was previously calculated through the regression"))
        end
        warn_sigma(needed_white, needed_hac, :stdr)
        if weighted
            needed_stats[:stdr] = real_sqrt.((1. .- needed_stats[:leverage]) .* ΟΜΒ² ./ copieddf[!, weights] )
        else
            needed_stats[:stdr] = real_sqrt.((1. .- needed_stats[:leverage]) .* ΟΜΒ²)
        end
    end
    if :student in needed && oos == false
        # internally studentized residuals
        warn_sigma(needed_white, needed_hac, :student)
        needed_stats[:student] = needed_stats[:residuals] ./ needed_stats[:stdr]
    end
    if :rstudent in needed && oos == false
        # externally studentized residuals
        warn_sigma(needed_white, needed_hac, :rstudent)
        needed_stats[:rstudent] = needed_stats[:student] .* real_sqrt.( (n .- p .- 1 ) ./ (n .- p .- needed_stats[:student].^2 ) )
    end
    if :lcli in needed
        warn_sigma(needed_white, needed_hac, :lcli)
        needed_stats[:lcli] = needed_stats[:predicted] .- (t_statistic .* needed_stats[:stdi])
    end
    if :ucli in needed
        warn_sigma(needed_white, needed_hac, :ucli)
        needed_stats[:ucli] = needed_stats[:predicted] .+ (t_statistic .* needed_stats[:stdi])
    end
    if :lclp in needed
        warn_sigma(needed_white, needed_hac, :lclp)
        needed_stats[:lclp] = needed_stats[:predicted] .- (t_statistic .* needed_stats[:stdp])
    end
    if :uclp in needed
        warn_sigma(needed_white, needed_hac, :uclp)
        needed_stats[:uclp] = needed_stats[:predicted] .+ (t_statistic .* needed_stats[:stdp])
    end
    if :press in needed && oos == false
        # leave-one-out prediction residuals
        needed_stats[:press] = needed_stats[:residuals] ./ (1. .- needed_stats[:leverage])
    end
    if :cooksd in needed && oos == false
        warn_sigma(needed_white, needed_hac, :cooksd)
        needed_stats[:cooksd] = needed_stats[:stdp].^2 ./ needed_stats[:stdr].^2 .* needed_stats[:student].^2 .* (1 / p)
    end
    # materialize only the presentable statistics as dataframe columns
    for sym in present
        copieddf[!, sym] = needed_stats[sym]
    end
    return copieddf
end
"""
    function predict_in_sample(lr::linRegRes, df::AbstractDataFrame; Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)

Using the estimated coefficients from the regression make predictions, and calculate related statistics
(residuals, leverage, intervals, PRESS, Cook's D, ...) for data that includes the response variable.
Returns a dataframe with one column per requested statistic.
"""
function predict_in_sample(lr::linRegRes, df::AbstractDataFrame; Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)
    # thin wrapper: unpack the fitted model and delegate to predict_internal (oos defaults to false)
    predict_internal(df, lr.modelformula, lr.updformula, lr.weighted, lr.weights, lr.extended_inverse, lr.coefs, lr.intercept,
                        lr.white_types, lr.hac_types, lr.ΟΜΒ², lr.t_statistic, lr.p, lr.observations;
                        Ξ±=Ξ±, req_stats=req_stats, dropmissingvalues=dropmissingvalues)
end
"""
function predict_out_of_sample(lr::linRegRes, df::AbstractDataFrame; Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)
Similar to `predict_in_sample` although it does not expect a response variable nor produce statistics requiring a response variable.
"""
function predict_out_of_sample(lr::linRegRes, df::AbstractDataFrame; Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)
predict_internal(df, lr.modelformula, lr.updformula, lr.weighted, lr.weights, lr.extended_inverse, lr.coefs, lr.intercept,
lr.white_types, lr.hac_types, lr.ΟΜΒ², lr.t_statistic, lr.p, lr.observations, true;
Ξ±=Ξ±, req_stats=req_stats, dropmissingvalues=dropmissingvalues)
end
end # end of module definition
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 1252 | """
function kfold(f, df, k, r = 1, shuffle=true; kwargs...)
Provide a simple `k` fold(s) cross-validation, repeated `r` time(s).
`kwargs` arguments are passed to the `regress` function call.
This feature overlap in part with the PRESS statistics.
"""
function kfold(f, df, k, r = 1, shuffle=true; kwargs...)
    # Bug fix: the positional parameter `shuffle` shadowed `Random.shuffle`, so the
    # former call `shuffle(axes(df, 1))` attempted to call a Bool and raised a
    # MethodError whenever shuffling was requested. The permutation is now built with
    # `sortperm(rand(n))` (a uniform random permutation), which needs no extra import
    # and leaves the parameter name — and therefore the signature — unchanged.
    totalrows = nrow(df)
    gindexes = Vector(undef, k)
    resvec = Vector(undef, k * r)
    sdf = @view df[!, :]
    # The fold index ranges are identical for every repetition; compute them once.
    for ck in 1:k
        gindexes[ck] = ck:k:totalrows
    end
    for cr in 1:r
        if shuffle
            # uniform random permutation of the rows
            sdf = @view df[sortperm(rand(totalrows)), :]
        end
        for ck in 1:k
            # train on every fold except ck, test on fold ck
            training_range = reduce(vcat, map(x -> gindexes[x], filter(x -> x != ck, 1:k)))
            training = @view sdf[training_range, :]
            testing = @view sdf[gindexes[ck], :]
            lr = regress(f, training; kwargs...)
            cres = predict_in_sample(lr, testing, req_stats=[:residuals])
            t_mse = mean(cres.residuals .^ 2)
            resvec[(k * (cr - 1)) + ck] = (R2 = lr.R2, ADJR2 = lr.ADJR2, TRAIN_MSE = lr.MSE,
                        TRAIN_RMSE = lr.RMSE,
                        TEST_MSE = t_mse, TEST_RMSE = βt_mse)
        end
    end
    return DataFrame(resvec)
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 1039 | # Newey-West covariance estimator
# from https://github.com/mcreel/Econometrics/blob/508aee681ca42ff1f361fd48cd64de6565ece221/src/NP/NeweyWest.jl
# under MIT licence https://github.com/mcreel/Econometrics/blob/508aee681ca42ff1f361fd48cd64de6565ece221/LICENSE
# and adapted
"""
function newey_west(Z,nlags=0)
Returns the Newey-West estimator of the asymptotic variance matrix
INPUTS: Z, a nxk matrix with rows the vector zt'
nlags, the number of lags
OUTPUTS: omegahat, the Newey-West estimator of the covariance matrix
"""
function newey_west(Z,nlags=0)
n,k = size(Z)
# de-mean the variables
Z = Z .- mean(Z,dims=1)
omegahat = Z'*Z/n # sample variance
# automatic lags?
if nlags == 0
nlags = max(1, round(Int, n^0.25))
end
# sample autocovariances
for i = 1:nlags
Zlag = @view(Z[1:n-i,:])
ZZ = @view(Z[i+1:n,:])
gamma = (ZZ'*Zlag)/n
weight = 1.0 - (i/(nlags+1.0))
omegahat += weight*(gamma + gamma')
end
return omegahat
end | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 9335 | # # Ridge regression adapted from the SAS fomulas
# # see https://blogs.sas.com/content/iml/2013/03/20/compute-ridge-regression.html
"""
function ridge(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame, k::Float64 ;
weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing)
Ridge regression, expects a k parameter (also known as k).
When weights are provided, result in a weighted ridge regression.
"""
function ridge(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame, k::Float64 ;
weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing)
X, y, n, p, intercept, f, copieddf, updatedformula, isweighted, dataschema =
design_matrix!(f, df, weights=weights, remove_missing=remove_missing, contrasts=contrasts, ridge=true)
cweights = nothing
if !isnothing(weights)
cweights = copieddf[!, weights]
end
coefs, vifs = iridge(X, y, intercept, k, cweights)
mse, rmse, r2, adjr2 = iridge_stats(X, y, coefs, intercept, n, p, cweights)
res_ridge_reg = ridgeRegRes(
k, p, n, intercept, coefs,
vifs, mse, rmse, r2, adjr2, f, updatedformula, dataschema, isweighted, weights)
return res_ridge_reg
end
"""
function ridge(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame, ks::AbstractRange ;
weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing, traceplots=false)
Ridge regression, expects a range of k parameter (also known as k).
When weights are provided, result in a weighted ridge regression.
When traceplots are requested, also return a dictionnary of trace plots.
"""
function ridge(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame, ks::AbstractRange ;
weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing,
traceplots = false )
X, y, n, p, intercept, f, copieddf, updatedformula, isweighted, dataschema =
design_matrix!(f, df, weights=weights, remove_missing=remove_missing, contrasts=contrasts, ridge=true)
vcoefs, vvifs = iridge(X, y, intercept, ks)
cweights = nothing
if !isnothing(weights)
cweights = copieddf[!, weights]
end
vcoefs, vvifs = iridge(X, y, intercept, ks, cweights)
vmse = Vector{Float64}(undef, length(ks))
vrmse = Vector{Float64}(undef, length(ks))
vr2 = Vector{Float64}(undef, length(ks))
vadjr2 = Vector{Float64}(undef, length(ks))
for (i, x) in enumerate(ks)
vmse[i], vrmse[i], vr2[i], vadjr2[i] = iridge_stats(X, y, vcoefs[i], intercept, n, p, cweights)
end
coefs_names = encapsulate_string(string.(StatsBase.coefnames(updatedformula.rhs)))
vifs_names = "vif " .* coefs_names
cv = [ks vmse vrmse vr2 vadjr2 transpose(hcat(vcoefs...)) transpose(hcat(vvifs...)) ]
all_names = ["k", "MSE", "RMSE", "R2", "ADJR2", coefs_names..., vifs_names... ]
df = DataFrame(cv, all_names)
if traceplots == false
return df
else
return df, ridge_traceplots(df)
end
end
"""
function prepare_ridge(X_orig, y_orig, intercept, weights::Union{Nothing,Vector{Float64}}=nothing)
(internal) Prepare the design matrix for ridge regression by centering the data (potentially weighted).
"""
function prepare_ridge(X_orig, y_orig, intercept, weights::Union{Nothing,Vector{Float64}}=nothing)
X = deepcopy(X_orig)
y = deepcopy(y_orig)
# removes the intercept (assumed to be the first column)
if intercept
X = X[:, deleteat!(collect(axes(X, 2)), 1)]
end
Xmeans = nothing
ymean = nothing
if !isnothing(weights)
Xmeans = mean(X, aweights(weights), dims=1)
ymean = mean(y, aweights(weights))
else
# get the means the Xs and ys
Xmeans = mean(X, dims=1)
ymean = mean(y)
end
# center the X and y
for i in 1:size(X, 2)
X[:, i] .-= Xmeans[i]
end
y .-= ymean
# if needed apply weights to the centered X and y
if !isnothing(weights)
X = X .* sqrt.(weights)
y = y .* sqrt.(weights)
end
XTX = X'X
D = Diagonal(diag(XTX))
Z = X / sqrt(D)
ZTZ = Z'Z
return XTX, D, Z, ZTZ, ymean, Xmeans, y, X
end
"""
function iridge(X_orig, y_orig, intercept, k::Float64, weights::Union{Nothing,Vector{Float64}}=nothing)
XTX, D, Z, ZTZ, ymean, Xmeans, y, X = prepare_ridge(X_orig, y_orig, intercept, weights)
(internal) compute the coefficient(s) and the VIF of a ridge regression given a scalar k.
"""
function iridge(X_orig, y_orig, intercept, k::Float64, weights::Union{Nothing,Vector{Float64}}=nothing)
XTX, D, Z, ZTZ, ymean, Xmeans, y, X = prepare_ridge(X_orig, y_orig, intercept, weights)
invZTZ = pinv(ZTZ + k * I)
coefs = invZTZ * (Z' * y) ./ (sqrt.(diag(XTX)))
vifs = diag(invZTZ * ZTZ * invZTZ)
if (intercept)
# get intercept back
interceptvalue = ymean - sum(vec(Xmeans) .* coefs)
coefs = vec([interceptvalue coefs...])
vifs = vec([0. vifs...])
end
return coefs, vifs
end
"""
function iridge(X_orig, y_orig, intercept, ks::AbstractRange, weights::Union{Nothing,Vector{Float64}}=nothing)
XTX, D, Z, ZTZ, ymean, Xmeans, y, X = prepare_ridge(X_orig, y_orig, intercept, weights)
(internal) compute the coefficient(s) and the VIF for each ridge regression with a range of k.
"""
function iridge(X_orig, y_orig, intercept, ks::AbstractRange, weights::Union{Nothing,Vector{Float64}}=nothing)
XTX, D, Z, ZTZ, ymean, Xmeans, y, X = prepare_ridge(X_orig, y_orig, intercept, weights)
vcoefs = Vector{Vector{}}(undef, length(ks))
vvifs = Vector{Vector{}}(undef, length(ks))
for (i, k) in enumerate(ks)
invZTZ = pinv(ZTZ + k * I)
vcoefs[i] = invZTZ * (Z' * y) ./ (sqrt.(diag(XTX)))
vvifs[i] = diag(invZTZ * ZTZ * invZTZ)
if (intercept)
# get intercept back
interceptvalue = ymean - sum(vec(Xmeans) .* vcoefs[i])
vcoefs[i] = vec([interceptvalue vcoefs[i]...])
vvifs[i] = vec([0. vvifs[i]...])
end
end
return vcoefs, vvifs
end
"""
function iridge_stats(X, y, coefs, intercept, n, p, weights::Union{Nothing,Vector{Float64}}=nothing)
(internal) compute the limited stats from a ridge regression.
"""
function iridge_stats(X, y, coefs, intercept, n, p, weights::Union{Nothing,Vector{Float64}}=nothing)
yΜ = lr_predict(X, coefs, intercept)
residuals = y .- yΜ
sse = nothing
if isnothing(weights)
sse = sum(residuals.^2)
else
sse = sum(residuals.^2, aweights(weights))
end
mse = sse / (n - p)
rmse = real_sqrt(mse)
sst = nothing
if isnothing(weights)
sst = getSST(y, intercept)
else
sst = getSST(y, intercept, weights, true)
end
r2 = 1. - (sse / sst)
adjr2 = 1. - ((n - convert(Int64, intercept)) * (1. - r2)) / (n - p)
return mse, rmse, r2, adjr2
end
"""
Store the result of a single ridge (potentially weighted) regression
"""
struct ridgeRegRes
k::Float64
p::Float64
observations
intercept::Bool
coefs::Vector
VIF::Vector
MSE::Float64
RMSE::Float64
R2::Float64
ADJR2::Float64
modelformula
updatedformula
dataschema
weighted::Bool
weights::Union{Nothing,String}
end
"""
function Base.show(io::IO, rr::ridgeRegRes)
Display information about the fitted ridge regression model
"""
function Base.show(io::IO, rr::ridgeRegRes)
if rr.weighted
println(io, "Weighted Ridge regression")
else
println(io, "Ridge Regression")
end
println(io, "Constant k:\t", rr.k)
println(io, "Model definition:\t", rr.modelformula)
println(io, "Used observations:\t", rr.observations)
println(io, "Model statistics:")
@printf(io, " RΒ²: %g\t\t\tAdjusted RΒ²: %g\n", rr.R2, rr.ADJR2)
@printf(io, " MSE: %g\t\t\tRMSE: %g\n", rr.MSE, rr.RMSE)
helper_print_table(io, "Coefficients statistics:",
[rr.coefs, rr.VIF], ["Coefs", "VIF"], rr.updatedformula)
end
"""
function predict_in_sample(rr::ridgeRegRes, df::AbstractDataFrame; dropmissingvalues=true)
Using the estimated coefficients from the regression make predictions, and calculate related statistics.
"""
function predict_in_sample(rr::ridgeRegRes, df::AbstractDataFrame; dropmissingvalues=true)
predict_internal(df, rr.modelformula, rr.updatedformula, rr.weighted, rr.weights, nothing, rr.coefs, rr.intercept,
nothing, nothing, nothing, nothing, rr.p, rr.observations, false;
Ξ±=nothing, req_stats=[:predicted, :residuals], dropmissingvalues=dropmissingvalues)
end
"""
function predict_out_of_sample(rr::ridgeRegRes, df::AbstractDataFrame; dropmissingvalues=true)
Similar to `predict_in_sample` although it does not expect a response variable nor produce statistics requiring a response variable.
"""
function predict_out_of_sample(rr::ridgeRegRes, df::AbstractDataFrame; dropmissingvalues=true)
predict_internal(df, rr.modelformula, rr.updatedformula, rr.weighted, rr.weights, nothing, rr.coefs, rr.intercept,
nothing, nothing, nothing, nothing, rr.p, rr.observations, true;
Ξ±=nothing, req_stats=[:predicted], dropmissingvalues=dropmissingvalues)
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 4100 | using LinearAlgebra
import LinearAlgebra:checksquare
"""
function sweep_linreg(x, y)
Convenience function when the design matrix (the xs and ys) are known. And when only the coefficients are needed.
Uses the normal equations and the sweep operator.
similar to the x \\ y operator.
"""
function sweep_linreg(x, y)
nn, p = size(x)
if ndims(y) == 2
n, pp = size(y)
else
pp = 1
end
back = pp - 1
xy = [x y]
xytxy = xy' * xy
sweep_op_full!(xytxy)
return xytxy'[1:p, end-back:end]
end
"""
function sweep_op_internal!(A::AbstractMatrix{Float64}, k::Int64)
Implement the naive sweep operator described by Goodnight, J. (1979). "A Tutorial on the SWEEP Operator." The American Statistician.
The algorithm is modified to work on column major to limit cache misses (although comment are referring to the original algorithm).
It gives the transpose of the results.
"""
function sweep_op_internal!(A::AbstractMatrix{Float64}, k::Int64)
p = checksquare(A)
if k >= p
throw(ArgumentError("Incorrect k value"))
end
@inbounds D = A[k, k] # step 1 D = Aββ
if D == zero(eltype(A))
throw(ArgumentError("sweep_op_internal!: the element $k,$k of the matrix is zero. Is the Design Matrix symmetric positive definite?"))
end
@inbounds for i in 1:p
A[i, k] = A[i, k] / D # step 2 divide row k by D
end
@inbounds for i in 1:(k - 1) # step 3: for every other row i != k
B = A[k, i] # let B = Aα΅’β
for j in 1:p
A[j, i] = A[j, i] - B * A[j, k] # Subtract B * row k from row i
end
A[k, i] = - B / D # set Aα΅’β = -B/D
end
@inbounds for i in k + 1:p # step 3: for every other row i != k
B = A[k, i] # let B = Aα΅’β
for j in 1:p
A[j, i] = A[j, i] - B * A[j, k] # Subtract B * row k from row i
end
A[k, i] = - B / D # set Aα΅’β = -B/D
end
@inbounds A[k,k] = 1 / D # step 4 Set Aββ = 1/D
return nothing
end
# function sweep_op!(A::AbstractMatrix{Float64}, k::Int64)
# sweep_op_internal!(A, k)
# end
# function sweep_op!(A::AbstractMatrix{Float64}, ks::AbstractVector{Int64})
# for k in ks
# sweep_op_internal!(A, k)
# end
# return A
# end
"""
function sweep_op_full!(A::AbstractMatrix{Float64})
(internal) Get SSE, error sum of squares, for the full model.
"""
function sweep_op_full!(A::AbstractMatrix{Float64})
n , p = size(A)
for k in 1:p-1
sweep_op_internal!(A, k)
end
return A[p,p]
end
"""
function sweep_op_fullT1SS!(A::AbstractMatrix{Float64})
(internal) Get SSE, error sum of squares for the full model.
Also give Type I SS for all independent variables.
"""
function sweep_op_fullT1SS!(A::AbstractMatrix{Float64})
n , p = size(A)
TypeISS = Vector{Float64}(undef, p-1)
for k in 1:p-1
preSSE = A[p,p]
sweep_op_internal!(A, k)
TypeISS[k] = preSSE - A[p,p]
end
return A[p,p], TypeISS
end
# """
# function sweep_op_fullT1SS!(A::AbstractMatrix{Float64})
# (internal) Get SSE, error sum of squares for all (p-1) models.
# Also give Type I SS for all independent variables.
# """
# function sweep_op_allT1SS!(A::AbstractMatrix{Float64})
# n , p = size(A)
# TypeISS = Vector{Float64}(undef, p-1)
# SSEs = Vector{Float64}(undef, p-1)
# for k in 1:p-1
# preSSE = A[p,p]
# sweep_op_internal!(A, k)
# SSEs[k] = A[p, p]
# TypeISS[k] = preSSE - A[p,p]
# end
# return SSEs, TypeISS
# end
"""
function get_TypeIISS(extended_inverse::AbstractMatrix{Float64})
(internal) Get Type II SS for all independent variables given an extended inverse (sweep operator already applied)
"""
function get_TypeIISS(extended_inverse::AbstractMatrix{Float64})
n, p = size(extended_inverse')
TypeIISS = Vector{Float64}(undef, p-1)
for k in 1:p-1
TypeIISS[k] = extended_inverse'[k, end]^2 / extended_inverse'[k, k]
end
return TypeIISS
end | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 1656 |
# The content of this file is copied from https://github.com/JuliaStats/StatsModels.jl/blob/master/test/extension.jl (in December 2021)
# The license hereafter is associated with this content
# Copyright (c) 2016: Dave Kleinschmidt.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# `poly(x, n)` is a placeholder used inside formulas; it is rewritten into a
# `PolyTerm` when the schema is applied.
poly(x, n) = x^n
# Marker type: model types subtyping `PolyModel` opt into the `poly` formula syntax.
abstract type PolyModel end
# Polynomial expansion of the variable `term` up to degree `deg`.
struct PolyTerm <: AbstractTerm
    term::Symbol
    deg::Int
end
PolyTerm(t::Term, deg::ConstantTerm) = PolyTerm(t.sym, deg.n)
# Rewrite `poly(x, n)` calls found in a formula into `PolyTerm`s.
StatsModels.apply_schema(t::FunctionTerm{typeof(poly)}, sch, ::Type{<:PolyModel}) =
    PolyTerm(t.args_parsed...)
# Materialize the polynomial columns x, x^2, ..., x^deg side by side.
StatsModels.modelcols(p::PolyTerm, d::NamedTuple) =
    reduce(hcat, [d[p.term].^n for n in 1:p.deg])
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 17066 | get_all_plots_types() = Set([:fit, :residuals, :normal_checks, :cooksd, :leverage, :homoscedasticity])
# Normalise the various accepted argument types down to a Vector{Symbol}.
get_needed_plots(s::String) = get_needed_plots([s])
get_needed_plots(s::Symbol) = get_needed_plots([s])
get_needed_plots(s::Vector{String}) = get_needed_plots(Symbol.(lowercase.(s)))
get_needed_plots(::Vector{Any}) = get_needed_plots([:none])
get_needed_plots(::Set{Any}) = get_needed_plots([:none])
get_needed_plots(s::Set{Symbol}) = get_needed_plots(collect(s))

"""
    function get_needed_plots(p::Vector{Symbol})

(internal) Translate the requested plot names into the set of plots to generate.
An empty request or `:none` yields no plots; `:all` yields every known plot;
unrecognised names are silently ignored.
"""
function get_needed_plots(p::Vector{Symbol})
    selected = Set{Symbol}()
    (length(p) == 0 || :none in p) && return selected
    :all in p && return get_all_plots_types()
    for candidate in (:fit, :residuals, :normal_checks, :cooksd, :leverage, :homoscedasticity)
        candidate in p && push!(selected, candidate)
    end
    return selected
end
"""
function get_robust_cov_stats()
Return all robust covariance estimators.
"""
get_all_robust_cov_stats() = Set([:white, :nw, :hc0, :hc1, :hc2, :hc3])
get_needed_robust_cov_stats(s::String) = return get_needed_robust_cov_stats([s])
get_needed_robust_cov_stats(s::Symbol) = return get_needed_robust_cov_stats([s])
get_needed_robust_cov_stats(s::Vector{String}) = return get_needed_robust_cov_stats(Symbol.(lowercase.(s)))
get_needed_robust_cov_stats(::Vector{Any}) = return get_needed_robust_cov_stats([:none])
get_needed_robust_cov_stats(::Set{Any}) = return get_needed_robust_cov_stats(Set([:none]))
get_needed_robust_cov_stats(s::Set{Symbol}) = return get_needed_robust_cov_stats(collect(s))
function get_needed_robust_cov_stats(s::Vector{Symbol})
needed_white = Vector{Symbol}()
needed_hac = Vector{Symbol}()
length(s) == 0 && return (needed_white, needed_hac)
:none in s && return (needed_white, needed_hac)
if :all in s
s = collect(get_all_robust_cov_stats())
end
:white in s && push!(needed_white, :white)
:hc0 in s && push!(needed_white, :hc0)
:hc1 in s && push!(needed_white, :hc1)
:hc2 in s && push!(needed_white, :hc2)
:hc3 in s && push!(needed_white, :hc3)
:nw in s && push!(needed_hac, :nw)
return (needed_white, needed_hac)
end
"""
function get_all_model_stats()
Returns all statistics availble for the fitted model.
"""
get_all_model_stats() = Set([:coefs, :sse, :mse, :sst, :rmse, :aic, :sigma, :t_statistic, :vif, :r2, :adjr2, :stderror, :t_values, :p_values, :ci,
:diag_normality, :diag_ks, :diag_ad, :diag_jb, :diag_heteroskedasticity, :diag_white, :diag_bp, :press,
:t1ss, :t2ss, :pcorr1, :pcorr2 , :scorr1, :scorr2, :cond, :f_stats ])
get_needed_model_stats(req_stats::String) = return get_needed_model_stats([req_stats])
get_needed_model_stats(req_stats::Symbol) = return get_needed_model_stats(Set([req_stats]))
get_needed_model_stats(req_stats::Vector{String}) = return get_needed_model_stats(Symbol.(lowercase.(req_stats)))
get_needed_model_stats(::Vector{Any}) = return get_needed_model_stats([:none])
get_needed_model_stats(::Set{Any}) = return get_needed_model_stats(Set([:none]))
get_needed_model_stats(req_stats::Set{Symbol}) = get_needed_model_stats(collect(req_stats))
"""
function get_needed_model_stats(req_stats::Vector{Symbol})
return the list of needed statistics given the list of statistics about the model the caller wants.
"""
function get_needed_model_stats(req_stats::Vector{Symbol})
needed = Set([:coefs, :sse, :mse])
default = Set([:coefs, :sse, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values,
:ci, :f_stats])
full = get_all_model_stats()
unique!(req_stats)
length(req_stats) == 0 && return needed
:none in req_stats && return needed
:all in req_stats && return full
:default in req_stats && union!(needed, default)
:sst in req_stats && push!(needed, :sst)
:t1ss in req_stats && push!(needed, :t1ss)
:t2ss in req_stats && push!(needed, :t2ss)
:press in req_stats && push!(needed, :press)
:rmse in req_stats && push!(needed, :rmse)
:aic in req_stats && push!(needed, :aic)
:sigma in req_stats && push!(needed, :sigma)
:t_statistic in req_stats && push!(needed, :t_statistic)
:vif in req_stats && push!(needed, :vif)
:diag_ks in req_stats && push!(needed, :diag_ks)
:diag_ad in req_stats && push!(needed, :diag_ad)
:diag_jb in req_stats && push!(needed, :diag_jb)
:diag_white in req_stats && push!(needed, :diag_white)
:diag_bp in req_stats && push!(needed, :diag_bp)
:cond in req_stats && push!(needed, :cond)
# :f_stats in req_stats && push!(needed, :sst)
# :f_stats in req_stats && push!(needed, :f_stats)
if :f_stats in req_stats
push!(needed, :sst)
push!(needed, :f_stats)
end
if :diag_normality in req_stats
push!(needed, :diag_ks)
push!(needed, :diag_ad)
push!(needed, :diag_jb)
end
if :diag_heteroskedasticity in req_stats
push!(needed, :diag_white)
push!(needed, :diag_bp)
end
if :pcorr1 in req_stats
push!(needed, :t1ss)
push!(needed, :pcorr1)
end
if :pcorr2 in req_stats
push!(needed, :t2ss)
push!(needed, :pcorr2)
end
if :scorr1 in req_stats
push!(needed, :sst)
push!(needed, :t1ss)
push!(needed, :scorr1)
end
if :scorr2 in req_stats
push!(needed, :sst)
push!(needed, :t2ss)
push!(needed, :scorr2)
end
if :r2 in req_stats
push!(needed, :sst)
push!(needed, :r2)
end
if :adjr2 in req_stats
push!(needed, :sst)
push!(needed, :r2)
push!(needed, :adjr2)
end
if :stderror in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
end
if :t_values in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
push!(needed, :t_values)
end
if :p_values in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
push!(needed, :t_values)
push!(needed, :p_values)
end
if :ci in req_stats
push!(needed, :sigma)
push!(needed, :stderror)
push!(needed, :t_statistic)
push!(needed, :ci)
end
return needed
end
"""
function get_all_prediction_stats()
get all the available statistics about the values predicted by a fitted model
"""
get_all_prediction_stats() = Set([:predicted, :residuals, :leverage, :stdp, :stdi, :stdr, :student, :rstudent, :lcli, :ucli, :lclp, :uclp, :press, :cooksd])
get_prediction_stats(req_stats::String) = return get_prediction_stats([req_stats])
get_prediction_stats(req_stats::Vector{String}) = return get_prediction_stats(Symbol.(lowercase.(req_stats)))
get_prediction_stats(::Vector{Any}) = return get_prediction_stats([:none])
get_prediction_stats(::Set{Any}) = return get_prediction_stats(Set([:none]))
get_prediction_stats(req_stats::Set{Symbol}) = return get_prediction_stats(collect(req_stats))
"""
function get_prediction_stats(req_stats::Vector{Symbol})
return the list of needed statistics and the statistics that need to be presentd given the list of statistics about the predictions the caller wants.
"""
function get_prediction_stats(req_stats::Vector{Symbol})
needed = Set([:predicted])
full = get_all_prediction_stats()
present = Set([:predicted])
unique!(req_stats)
length(req_stats) == 0 && return needed, present
:none in req_stats && return needed, present
:all in req_stats && return full, full
:leverage in req_stats && push!(present, :leverage)
:residuals in req_stats && push!(present, :residuals)
if :stdp in req_stats
push!(needed, :leverage)
push!(present, :stdp)
end
if :stdi in req_stats
push!(needed, :leverage)
push!(present, :stdi)
end
if :stdr in req_stats
push!(needed, :leverage)
push!(present, :stdr)
end
if :student in req_stats
push!(needed, :leverage)
push!(needed, :residuals)
push!(needed, :stdr)
push!(present, :student)
end
if :rstudent in req_stats
push!(needed, :leverage)
push!(needed, :residuals)
push!(needed, :stdr)
push!(needed, :student)
push!(present, :rstudent)
end
if :lcli in req_stats
push!(needed, :leverage)
push!(needed, :stdi)
push!(present, :lcli)
end
if :ucli in req_stats
push!(needed, :leverage)
push!(needed, :stdi)
push!(present, :ucli)
end
if :lclp in req_stats
push!(needed, :leverage)
push!(needed, :stdp)
push!(present, :lclp)
end
if :uclp in req_stats
push!(needed, :leverage)
push!(needed, :stdp)
push!(present, :uclp)
end
if :press in req_stats
push!(needed, :residuals)
push!(needed, :leverage)
push!(present, :press)
end
if :cooksd in req_stats
push!(needed, :leverage)
push!(needed, :stdp)
push!(needed, :stdr)
push!(needed, :residuals)
push!(needed, :student)
push!(present, :cooksd)
end
union!(needed, present)
return needed, present
end
"""
function encapsulate_string(s)
(internal) Only used to encapsulate a string into an array.
used exclusively to handle the function ```StatsBase.coefnames``` which sometime return an array or when there is only one element the element alone.
"""
function encapsulate_string(s::String)
return [s]
end
"""
function encapsulate_string(v)
(internal) Only used to encapsulate a string into an array.
used exclusively to handle the function ```StatsBase.coefnames``` which sometime return an array or when there is only one element the element alone.
"""
function encapsulate_string(v::Vector{String})
return v
end
import Printf
"""
macro gprintf(fmt::String)
(internal) used to format with %g
Taken from message published by user o314 at https://discourse.julialang.org/t/printf-with-variable-format-string/3805/6
"""
macro gprintf(fmt::String)
    # builds an anonymous (io, arg) function that prints `arg` with the format `fmt`
    :((io::IO, arg) -> Printf.@printf(io, $fmt, arg))
end
"""
function fmt_pad(s::String, value, pad=0)
(internal) helper to format and pad string for results display
"""
function fmt_pad(s::String, value, pad=0)
fmt = @gprintf("%g")
return rpad(s * sprint(fmt, value), pad)
end
using NamedArrays
"""
    function my_namedarray_print([io::IO = stdout], n::NamedArray)

(internal) Print the NamedArray without the type annotation (on the first line).
"""
function my_namedarray_print(io::IO, n)
    # render to a buffer, blank out quote characters, then drop the first line
    # (which carries the type annotation)
    tmpio = IOBuffer()
    show(tmpio, MIME"text/plain"(), n)
    println(io, split(replace(String(take!(tmpio)), @raw_str("\"")=>@raw_str(" ")), "\n", limit=2)[2])
end
my_namedarray_print(n::NamedArray) = my_namedarray_print(stdout::IO, n)
"""
function helper_print_table(io, title, stats::Vector, stats_name::Vector, updformula)
(Internal) Convenience function to display a table of statistics to the user.
"""
function helper_print_table(io::IO, title, stats::Vector, stats_name::Vector, updformula)
println(io, "\n$title")
todelete = [i for (i, v) in enumerate(stats) if isnothing(v)]
deleteat!(stats, todelete)
deleteat!(stats_name, todelete)
m_all_stats = reduce(hcat, stats)
if m_all_stats isa Vector
m_all_stats = reshape(m_all_stats, length(m_all_stats), 1)
end
na = NamedArray(m_all_stats)
setnames!(na, encapsulate_string(string.(StatsBase.coefnames(updformula.rhs))), 1)
setnames!(na, encapsulate_string(string.(stats_name)), 2)
setdimnames!(na, ("Terms", "Stats"))
my_namedarray_print(io, na)
end
"""
    present_breusch_pagan_test(X, residuals, Ξ±)

(internal) Run the Breusch-Pagan test for homoskedasticity of the residuals and
return a formatted summary string including the decision at significance level `Ξ±`.
"""
function present_breusch_pagan_test(X, residuals, Ξ±)
    bpt = HypothesisTests.BreuschPaganTest(X, residuals)
    pval = pvalue(bpt)
    alpha_value = round((1 - Ξ±)*100, digits=3)
    # Fixed user-facing typos: "Breush" -> "Breusch", "hyposthesis" -> "hypothesis".
    topresent = string("Breusch-Pagan Test (homoskedasticity of residuals):\n T*RΒ² statistic: $(round(bpt.lm, sigdigits=6)) degrees of freedom: $(round(bpt.dof, digits=6)) p-value: $(round(pval, digits=6))\n")
    if pval > Ξ±
        topresent *= " with $(alpha_value)% confidence: fail to reject null hypothesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hypothesis.\n"
    end
    return topresent
end
"""
    present_white_test(X, residuals, Ξ±)

(internal) Run the White test for homoskedasticity of the residuals and return a
formatted summary string including the decision at significance level `Ξ±`.
"""
function present_white_test(X, residuals, Ξ±)
    wt = HypothesisTests.WhiteTest(X, residuals)
    pval = pvalue(wt)
    alpha_value = round((1 - Ξ±)*100, digits=3)
    # Fixed user-facing typo: "hyposthesis" -> "hypothesis".
    topresent = string("White Test (homoskedasticity of residuals):\n T*RΒ² statistic: $(round(wt.lm, sigdigits=6)) degrees of freedom: $(round(wt.dof, digits=6)) p-value: $(round(pval, digits=6))\n")
    if pval > Ξ±
        topresent *= " with $(alpha_value)% confidence: fail to reject null hypothesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hypothesis.\n"
    end
    return topresent
end
"""
    present_kolmogorov_smirnov_test(residuals, Ξ±)

(internal) Run the approximate one-sample Kolmogorov-Smirnov normality test on the
residuals and return a formatted summary string.
"""
function present_kolmogorov_smirnov_test(residuals, Ξ±)
    fitted_residuals = fit(Normal, residuals)
    kst = HypothesisTests.ApproximateOneSampleKSTest(residuals, fitted_residuals)
    pval = pvalue(kst)
    KS_stat = sqrt(kst.n)*kst.Ξ΄
    alpha_value = round((1 - Ξ±)*100, digits=3)
    topresent = string("Kolmogorov-Smirnov test (Normality of residuals):\n KS statistic: $(round(KS_stat, sigdigits=6)) observations: $(kst.n) p-value: $(round(pval, digits=6))\n")
    # Fixed user-facing typo ("hyposthesis" -> "hypothesis")
    if pval > Ξ±
        topresent *= " with $(alpha_value)% confidence: fail to reject null hypothesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hypothesis.\n"
    end
    # Fix: previously relied on the implicit value of the if/else expression;
    # return the summary explicitly, consistently with the other present_* helpers.
    return topresent
end
"""
    present_anderson_darling_test(residuals, Ξ±)

(internal) Run the one-sample Anderson-Darling normality test on the residuals and
return a formatted summary string.
"""
function present_anderson_darling_test(residuals, Ξ±)
    fitted_residuals = fit(Normal, residuals)
    adt = HypothesisTests.OneSampleADTest(residuals, fitted_residuals)
    pval = pvalue(adt)
    alpha_value = round((1 - Ξ±)*100, digits=3)
    topresent = string("AndersonβDarling test (Normality of residuals):\n AΒ² statistic: $(round(adt.AΒ², digits=6)) observations: $(adt.n) p-value: $(round(pval, digits=6))\n")
    # Fixed user-facing typo ("hyposthesis" -> "hypothesis")
    if pval > Ξ±
        topresent *= " with $(alpha_value)% confidence: fail to reject null hypothesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hypothesis.\n"
    end
    # Fix: previously relied on the implicit value of the if/else expression;
    # return the summary explicitly, consistently with the other present_* helpers.
    return topresent
end
"""
    present_jarque_bera_test(residuals, Ξ±)

(internal) Run the Jarque-Bera normality test on the residuals and return a
formatted summary string.
"""
function present_jarque_bera_test(residuals, Ξ±)
    jbt = HypothesisTests.JarqueBeraTest(residuals)
    pval = pvalue(jbt)
    alpha_value = round((1 - Ξ±)*100, digits=3)
    topresent = string("Jarque-Bera test (Normality of residuals):\n JB statistic: $(round(jbt.JB, digits=6)) observations: $(jbt.n) p-value: $(round(pval, digits=6))\n")
    # Fixed user-facing typo ("hyposthesis" -> "hypothesis")
    if pval > Ξ±
        topresent *= " with $(alpha_value)% confidence: fail to reject null hypothesis.\n"
    else
        topresent *= " with $(alpha_value)% confidence: reject null hypothesis.\n"
    end
    # Fix: previously relied on the implicit value of the if/else expression;
    # return the summary explicitly, consistently with the other present_* helpers.
    return topresent
end
"""
    warn_sigma(lm, stat)
    warn_sigma(white_needed, hac_needed, stat)

(internal) Print a warning when the statistic `stat`, which relies on Sigma^2, is
requested while a robust covariance estimator is also requested (indicating the
assumptions needed for Sigma^2 may not hold).
"""
function warn_sigma(lm, stat)
    warn_sigma(lm.white_types, lm.hac_types , stat)
end
function warn_sigma(white_needed, hac_needed, stat)
    if length(white_needed) > 0 || length(hac_needed) > 0
        # Bug fix: this used `println(io, ...)` but no `io` variable exists in this
        # method, raising an UndefVarError whenever the warning triggered; the
        # message now goes to stdout.
        println("The $(stat) statistic that relies on Sigma^2 has been requested. At least one robust covariance have been requested indicating that the assumptions needed for Sigma^2 may not be present.")
    end
end
# Square root that yields 0 for negative inputs instead of throwing a DomainError:
# the root is taken in the complex plane and only the real part is kept.
# Broadcasts over both scalars and arrays.
function real_sqrt(x)
    return real.(sqrt.(complex.(x, 0)))
end
# Predicates over StatsModels terms, used when filtering formula components.
isnotintercept(t::AbstractTerm) = !(t isa InterceptTerm)
isnotconstant(t::AbstractTerm) = !(t isa ConstantTerm)
iscontinuousterm(t::AbstractTerm) = t isa ContinuousTerm
iscategorical(t::AbstractTerm) = t isa CategoricalTerm
# Check that every combination of categorical levels in formula `f` has at
# least one observation in `df`; print a warning (with the frequency table)
# when some combination is empty, or a confirmation when `verbose` is true.
function check_cardinality(df::AbstractDataFrame, f, verbose=false)
    cat_syms = [t.sym for t in filter(iscategorical, terms(f.rhs))]
    isempty(cat_syms) && return
    freqs = freqtable(df, cat_syms...)
    if count(==(0), freqs) > 0
        println("At least one group of categories have no observation. Use frequency tables to identify which one(s).")
        println(my_namedarray_print(freqs))
    elseif verbose == true
        println("No issue identified.")
    end
end
"""
get_r_significance_code(v::Real)
(internal) to transform the p value into the code used in lm (R language)
reference text from R: "Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1"
used exclusively to handle the function ```StatsBase.coefnames``` which sometime return an array or when there is only one element the element alone.
"""
function get_r_significance_code(v::Real)
if v < 0.001
return "***"
elseif v < 0.01
return "**"
elseif v < 0.05
return "*"
elseif v < 0.1
return "."
end
return " "
end | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 13204 | using StatsBase, LinearAlgebra , Distributions # for the density/histogram plot
"""
    fitplot!(all_plots, results, lm, plot_args)

Add a "fit plot" (observed points, fitted line, confidence band `uclp`/`lclp`
and prediction interval `ucli`/`lcli`) to `all_plots`, for simple regressions
only. Returns `nothing` without plotting when the model has zero or more than
one non-intercept independent variable, or no response term.
"""
function fitplot!(all_plots, results, lm, plot_args)
    lhs = terms(lm.updformula.lhs)
    rhs_noint = filter(isnotintercept, terms(lm.updformula.rhs))
    length(rhs_noint) == 0 && return nothing
    length(rhs_noint) > 1 && begin
        println("LinearRegressionKit: Fit plot was requested but not appropriate for regression with multiple independent variables")
        return nothing
    end
    length(lhs) == 0 && return nothing
    actx = first(rhs_noint).sym
    acty = first(lhs).sym
    acttitle = "Fit Plot: " * string(actx) * " vs " * string(acty)
    # Prediction-interval layer: a light band by default, replaced by per-point
    # error bars when the regression is weighted (see `if lm.weighted` below).
    pred_interval = @vlplot(
        mark = {:errorband, color = "lightgrey"},
        y = { field = :ucli, type = :quantitative, title = acty} ,
        y2 = { field = :lcli, type = :quantitative },
        x = {actx, type = :quantitative})
    if lm.weighted
        pred_interval = @vlplot(
            mark = {:errorbar, ticks=true, color = "dimgrey"},
            y = { field = :ucli, type = :quantitative, title = acty} ,
            y2 = { field = :lcli, type = :quantitative },
            x = {actx, type = :quantitative})
    end
    # Layer order: prediction interval, confidence band, fitted line, points.
    fp = select(results, [actx, acty, :ucli, :lcli, :uclp, :lclp, :predicted]) |> @vlplot() +
        pred_interval+
        @vlplot(
            mark = {:errorband, color = "darkgrey" },
            y = { field = :uclp, type = :quantitative, title = acty},
            y2 = { field = :lclp, type = :quantitative },
            x = {actx, type = :quantitative} ) +
        @vlplot(
            mark = { :line, color = "darkorange" },
            x = {actx, type = :quantitative},
            y = {:predicted, type = :quantitative}) +
        @vlplot(
            :point,
            x = { actx, type = :quantitative, axis = {grid = false}, scale = {zero = false}},
            y = { acty, type = :quantitative, axis = {grid = false}, scale = {zero = false}},
            title = acttitle, width = plot_args["plot_width"], height = plot_args["plot_width"]
        )
    all_plots["fit"] = fp
end
"""
    simple_residuals_plot(results, dep_var=nothing, show_density=false, st_res=false;
                          plot_width=400, loess_bandwidth=0.99)

Build one residuals scatter plot against `dep_var` (defaulting to the
predicted values), with a horizontal zero line and an optional loess smoother
(disabled when `loess_bandwidth === nothing`). When `st_res` is true the
studentized residuals (`:student`) are plotted instead of the raw residuals.
When `show_density` is true a vertical density side-panel is attached.
"""
function simple_residuals_plot(results, dep_var=nothing, show_density=false, st_res=false; plot_width=400, loess_bandwidth::Union{Nothing,Float64}=0.99)
    if isnothing(dep_var)
        dep_var = :predicted
    end
    yaxis = :residuals
    if st_res == true
        yaxis = :student
    end
    # Loess layer is a no-op spec unless a bandwidth was provided.
    loess_p = @vlplot()
    if !isnothing(loess_bandwidth)
        loess_p = @vlplot(
            transform = [ { loess = yaxis, on = dep_var, bandwidth = loess_bandwidth } ],
            mark = {:line, color = "firebrick"},
            x = {dep_var, type = :quantitative},
            y = {yaxis, type = :quantitative} )
    end
    title = "Residuals plot: $(string(dep_var))"
    if st_res == true
        title = "st " * title
    end
    sresults = select(results, [yaxis, dep_var])
    p = sresults |>
        @vlplot(title = title, width = plot_width, height = plot_width,
            x = {type = :quantitative, axis = {grid = false}, scale = {zero = false, padding = 5}},
            y = {type = :quantitative, axis = {grid = false}}) +
        @vlplot(:point,
            x = {dep_var, type = :quantitative},
            y = {yaxis, type = :quantitative}) +
        loess_p +
        @vlplot(mark = {:rule, color = :darkgrey}, y = {type = :quantitative, datum = 0})
    if show_density == false
        return p
    end
    # Marginal density of the residuals, drawn as a narrow horizontal panel
    # glued to the right of the scatter plot.
    mp = sresults |> @vlplot(
        width = 100, height = plot_width,
        mark = {:area, orient = "horizontal"},
        transform = [{density = yaxis, bandwidth = 0.5}],
        x = {"density:q", title = nothing, axis = nothing},
        y = {"value:q", title = nothing, axis = nothing } )
    tp = @vlplot(bounds = :flush, spacing = 5, config = {view = {stroke = :transparent}}) + [p mp]
    return tp
end
"""
    residuals_plots!(all_plots, results, lm, plot_args)

Add the residual diagnostic plots to `all_plots`: the overall residuals and
studentized-residuals plots (against predicted values), plus one pair of
plots per non-intercept independent variable. A density side-panel is added
for continuous variables when `plot_args["residuals_with_density"]` is true.
Returns `nothing` when the model has no non-intercept term.
"""
function residuals_plots!(all_plots, results, lm, plot_args)
    rhs_noint = filter(isnotintercept, terms(lm.updformula.rhs))
    length(rhs_noint) == 0 && return nothing
    # (removed an unused `plots` vector that was allocated and never read)
    plot_width = get(plot_args, "plot_width", 400)
    loess_bw = get(plot_args, "loess_bw", 0.6)
    density_requested = get(plot_args, "residuals_with_density", false)
    # main residual plots, against the predicted values
    all_plots["residuals"] = simple_residuals_plot(results, plot_width=plot_width, loess_bandwidth=loess_bw)
    all_plots["st residuals"] = simple_residuals_plot(results, nothing, false, true, plot_width=plot_width, loess_bandwidth=loess_bw)
    # additional plot pair per independent variable
    for c_dependent_var in rhs_noint
        if !isa(c_dependent_var, ConstantTerm)
            c_sym = c_dependent_var.sym
            # the density side-panel only makes sense for continuous terms
            show_density = density_requested && iscontinuousterm(c_dependent_var)
            all_plots[string("residuals ", string(c_sym))] = simple_residuals_plot(results, c_sym, show_density, plot_width=plot_width, loess_bandwidth=loess_bw)
            all_plots[string("st residuals ", string(c_sym))] = simple_residuals_plot(results, c_sym, show_density, true, plot_width=plot_width, loess_bandwidth=loess_bw)
        end
    end
end
"""
    qqplot(results, fitted_residuals, plot_width)

Build a quantile-quantile plot comparing the empirical quantiles of the
residuals with the theoretical quantiles of `fitted_residuals` (the fitted
normal distribution), plus a reference line through the first and last
theoretical quantiles.
"""
function qqplot(results, fitted_residuals, plot_width)
    n = length(results.residuals)
    # inner quantile grid: 1/(n-1), 2/(n-1), ..., (n-2)/(n-1)
    grid = [1 / (n - 1):1 / (n - 1):1 - (1 / (n - 1));]
    qu_theo = quantile.(fitted_residuals, grid)
    qu_empi = quantile(results.residuals, grid)
    qqdf = DataFrame(x=qu_theo, y=qu_empi)
    qqline = [first(qu_theo), last(qu_theo)]
    qqldf = DataFrame(x=qqline, y=qqline)
    # local renamed so it no longer shadows the function's own name;
    # also fixed the axis-title typo "Theoritical" -> "Theoretical"
    plt = qqdf |> @vlplot() + @vlplot(
        title = "Residuals QQ-Plot",
        width = plot_width, height = plot_width,
        :point,
        x = { :x, type = :quantitative, title = "Theoretical quantiles", axis = {grid = false}, scale = {zero = false, padding = 5} },
        y = { :y, type = :quantitative, title = "Empirical quantiles", axis = {grid = false}, scale = {zero = false, padding = 5} }
    ) + @vlplot(
        {:line, color = "darkgrey"},
        data = qqldf, x = {:x, type = :quantitative}, y = {:y, type = :quantitative} )
    return plt
end
# Plotting range for a distribution: use its true support bounds when they
# are finite, otherwise fall back to the alpha / (1 - alpha) quantiles.
function default_range(dist::Distribution, alpha=0.0001)
    lo = minimum(dist)
    hi = maximum(dist)
    isfinite(lo) || (lo = quantile(dist, alpha))
    isfinite(hi) || (hi = quantile(dist, 1 - alpha))
    return lo, hi
end
rice_rule(obs) = round(Int, 2. * obs^(1 / 3))
"""
    histogram_density(results, fitted_residuals, plot_width)

Build a normalized histogram of the residuals overlaid with the probability
density curve of `fitted_residuals` (the fitted normal distribution).
"""
function histogram_density(results, fitted_residuals, plot_width)
    # data for the density curve: sample the pdf on an evenly spaced grid
    # covering the distribution's default plotting range
    frmin, frmax = default_range(fitted_residuals)
    rangetest = (frmax - frmin) / plot_width
    qpdf = [pdf(fitted_residuals, x) for x in frmin:rangetest:frmax]
    xs = [x for x in frmin:rangetest:frmax]
    tdf = DataFrame(x=xs, y=qpdf)
    # data for the histogram (normalized so it is comparable with the pdf)
    hhh = fit(Histogram, results.residuals)
    nhh = normalize(hhh)
    all_edges = collect(first(nhh.edges))
    bin_starts = [x for x in all_edges[1:end - 1]]
    bin_ends = [x for x in all_edges[2:end]]
    counts = nhh.weights
    hdf = DataFrame(bs=bin_starts, be=bin_ends, y=counts)
    # NOTE(review): assumes at least two bins; a single-bin histogram would
    # make this indexing throw — confirm Histogram fitting guarantees this.
    step_size = bin_starts[2] - bin_starts[1]
    hdplot = hdf |> @vlplot(width = plot_width, height = plot_width, title = "Residuals: histogram and PDF") +
        @vlplot(
            :bar,
            x = {:bs, type = :quantitative, title = "residuals", bin = {binned = true, step = step_size}, axis = {grid = false}},
            x2 = {:be, type = :quantitative},
            y = {:y, type = :quantitative, stack = "zero", axis = {grid = false} } ) +
        @vlplot(
            data = tdf,
            {:line, color = "darkorange"},
            x = {:x, type = :quantitative, scale = {zero = false}, axis = {grid = false}},
            y = {:y, type = :quantitative, scale = {zero = false}, axis = {grid = false}} )
    return hdplot
end
"""
    normality_plots!(all_plots, results, lm, plot_args)

Add the residual-normality diagnostics to `all_plots`: a QQ-plot and a
histogram overlaid with the fitted normal density.
"""
function normality_plots!(all_plots, results, lm, plot_args)
    plot_width = get(plot_args, "plot_width", nothing)
    fitted_residuals = fit(Normal, results.residuals)
    all_plots["qq plot"] = qqplot(results, fitted_residuals, plot_width)
    all_plots["histogram density"] = histogram_density(results, fitted_residuals, plot_width)
end
"""
    cooksd_plot!(all_plots, results, lm, plot_args)

Add a Cook's distance needle plot to `all_plots`: one vertical rule per
observation, with a horizontal reference line at the common 4/n threshold.
"""
function cooksd_plot!(all_plots, results, lm, plot_args)
    plot_width = get(plot_args, "plot_width", nothing)
    plot_height = plot_width / 2
    sdf = select(results, [:cooksd])
    sdf.Observations = rownumber.(eachrow(sdf))
    # conventional influence threshold: 4 / number of observations
    threshold_cooksd = 4 / lm.observations
    p = sdf |>
        @vlplot(title = "Cook's Distance", width = plot_width, height = plot_height) +
        @vlplot(mark = {:rule, color = :darkgrey}, y = {type = :quantitative, datum = threshold_cooksd}) +
        @vlplot(
            mark = {:rule, color = :steelblue},
            x = {:Observations, type = :quantitative, axis = {grid = false}},
            y = {type = :quantitative, datum = 0}, y2 = {:cooksd, type = :quantitative} )
    all_plots["cooksd"] = p
end
"""
    scalelocation_plot!(all_plots, results, lm, plot_args)

Add a scale-location plot to `all_plots`: the square root of the absolute
studentized residuals against the predicted values, with a loess smoother.
"""
function scalelocation_plot!(all_plots, results, lm, plot_args)
    plot_width = get(plot_args, "plot_width", nothing)
    sdf = select(results, [:predicted, :student])
    sdf.sqrtstudent = sqrt.(abs.(sdf.student))
    # NOTE(review): the y-axis title below ("βstudent") looks mojibake-damaged;
    # it is presumably "√student" — confirm against the upstream source.
    p = sdf |>
        @vlplot() +
        @vlplot(
            title = "Scale and location plot" ,
            width = plot_width , height = plot_width,
            :point,
            x = {:predicted, type = :quantitative, scale = {zero = false}, axis = { grid = false} },
            y = {:sqrtstudent, type = :quantitative, title = "βstudent",
                scale = {zero = false}, axis = { grid = false} }) + @vlplot(
            transform = [ { loess = :sqrtstudent, on = :predicted, bandwidth = 0.6 } ],
            mark = {:line, color = "firebrick"},
            x = {:predicted, type = :quantitative}, y = {:sqrtstudent, type = :quantitative} )
    all_plots["scale location"] = p
end
"""
    leverage_plot!(all_plots, results, lm, plot_args)

Add a leverage vs. studentized-residual (rstudent) scatter plot to
`all_plots`, with reference rules at rstudent = ±2 and at the conventional
leverage threshold 2p/n.
"""
function leverage_plot!(all_plots, results, lm, plot_args)
    # conventional high-leverage cutoff: 2 * parameters / observations
    threshold_leverage = 2 * lm.p / lm.observations
    plot_width = plot_args["plot_width"]
    p = select(results, [:leverage, :rstudent]) |>
        @vlplot(title = "Leverage vs Rstudent", width = plot_width, height = plot_width,
            x = {axis = {grid = false}}, y = {axis = {grid = false}} ) +
        @vlplot(:point,
            x = {:leverage, type = :quantitative},
            y = {:rstudent, type = :quantitative}) +
        @vlplot(mark = {:rule, color = :darkgrey}, y = {datum = -2}) +
        @vlplot(mark = {:rule, color = :darkgrey}, x = {datum = threshold_leverage}) +
        @vlplot(mark = {:rule, color = :darkgrey}, y = {datum = 2})
    all_plots["leverage"] = p
end
"""
    escape_javascript!(vec)

In-place replace every `&` in the strings of `vec` with `" ampersand "`, so
the names can be embedded in the Vega-Lite (JavaScript) specifications.
"""
function escape_javascript!(vec)
    # eachindex is the idiomatic (and index-style-safe) way to walk the vector
    for i in eachindex(vec)
        vec[i] = replace(vec[i], "&" => " ampersand ")
    end
end
"""
    ridge_traceplots(rdf::AbstractDataFrame; width=400)

Build the ridge-regression trace plots from a results data frame `rdf` and
return them in a `Dict` with keys "coefs traceplot", "vifs traceplot",
"vifs traceplot log" (log-scale y-axis) and "rmse r2 plot".

Assumes the column layout produced by the ridge fit: `k`, four summary
statistics (columns 2:5, including RMSE and R2), then the coefficient
columns, then the VIF columns — TODO confirm against the ridge code.
"""
function ridge_traceplots(rdf::AbstractDataFrame; width=400)
    # coefficients and VIFs share the remaining columns equally
    nb_coefs = round(Int, (ncol(rdf) - 5) / 2)
    dfstats = names(rdf)[2:5]
    dfcoefs = nothing
    dfvifs = nothing
    all_names = names(rdf)
    # escape '&' so the names survive embedding in the Vega-Lite fold transform
    # NOTE(review): the escaped names are used in the `fold` lists while the
    # DataFrame keeps its original column names — verify behavior when a
    # column name actually contains '&'.
    escape_javascript!(all_names)
    if all_names[6] == "(Intercept)"
        # move "(Intercept)" to the end of the coefficient list
        dfcoefs = all_names[7:6 - 1 + nb_coefs]
        push!(dfcoefs, "(Intercept)")
        # NOTE(review): this slice selects nb_coefs - 1 VIF columns; confirm
        # the first VIF column is intentionally skipped.
        dfvifs = all_names[end - nb_coefs + 2:end]
    else
        dfcoefs = all_names[6:6 - 1 + nb_coefs]
        dfvifs = all_names[end - nb_coefs + 2:end]
    end
    traceplot = rdf |> @vlplot(title = "Ridge trace plot", width = width, height = width,
        transform = [
            {fold = dfcoefs},
        ],
        mark = :line,
        x = {field = :k , type = :quantitative, scale = {zero = false}, axis = { grid = false}} ,
        y = {field = :value, type = :quantitative, scale = {zero = false}, axis = { grid = false}},
        color = {field = :key, type = :nominal},
    )
    viftraceplot = rdf |> @vlplot(title = "VIF trace plot", width = width, height = width,
        transform = [
            {fold = dfvifs},
        ],
        mark = :line,
        x = {field = :k, type = :quantitative, scale = {zero = false}, axis = { grid = false}} ,
        y = {field = :value, type = :quantitative, scale = {type = :linear, zero = false}, axis = { grid = false} },
        color = {field = :key, type = :nominal},
    )
    # same VIF plot with a logarithmic y-axis
    viftraceplotlog = rdf |> @vlplot(title = "VIF trace plot", width = width, height = width,
        transform = [
            {fold = dfvifs},
        ],
        mark = :line,
        x = {field = :k, type = :quantitative, scale = {zero = false}, axis = { grid = false}} ,
        y = {field = :value, type = :quantitative, scale = {type = :log, zero = false}, axis = { grid = false} },
        color = {field = :key, type = :nominal},
    )
    rmse_r2_plot = select(rdf, ["k", "RMSE", "R2"]) |> @vlplot(title = "Trace plot", width = width, height = width,
        transform = [
            {fold = ["RMSE", "R2"]},
        ],
        mark = :line,
        x = {field = :k, type = :quantitative, scale = {zero = false}, axis = { grid = false}} ,
        y = {field = :value, type = :quantitative, scale = {zero = false}, axis = { grid = false}},
        color = {field = :key, type = :nominal},
    )
    return Dict([("coefs traceplot", traceplot), ("vifs traceplot", viftraceplot), ("vifs traceplot log", viftraceplotlog), ("rmse r2 plot", rmse_r2_plot)])
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 371 | using LinearRegressionKit
using Test, DataFrames, StatsModels
leaq(a,b) = (a <= b) || (a β b)
include("test_sweep_operator.jl")
include("test_utilities.jl")
include("test_LinearRegression.jl")
include("test_cooksd.jl")
include("test_lessthanfullrank.jl")
include("test_noint.jl")
include("test_heteroscedasticity.jl")
include("test_kfold.jl")
include("test_ridge.jl")
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 15594 |
@testset "from glm" begin
fdf = DataFrame(Carb=[0.1,0.3,0.5,0.6,0.7,0.9], OptDen=[0.086,0.269,0.446,0.538,0.626,0.782])
lm1 = regress(@formula(OptDen ~ 1 + Carb), fdf, req_stats="all")
target_coefs = [0.005085714285713629, 0.8762857142857156]
@test isapprox(target_coefs, lm1.coefs)
@test lm1.p == 2
@test isapprox(lm1.R2, 0.9990466748057584)
@test isapprox(lm1.ADJR2, 0.998808343507198)
@test isapprox(lm1.AIC, -55.43694668654871) # using the SAS formula rather than the Julia-Statsmodel-GLM
@test isapprox(lm1.stderrors, [0.007833679251299831, 0.013534536322659505])
@test isapprox(lm1.t_values, [0.6492114525712505, 64.74442074669666])
@test isapprox(lm1.p_values, [0.5515952883836446, 3.409192065429258e-7])
@test isapprox(lm1.p_values, [0.5515952883836446, 3.409192065429258e-7])
@test isapprox(lm1.ci_low, [-0.016664066127247305, 0.8387078171615459])
@test isapprox(lm1.ci_up, [0.02683549469867456, 0.9138636114098853])
@test isapprox(lm1.t_statistic, 2.7764451051977934)
@test isapprox(lm1.VIF, [0., 1.])
@test isapprox(lm1.PRESS, 0.0010755278041106075)
@test isapprox(lm1.Type1SS, [1.2576681666666665, 0.3135496333333334])
@test isapprox(lm1.Type2SS, [3.152636815919868e-5, 0.31354963333333363])
@test isapprox(lm1.f_value, 4191.84)
@test leaq(lm1.f_pvalue, 0.001)
end
@testset "from glm regresspredict" begin
fdf = DataFrame(Carb=[0.1,0.3,0.5,0.6,0.7,0.9], OptDen=[0.086,0.269,0.446,0.538,0.626,0.782])
lm1 = regress(@formula(OptDen ~ 1 + Carb), fdf, Ξ±=0.05, req_stats=[:default, :aic, :vif, :press, :t1ss, :t2ss])
target_coefs = [0.005085714285713629, 0.8762857142857156]
@test isapprox(target_coefs, lm1.coefs)
@test lm1.p == 2
@test isapprox(lm1.R2, 0.9990466748057584)
@test isapprox(lm1.ADJR2, 0.998808343507198)
@test isapprox(lm1.AIC, -55.43694668654871) # using the SAS formula rather than the Julia-Statsmodel-GLM
@test isapprox(lm1.stderrors, [0.007833679251299831, 0.013534536322659505])
@test isapprox(lm1.t_values, [0.6492114525712505, 64.74442074669666])
@test isapprox(lm1.p_values, [0.5515952883836446, 3.409192065429258e-7])
@test isapprox(lm1.p_values, [0.5515952883836446, 3.409192065429258e-7])
@test isapprox(lm1.ci_low, [-0.016664066127247305, 0.8387078171615459])
@test isapprox(lm1.ci_up, [0.02683549469867456, 0.9138636114098853])
@test isapprox(lm1.t_statistic, 2.7764451051977934)
@test isapprox(lm1.VIF, [0., 1.])
@test isapprox(lm1.PRESS, 0.0010755278041106075)
@test isapprox(lm1.Type1SS, [1.2576681666666665, 0.3135496333333334])
@test isapprox(lm1.Type2SS, [3.152636815919868e-5, 0.31354963333333363])
lm1 = regress(@formula(OptDen ~ 1 + Carb), fdf, Ξ±=0.05, req_stats=["none"])
target_coefs = [0.005085714285713629, 0.8762857142857156]
@test isapprox(target_coefs, lm1.coefs)
@test lm1.p == 2
lm1 = regress(@formula(OptDen ~ 1 + Carb), fdf, Ξ±=0.05, req_stats=["r2", "P_values"])
target_coefs = [0.005085714285713629, 0.8762857142857156]
@test isapprox(target_coefs, lm1.coefs)
@test lm1.p == 2
@test isapprox(lm1.R2, 0.9990466748057584)
@test isapprox(lm1.p_values, [0.5515952883836446, 3.409192065429258e-7])
df = DataFrame(y=[1., 3., 3., 2., 2., 1.], x1=[1., 2., 3., 1., 2., 3.], x2=[1., 1., 1., -1., -1., -1.])
lm2 = regress(@formula(y ~ 1 + x1 + x2), df, req_stats=["default", "r2"])
@test isapprox([1.5, 0.25, 0.3333333333333333], lm2.coefs)
@test isapprox(0.22916666666666663, lm2.R2)
@test isapprox(0.445945946, lm2.f_value)
@test 2 == lm2.dof_model
@test 3 == lm2.dof_error
@test isapprox(0.676769425, lm2.f_pvalue)
lm2 = regress(@formula(y ~ 0 + x1 + x2), df)
@test isapprox(0.8210034013605, lm2.R2)
@test isapprox(9.1733966745843, lm2.f_value)
@test 2 == lm2.dof_model
@test 4 == lm2.dof_error
@test isapprox(0.032039782324494, lm2.f_pvalue)
y = [3.547744106900422, 9.972950249405148, 16.471345464154027, 22.46768807351274, 20.369933318011807, 21.18590757820348, 29.962620198209024, 30.684400502954748, 29.28429078492597, 34.272759386588824, 33.05504692986838, 45.09273876302829, 45.28374262744938, 54.54563960566191, 46.86173948296966, 46.85926120310666, 67.54337216713414, 66.0400205145086, 64.77001443647681, 63.98759256558095, 15.016939388490687, 12.380885701920008, 10.221963402745288, 24.826987790646672, 22.231511892187548, 18.05125492502642, 21.809661866284717, 28.533306224702308, 24.70476800009685, 39.592181057942916, 37.425282624708906, 33.89372811236659, 44.07442335927311, 42.61943719318272, 52.32531447897055, 44.12096163880534, 49.19965880584543, 52.304114239221036, 59.79488104919937, 65.00419916894333, 10.978506745605673, 12.944637189081874, 16.96525561080181, 15.93489355798633, 33.559135307088305, 28.887571687420433, 28.296418324622593, 32.2405215537489, 31.466024917490223, 37.78855308849255, 48.09725215994402, 40.70190542438069, 43.2525436240948, 44.75159242105558, 52.553066965996074, 53.265851121437095, 60.092700204726015, 62.78014347092756, 69.07895282754228, 74.27890668517966, 10.57283140105129, 7.290600131361426, 16.10299402050687, 14.428954773831242, 22.418180226673464, 28.21022852582732, 29.60436622143203, 33.648588929669636, 37.451930576147994, 43.85548900583812, 41.44168340404242, 43.48815266671309, 54.72835160956764, 53.55177468229062, 50.62088969800169, 50.863408563713335, 47.40251347184729, 64.29413401802115, 64.20687412126479, 73.66653216085827, 15.820723594639162, 21.973463928234743, 23.804440715385162, 15.139822408433913, 30.015089890369985, 28.08454394421385, 33.041880065463566, 28.49429418531324, 38.33763660905526, 34.503176013521724, 48.748946870573235, 45.45351516824085, 53.522908096188765, 45.95216131836423, 63.13018379633756, 63.4236036208151, 65.28265579397677, 55.43500146374922, 76.50187470137375, 67.22421998359037, 19.289152114521762, 27.63557010588222, 17.700031078096686, 
25.462368278660957, 22.94613580060685, 36.19731649621813, 35.22216995579936, 25.526434727578838, 45.882864925557726, 38.71433797181679, 50.617762276434554, 41.96951039650285, 52.265123328453214, 45.383991138243765, 62.7923270665056, 64.40670612696276, 74.89775274405821, 72.89056716347118, 66.37071343209973, 76.32913721410918, 29.923781174452596, 16.465832617450324, 30.530915733275403, 30.512411156491954, 28.04012351007911, 26.315140074869376, 37.18491928428231, 41.958551085353626, 48.370736387628895, 49.419917216561835, 48.67296268029715, 55.484477112881166, 51.120639229597025, 54.797092949987224, 61.92608065418044, 69.79495109420618, 64.43521939892794, 74.35280205157255, 77.22355341160723, 68.90654715174155, 10.87752158238755, 20.25006748725279, 31.71957876614495, 31.42898857400639, 32.505052996368256, 34.1225641368903, 35.66496173153403, 31.76371648643483, 42.97107454773545, 47.41827763710622]
x2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8]
x3 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
df = DataFrame(y=y, x2=x2, x3=x3)
lrint = regress(@formula(y ~ x2 +x3), df, req_stats=["default", "pcorr1", "pcorr2", "scorr1", "scorr2", "vif"])
end
@testset "weighted regression" begin
tw = [
2.3 7.4 0.058
3.0 7.6 0.073
2.9 8.2 0.114
4.8 9.0 0.144
1.3 10.4 0.151
3.6 11.7 0.119
2.3 11.7 0.119
4.6 11.8 0.114
3.0 12.4 0.073
5.4 12.9 0.035
6.4 14.0 0
] # data from https://blogs.sas.com/content/iml/2016/10/05/weighted-regression.html
df = DataFrame(tw, [:y,:x,:w])
f = @formula(y ~ x)
lm = regress(f, df, weights="w", req_stats=["default", "t1ss", "t2ss"])
@test isapprox([2.328237176867885, 0.08535712911515277], lm.coefs)
@test isapprox(0.014954934572439349, lm.R2)
@test isapprox(-0.10817569860600562, lm.ADJR2)
@test isapprox([2.551864989438224, 0.24492357920520605], lm.stderrors)
@test isapprox([0.3882424860021164, 0.7364546437428148], lm.p_values)
@test isapprox([10.272024999999998, 0.02220919946016564], lm.Type1SS)
@test isapprox([0.15221363313265734, 0.022209199460165682], lm.Type2SS)
res = predict_in_sample(lm, df, req_stats="all")
@test isapprox([2.9598799323200153, 2.976951358143046, 3.0281656356121376, 3.09645133890426, 3.2159513196654737, 3.326915587515172, 3.326915587515172, 3.335451300426688, 3.386665577895779, 3.4293441424533553], res.predicted)
@test isapprox([0.2149107957203927, 0.24394048660618184, 0.27451117660671137, 0.22039739135761363, 0.15181541173049415, 0.19864023422787455, 0.19864023422787455, 0.20135117925989024, 0.18147639530421758, 0.1143166949587325], res.leverage)
@test isapprox([1.957110958921382, 1.765206920504513, 1.4298044633303422, 1.244877562198597, 1.1810280676815839, 1.3571510747551465, 1.3571510747551465, 1.388160919947073, 1.7203164594367346, 2.412834568973736], res.stdi)
@test isapprox([0.8231374655094712, 0.7816957716988308, 0.6635670917274183, 0.5290286945664605, 0.4287722865207785, 0.5524810400784785, 0.5524810400784785, 0.568305570562634, 0.6742266146509985, 0.7728194750528609], res.stdp)
@test isapprox([1.5732681689017483, 1.3761754659974743, 1.0787484567368795, 0.9949760929369788, 1.0134771577471209, 1.1096794313749598, 1.1096794313749598, 1.1318340411046413, 1.4318958288930972, 2.151109196482762], res.stdr)
@test isapprox([0.02407872915271305, 4.5252332002240634e-5, 0.0026705586843115184, 0.414368722166146, 0.31984274220916814, 0.0075059979227238794, 0.10614122824505004, 0.15735260690358094, 0.008083642818377243, 0.054162396113877506], res.cooksd)
@test isapprox([1.0617215330136984, 1.1743576761339307, 1.4979771781033089, 1.8765089815923404, 2.2272006538876132, 2.0528920244723023, 2.0528920244723023, 2.024936304649691, 1.8318962164458708, 1.6472192372151455], res.lclp)
@test isapprox([4.858038331626332, 4.779545040152161, 4.5583540931209665, 4.3163936962161795, 4.204701985443334, 4.600939150558042, 4.600939150558042, 4.645966296203685, 4.941434939345688, 5.2114690476915655], res.uclp)
@test isapprox([-1.5532260320060822, -1.093623100031372, -0.2689693693610047, 0.225758532651414, 0.49249571179955565, 0.19731959703302637, 0.19731959703302637, 0.13434647869991867, -0.5803912914251206, -2.134662351163641], res.lcli)
@test isapprox([7.472985896646113, 7.047525816317464, 6.325300640585279, 5.967144145157105, 5.939406927531392, 6.456511577997318, 6.456511577997318, 6.536556122153457, 7.353722447216679, 8.993350636070351], res.ucli)
@test isapprox([-0.41943258330883165, 0.01674833073720578, -0.11880956567004286, 1.7121503453084896, -1.8904731152742318, 0.24609306504533465, -0.9254164387302033, 1.117256288156119, -0.2700375055877384, 0.9161114929771242], res.student)
@test isapprox([-0.39672963851269843, 0.015666903524005342, -0.1112343500884851, 2.0120993810497283, -2.3774333761244986, 0.23107529120204373, -0.9160676175777195, 1.1376116334771753, -0.25375610179858576, 0.9057709902138592], res.rstudent)
@test isapprox([-0.8405158658696843, 0.03048522166395697, -0.1766610752356951, 2.1851500267069777, -2.2588848537963426, 0.3407762956775175, -1.2814663667643953, 1.5833601286751433, -0.47239392447269685, 2.225011859577532], res.press)
end
@testset "predictions statistics" begin
t_carb = [0.1, 0.3, 0.5, 0.6, 0.7, 0.9]
t_optden = [0.086, 0.269, 0.446, 0.538, 0.626, 0.782]
t_leverage = [0.5918367346938774, 0.2816326530612245, 0.1673469387755102, 0.1836734693877552, 0.24897959183673485, 0.5265306122448984]
t_predicted = [0.09271428571428536, 0.26797142857142836, 0.44322857142857136, 0.5308571428571429, 0.6184857142857143, 0.7937428571428573]
t_residuals = [-0.006714285714285367, 0.0010285714285716563, 0.002771428571428647, 0.0071428571428571175, 0.007514285714285696, -0.011742857142857277]
t_stdp = [0.0066535244611503905, 0.00458978457544483, 0.0035380151243903845, 0.003706585424647827, 0.004315515434962329, 0.006275706318490394]
t_stdi = [0.01091189203370195, 0.009791124677432761, 0.009344386069745653, 0.009409504530540022, 0.009665592246181276, 0.010685714285717253]
t_stdr = [0.00552545131594831, 0.00733033952495041, 0.007891923021648551, 0.007814168189246363, 0.0074950868260910365, 0.005951093194035971]
t_student = [-1.2151560714878948, 0.1403170242074994, 0.3511727830880084, 0.9140905301586566, 1.0025615297914663, -1.9732268946192373]
t_rstudent = [-1.3249515797718379, 0.12181828539462737, 0.3089239861926457, 0.8900235681358146, 1.003419757326401, -10.478921731163984]
t_lcli = [0.06241801648886679, 0.24078690838638886, 0.4172843964641476, 0.5047321700609886, 0.5916497280049665, 0.7640745580187355]
t_ucli = [0.12301055493970393, 0.2951559487564679, 0.4691727463929951, 0.5569821156532972, 0.6453217005664621, 0.8234111562669791]
t_lclp = [0.07424114029181057, 0.2552281436530222, 0.43340546665434193, 0.520566011897882, 0.6065039225799076, 0.7763187030532258]
t_uclp = [0.11118743113676015, 0.2807147134898345, 0.4530516762028008, 0.5411482738164038, 0.630467505991521, 0.8111670112324888]
t_press = [-0.016449999999999146, 0.001431818181818499, 0.0033284313725491102, 0.00874999999999997, 0.010005434782608673, -0.02480172413793134]
t_cooksd = [1.0705381016035724, 0.0038594654616162225, 0.012392684477580572, 0.09400066844914513, 0.16661116000566892, 2.1649894168822432]
fdf = DataFrame([[0.1,0.3,0.5,0.6,0.7,0.9],[0.086,0.269,0.446,0.538,0.626,0.782]], [:Carb, :OptDen])
lm1 = regress(@formula(OptDen ~ 1 + Carb), fdf)
results = predict_in_sample(lm1, fdf, Ξ±=0.05, req_stats=["all"])
@test isapprox(t_leverage, results.leverage)
@test isapprox(t_predicted, results.predicted)
@test isapprox(t_residuals, results.residuals)
@test isapprox(t_stdp, results.stdp)
@test isapprox(t_stdi, results.stdi)
@test isapprox(t_stdr, results.stdr)
@test isapprox(t_student, results.student)
@test isapprox(t_rstudent, results.rstudent)
@test isapprox(t_lcli, results.lcli)
@test isapprox(t_ucli, results.ucli)
@test isapprox(t_lclp, results.lclp)
@test isapprox(t_uclp, results.uclp)
@test isapprox(t_press, results.press)
@test isapprox(t_cooksd, results.cooksd)
results = predict_in_sample(lm1, fdf, Ξ±=0.05, req_stats=["none"])
@test isapprox(t_predicted, results.predicted)
@test_throws ArgumentError("column name :leverage not found in the data frame") t_leverage == results.leverage
results = predict_out_of_sample(lm1, fdf)
@test isapprox(t_predicted, results.predicted)
@test_throws ArgumentError("column name :leverage not found in the data frame") t_leverage == results.leverage
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 1031 | # using DataFrames, CSV
# include("../src/LinearRegressionKit.jl")
@testset "Cook's Distance" begin
st_df = DataFrame(
Y=[6.4, 7.4, 10.4, 15.1, 12.3 , 11.4],
XA=[1.5, 6.5, 11.5, 19.9, 17.0, 15.5],
XB=[1.8, 7.8, 11.8, 20.5, 17.3, 15.8],
XC=[3., 13., 23., 39.8, 34., 31.],
# values from SAS proc reg
CooksD_base=[1.4068501943, 0.176809102, 0.0026655177, 1.0704009915, 0.0875726457, 0.1331183932],
CooksD_multi=[1.7122291956, 18.983407026, 0.000118078, 0.8470797843, 0.0715921999, 0.1105843157],
)
t_lm_base = regress(@formula(Y ~ 1+ XA), st_df)
results = predict_in_sample(t_lm_base, st_df, req_stats=["all"])
@test isapprox(st_df.CooksD_base, results.cooksd)
t_lm_multi = regress(@formula(Y ~ 1+ XA + XB), st_df)
results = predict_in_sample(t_lm_multi, st_df, req_stats=["all"])
@test isapprox(st_df.CooksD_multi, results.cooksd)
end | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 70734 |
@testset "White's covariance estimators" begin
# using data from Nerlove Cobb-Douglas model from https://github.com/mcreel/Econometrics/blob/master/Examples/Data/nerlove.csv
cost = [0.082, 0.661, 0.99, 0.315, 0.197, 0.098, 0.949, 0.675, 0.525, 0.501, 1.194, 0.67, 0.349, 0.423, 0.501, 0.55, 0.795, 0.664, 0.705, 0.903, 1.504, 1.615, 1.127, 0.718, 2.414, 1.13, 0.992, 1.554, 1.225, 1.565, 1.936, 3.154, 2.599, 3.298, 2.441, 2.031, 4.666, 1.834, 2.072, 2.039, 3.398, 3.083, 2.344, 2.382, 2.657, 1.705, 3.23, 5.049, 3.814, 4.58, 4.358, 4.714, 4.357, 3.919, 3.442, 4.898, 3.584, 5.535, 4.406, 4.289, 6.731, 6.895, 5.112, 5.141, 5.72, 4.691, 6.832, 4.813, 6.754, 5.127, 6.388, 4.509, 7.185, 6.8, 7.743, 7.968, 8.858, 8.588, 6.449, 8.488, 8.877, 10.274, 6.024, 8.258, 13.376, 10.69, 8.308, 6.082, 9.284, 10.879, 8.477, 6.877, 15.106, 8.031, 8.082, 10.866, 8.596, 8.673, 15.437, 8.211, 11.982, 16.674, 12.62, 12.905, 11.615, 9.321, 12.962, 16.932, 9.648, 18.35, 17.333, 12.015, 11.32, 22.337, 19.035, 12.205, 17.078, 25.528, 24.021, 32.197, 26.652, 20.164, 14.132, 21.41, 23.244, 29.845, 32.318, 21.988, 35.229, 17.467, 22.828, 33.154, 32.228, 34.168, 40.594, 33.354, 64.542, 41.238, 47.993, 69.878, 44.894, 67.12, 73.05, 139.422, 119.939]
output = [2, 3, 4, 4, 5, 9, 11, 13, 13, 22, 25, 25, 35, 39, 43, 63, 68, 81, 84, 73, 99, 101, 119, 120, 122, 130, 138, 149, 196, 197, 209, 214, 220, 234, 235, 253, 279, 290, 290, 295, 299, 324, 333, 338, 353, 353, 416, 420, 456, 484, 516, 550, 563, 566, 592, 671, 696, 719, 742, 795, 800, 808, 811, 855, 860, 909, 913, 924, 984, 991, 1000, 1098, 1109, 1118, 1122, 1137, 1156, 1166, 1170, 1215, 1279, 1291, 1290, 1331, 1373, 1420, 1474, 1497, 1545, 1649, 1668, 1782, 1831, 1833, 1838, 1787, 1918, 1930, 2028, 2057, 2084, 2226, 2304, 2341, 2353, 2367, 2451, 2457, 2507, 2530, 2576, 2607, 2870, 2993, 3202, 3286, 3312, 3498, 3538, 3794, 3841, 4014, 4217, 4305, 4494, 4764, 5277, 5283, 5668, 5681, 5819, 6000, 6119, 6136, 7193, 7886, 8419, 8642, 8787, 9484, 9956, 11477, 11796, 14359, 16719]
labor = [2.09, 2.05, 2.05, 1.83, 2.12, 2.12, 1.98, 2.05, 2.19, 1.72, 2.09, 1.68, 1.81, 2.3, 1.75, 1.76, 1.98, 2.29, 2.19, 1.75, 2.2, 1.66, 1.92, 1.77, 2.09, 1.82, 1.8, 1.92, 1.92, 2.19, 1.92, 1.52, 1.92, 2.2, 2.11, 1.92, 2.05, 1.66, 1.8, 1.77, 1.7, 2.05, 2.19, 1.85, 2.19, 2.13, 1.54, 1.52, 2.09, 1.75, 2.3, 2.05, 2.32, 2.31, 1.92, 2.05, 1.76, 1.7, 2.04, 2.24, 1.7, 1.68, 2.29, 2.0, 2.31, 1.45, 1.7, 1.76, 1.7, 2.09, 1.55, 2.11, 2.05, 2.3, 2.19, 2.04, 2.31, 1.7, 2.05, 2.19, 2.0, 2.32, 1.55, 2.13, 2.2, 2.2, 1.85, 1.76, 1.8, 2.32, 1.8, 2.13, 1.98, 1.76, 1.45, 2.24, 1.69, 1.81, 2.11, 1.76, 1.77, 2.0, 2.3, 2.04, 1.69, 1.76, 2.04, 2.2, 1.76, 2.31, 1.92, 1.76, 1.76, 2.31, 2.3, 1.61, 1.68, 2.09, 2.09, 2.05, 2.29, 2.11, 1.53, 2.11, 2.04, 2.19, 1.92, 2.04, 2.11, 1.76, 1.79, 2.11, 1.54, 1.92, 2.12, 1.61, 2.32, 2.24, 2.31, 2.11, 1.68, 2.24, 2.12, 2.31, 2.3]
fuel = [17.9, 35.1, 35.1, 32.2, 28.6, 28.6, 35.5, 35.1, 29.1, 15.0, 17.9, 39.7, 22.6, 23.6, 42.8, 10.3, 35.5, 28.5, 29.1, 42.8, 36.2, 33.4, 22.5, 21.3, 17.9, 38.9, 20.2, 22.5, 29.1, 29.1, 22.5, 27.5, 22.5, 36.2, 24.4, 22.5, 35.1, 33.4, 20.2, 21.3, 26.9, 35.1, 29.1, 24.6, 29.1, 10.7, 26.2, 27.5, 30.0, 42.8, 23.6, 35.1, 31.9, 33.5, 22.5, 35.1, 10.3, 26.9, 20.7, 26.5, 26.9, 39.7, 28.5, 34.3, 33.5, 17.6, 26.9, 10.3, 26.9, 30.0, 28.2, 24.4, 35.1, 23.6, 29.1, 20.7, 33.5, 26.9, 35.1, 29.1, 34.3, 31.9, 28.2, 30.0, 36.2, 36.2, 24.6, 10.3, 20.2, 31.9, 20.2, 10.7, 35.5, 10.3, 17.6, 26.5, 12.9, 22.6, 24.4, 10.3, 21.3, 34.3, 23.6, 20.7, 12.9, 10.3, 20.7, 36.2, 10.3, 33.5, 22.5, 10.3, 10.3, 33.5, 23.6, 17.8, 28.8, 30.0, 30.0, 35.1, 28.5, 24.4, 18.1, 24.4, 20.7, 29.1, 29.1, 20.7, 24.4, 10.3, 18.5, 24.4, 26.2, 22.5, 28.6, 17.8, 31.9, 26.5, 33.5, 24.4, 28.8, 26.5, 28.6, 33.5, 23.6]
capital = [183, 174, 171, 166, 233, 195, 206, 150, 155, 188, 170, 167, 213, 164, 170, 161, 210, 158, 156, 176, 170, 192, 164, 175, 180, 176, 202, 227, 186, 183, 169, 168, 164, 164, 170, 158, 177, 195, 176, 188, 187, 152, 157, 163, 143, 167, 217, 144, 178, 176, 167, 158, 162, 198, 164, 164, 161, 174, 157, 185, 157, 203, 178, 183, 168, 196, 166, 172, 158, 174, 225, 168, 177, 161, 162, 158, 176, 183, 166, 164, 207, 175, 225, 178, 157, 138, 163, 168, 158, 177, 170, 183, 162, 177, 196, 164, 158, 157, 163, 161, 156, 217, 161, 183, 167, 161, 163, 170, 174, 197, 162, 155, 167, 176, 170, 183, 190, 170, 176, 159, 157, 161, 172, 203, 167, 195, 161, 159, 177, 157, 196, 183, 189, 160, 162, 178, 199, 182, 190, 165, 203, 151, 148, 212, 162]
ndf = DataFrame(cost=cost, output=output, labor=labor, fuel=fuel, capital=capital)
f = @formula(log(cost) ~ 1 + log(output) + log(labor) + log(fuel) + log(capital))
lr2 = regress(f, ndf, cov=[:hc0, :hc1, :hc2, :hc3, :nw])
@test [:hc0, :hc1, :hc2, :hc3] == lr2.white_types
@test [:nw] == lr2.hac_types
@test isapprox([1.6887096637865764, 0.03203057017940859, 0.2413635423569017, 0.07416986820706634, 0.3181801843194431], lr2.white_stderrors[1])
@test isapprox([-2.088282503859893, 22.490828975090466, 1.807817355217008, 5.750542145660426, -0.6910812225036552], first(lr2.white_t_values))
@test isapprox([0.03858351163194079, 2.515671235586196e-48, 0.07278163877804528, 5.358556751845478e-8, 0.4906588080040115], lr2.white_p_values[1])
@test isapprox([1.718600650948449, 0.03259752694088162, 0.24563579513121173, 0.07548271115809448, 0.3238121292347976], lr2.white_stderrors[2])
@test isapprox([-2.0519617765991467, 22.099654283167162, 1.7763746548274086, 5.650525087387196, -0.6790615017279884], lr2.white_t_values[2])
@test isapprox([0.04203687115328462, 1.7165028470144865e-47, 0.07784320772633291, 8.6256319578862e-8, 0.49821998002922296], lr2.white_p_values[2])
@test isapprox([1.7404789466388655, 0.033024377914589426, 0.24759483158894688, 0.07606559302904324, 0.327665011037017], lr2.white_stderrors[3])
@test isapprox([-2.0261680566690297, 21.814008964615727, 1.762319503962301, 5.607225765004714, -0.6710766891466415], lr2.white_t_values[3])
@test isapprox([0.04464696168598006, 7.061145309349196e-47, 0.0801976916093005, 1.0583954671852304e-7, 0.5032773613193346], lr2.white_p_values[3])
@test isapprox([1.794226207499838, 0.03405163218075088, 0.25404375612619956, 0.07802173599290865, 0.3375082813724026], lr2.white_stderrors[4])
@test isapprox([-1.96547282067551, 21.155933790655254, 1.717582858335255, 5.466642694307393, -0.6515050530368971], lr2.white_t_values[4])
@test isapprox([0.05133862483580867, 1.909213828296015e-45, 0.08808381706940926, 2.0436000920232382e-7, 0.5157885588309656], lr2.white_p_values[4])
@test isapprox([1.5413337169817793, 0.04558343812215214, 0.250278670841928, 0.07289639728036612, 0.2651138982712092], lr2.hac_stderrors[1])
@test isapprox([-2.287955428555851, 15.803855644877787, 1.7434214402754615, 5.851001818682364, -0.8294108765696443], lr2.hac_t_values[1])
@test isapprox([0.02363824106367065, 6.252423796307023e-33, 0.08345530926614206, 3.306228013288088e-8, 0.4082838999795607], first(lr2.hac_p_values))
end
@testset "Newey-West covariance estimators" begin
# using Data from https://raw.githubusercontent.com/mcreel/Econometrics/master/Examples/Data/sp500.csv
# based on the Econometrics code from https://github.com/mcreel/Econometrics/blob/master/Examples/TimeSeries/SP500.jl
y = [0.002583702575676502, 0.3079890371414663, 0.2218233792549798, 0.07975000051131587, 0.22310573843696402, 0.0016145767310154937, 0.00021506066259480462, 0.1347348238545566, 0.7244128703617047, 0.0011828566361204594, 0.07002289701258912, 0.37366185926285417, 0.0007702828176743898, 0.002139278700879984, 0.053836464007526154, 1.650675513088037, 1.16748122425437, 0.27856093459188397, 0.019058258961000864, 0.16136521150410382, 0.08224579148911378, 0.004024360208805437, 0.7653817220257503, 4.4510953200179815, 0.25957822569124633, 0.37137008090011087, 1.0357988095863304, 1.2389842387287384, 0.4324607874947091, 5.269593589329902, 0.5621803938941623, 0.03477934257291081, 1.4619965090681062, 1.7365164047084427, 0.028582510379680535, 1.1864719119647948, 0.00017403544878260159, 0.32998873142007473, 0.23548033193902543, 0.019210839894924763, 0.4530222091064397, 0.36620714829237433, 0.033215060313819496, 0.36313208130177793, 0.01484899619985411, 4.698840747702183e-6, 0.22943861446916383, 0.0693734390306599, 0.5194246266697191, 2.254208363566848, 0.000711991818247004, 0.03227740032396568, 0.0002067872176035323, 0.0008581956571378268, 0.25524877190609313, 0.0006603091802297528, 1.362499153127377, 0.08577309821496772, 0.8919014915087333, 0.5230148340571447, 0.3562150939685192, 0.3562150939685192, 0.1096908647583281, 0.20407089519132424, 0.1968720725392859, 0.48739080592383005, 0.04061200146027687, 0.20350016132170584, 0.6452477732061614, 0.4935279378347359, 0.08269231898382176, 0.010974942559581268, 1.6116941048352218, 1.1928108856953, 0.15213049376092777, 1.1501296327247874, 4.425046102227064, 0.8991160278122725, 0.6456958173927121, 0.4580013476284833, 1.0730853116158547, 0.02257630017653147, 0.12632124277322926, 0.19019934287306395, 0.05196206322855161, 0.02692636386162153, 0.635005506129145, 0.0972247251099256, 0.22760365264589139, 0.07938909838989823, 0.00028864197438840976, 0.014932056591780294, 0.03840574490115696, 0.8190214201900288, 0.30950765751926473, 
0.01831369772284331, 0.021621005780058804, 0.9430252521456538, 0.0012103784072304167, 0.22155472706333956, 0.8627598292464865, 0.1347165223919831, 0.13961685134675358, 0.396375900478387, 0.6152459263002183, 0.061040907672904716, 0.17837935923383433, 0.3582749524196826, 0.011621160871505566, 0.2727183192294628, 0.03317357438251278, 0.005996550580435087, 0.0012854244709616827, 0.03435859450068371, 0.4084641127852834, 0.2156258703166822, 0.009693582456685656, 0.00017760650614569273, 0.11942646862758109, 0.5276094841987697, 0.09066065784318494, 0.009929957262853248, 0.052145347173973046, 0.5724172922392592, 0.014649269664895301, 0.03913792721866548, 0.0013455493423829188, 0.3953058983299156, 0.22439797677280424, 0.01227838224650002, 0.03605488081864467, 0.001879662186147144, 0.4409922694618947, 0.003946687216989506, 0.3092252935097444, 0.15935751264549847, 0.47063574021947513, 0.1922336294839871, 0.16143647680511083, 0.017894712219287973, 0.24489610764512534, 0.03229765751935813, 0.15918992781905314, 1.436885125027776, 1.0319324805303918, 0.04264171592814084, 0.2528556838239409, 0.033424334783545964, 0.0004900776090921828, 0.23388375032149153, 0.0009505823861797834, 0.1835680041079445, 5.796064815787655e-5, 4.0806986460067876, 0.08346313295260399, 0.5272289742049457, 0.9931513138005919, 1.0850581417353803e-6, 0.30014196696728745, 1.2968901911493473, 0.078242727820577, 0.02386976812780534, 0.4251328893584252, 0.1979793262794858, 2.6160527648452464e-7, 0.6926779466296006, 0.25803324018763996, 0.062248616253323265, 0.08765575761459778, 0.04120106917205882, 0.25067625966553186, 0.008562886172120192, 0.00022500000084360892, 0.020053628341366012, 0.1011039485999002, 0.004543950646515702, 0.004090312768752987, 0.024199736620066938, 0.23609140197946205, 0.08782228599374417, 0.4200776046483452, 0.1309926797100626, 0.006665392855877088, 0.34397553423331073, 0.006744069891373599, 0.5680539834354023, 0.015620473802026125, 0.24491747714212983, 0.0021386494864975964, 
0.6407900944133236, 0.3431802914429819, 0.6204188359136701, 2.63173564256491, 0.6698705449027024, 0.054904693752213116, 0.06797321779763552, 1.767005817528848, 0.0011152537338764798, 1.2128283813183613, 0.0184390507113931, 2.3766787146086807, 3.0079507122335087, 4.258404245487947, 1.3115650352289032, 2.8165805348731867, 0.025905717699369488, 0.664907800923179, 0.0011444411916685549, 1.7241447201716487, 0.8370970621184224, 3.7284109849477085, 0.5113090444749049, 1.4257963182829891, 0.5167011440300261, 0.02526566428379101, 1.3757506587868202, 0.012755883561513673, 0.37649443520629744, 1.3193112989834972, 4.815314748655017e-5, 0.06997476757293467, 0.2934423572024434, 0.14542305380085527, 0.0015900672702848092, 0.10233979942471935, 0.005630546906068476, 0.006469621166966418, 0.002964489964671273, 0.0006010368661893317, 0.005049890932956196, 0.2841651168698957, 0.03239227448552545, 0.0437699743603815, 0.25004113429080693, 0.09896237374382906, 0.008348297582788964, 0.06578790089468022, 0.06653433188654503, 0.4642751904757431, 0.39519374748374775, 0.13476408975837945, 0.014546447433743066, 0.03048312954732133, 0.4971181043026032, 0.0018662766285030344, 2.6278158118829213, 0.1835022400764073, 2.47514809706536, 0.4902999631428375, 0.6470801794417882, 3.786416800758203, 5.710830088843509, 0.20926417831010427, 0.1581341399810609, 0.027658022936031913, 0.00026667083846736194, 0.1238659117684014, 0.0063843628083706545, 0.2568548171899579, 1.0711977658880973, 0.0013630664009920536, 3.30352005277502, 0.879937452995484, 1.3685169188596928, 3.1345899818323706, 0.6720940644275492, 0.6578195141762373, 0.07532735309925823, 0.34805685820061716, 0.842015572726283, 1.7087250120723894, 0.029488252214740676, 0.24182332479734264, 2.284138680208943, 0.31894040973928284, 0.061151112591218436, 1.7538966941844074, 1.8268492450043812, 0.8870968872656356, 1.6888231118637285, 1.6087801518278428, 2.019083717365777, 0.1684290942528512, 1.0410453692042765, 0.10908074809402096, 0.1908436042909946, 
1.1155471867184612, 5.844220501773345e-8, 0.9173794532331626, 0.17207017942125807, 0.019631404095377558, 6.555129538971182e-5, 0.00982371335634011, 0.35698824532156515, 0.0007820153496087637, 0.08405923604797942, 0.008030119032879519, 0.019637432372691947, 0.08906498732956161, 0.34791881304822925, 0.1888647288187308, 0.18720383176890742, 0.013171865708443678, 2.044673633104564, 0.15459915234264454, 2.7340068188035773, 0.05613079127103884, 1.524501016813576, 0.3659967127896868, 1.767352893320553, 0.10153648063314294, 1.435364000364032, 0.2167276648091159, 0.8031751357843536, 0.032374505789632385, 0.3751207540409881, 2.149188191296807, 0.0569958712551922, 0.04589654437267971, 1.5084140556081869, 0.7492740902741857, 0.1656337374812022, 0.11493729848117477, 0.4580338241879098, 0.051352422239383536, 0.07530480863923807, 0.19846467652836752, 0.27377279396793475, 0.1987303527859933, 0.026353024881242783, 0.23472445370635528, 0.003034692953906944, 1.316931342061209, 0.8390005939171763, 0.01754342967885931, 0.2469866860516095, 0.057710642895835086, 0.051701050550059705, 0.17219583486604545, 0.07883443523184595, 0.15164278691054486, 1.021923484986976, 1.1332130435385828, 0.0956724321770484, 1.4033171247754346, 0.18694570389475104, 0.1313883092766454, 1.7455074139725115, 0.24915777761962332, 0.08740574923380957, 0.0006376809234389431, 1.0984572099386032, 0.00929021796449848, 0.08574866678963247, 0.0023646622324457658, 0.00929021785769423, 0.06095506448679574, 0.05073967134751741, 1.0752853369569975, 0.813834965372417, 0.013766648374547249, 0.4010969177159289, 0.03954033174282336, 0.0107168060720065, 0.051118343620808644, 0.7264977807132222, 0.0295394916335375, 0.40768478136598163, 0.0005107261219950305, 1.3835791642534696, 0.05082592741617635, 0.5182288196996737, 0.20847353016272271, 0.31457673259478647, 0.03854852878681378, 0.9355254491638486, 0.2808480238401504, 0.3979094654666121, 0.003576278721621333, 0.5286854122427138, 0.09093614772575773, 0.0008980702651302059, 
4.506337756442705, 0.0628419448477344, 0.5120521952985422, 0.005285361135176275, 0.1292140484977512, 0.3828250468549696, 2.8142650757929815, 0.04403330176973427, 1.4896474672190718, 1.206052602820401, 0.19957580371050726, 0.0024332886984693245, 0.6014342222138763, 0.01140756420168566, 0.005377842609848153, 0.17604244197404836, 0.05760422812917011, 0.31250381829377694, 1.1490894921751678, 0.32317018839483974, 1.477363382486283, 0.5145477322334638, 0.00044536297985431025, 0.03722728387531214, 0.10378974208240024, 0.04986984116156657, 0.10029440871562356, 0.5982456162589183, 0.07998428351762868, 1.5670089066470347, 0.8722840563055405, 0.0073673753922654595, 0.017523748116534052, 0.15166069792511527, 0.2647333138240556, 0.07087804020640305, 0.6665559662655085, 4.465561571362131, 10.40555472376894, 16.357949944570567, 1.8201377363046138, 14.44889082735589, 5.777626475633906, 0.0012396712566665454, 0.6883883094000172, 8.861699000902632, 3.1943960598716226, 0.011490096079574411, 2.3567955489581562, 6.115447204722121, 1.8989595298996975, 0.26058211927209585, 0.18129924094686936, 0.14887662516273448, 1.6450471835892526, 0.7236768307417346, 0.05850914274620028, 2.662184941920464, 0.21830373822980817, 1.5939020924298253, 0.03694131138650948, 0.10559458810404099, 0.0024689384133387868, 6.783973349246389, 0.010398398911134211, 3.3524729267239373, 0.068279431966427, 2.0162990366951843, 3.28019301227849, 0.11927888704886429, 0.6265796292912164, 0.7748629517746713, 0.006076061991189065, 0.013128366319410133, 0.4488822636762288, 0.2354825292028072, 2.209225500280821, 0.19817768183084417, 0.002926153051489738, 0.019247637042804473, 0.36352165683014365, 2.7112274772259863, 1.2192855982919537, 0.044840887285913816, 0.056805932987708904, 1.3494592480103418, 0.0013576460417352443, 0.2242100355383555, 1.3618848691363492, 0.07975331131097356, 0.11782991085393398, 0.014381096371652689, 0.0017168415935754928, 0.9583105878100994, 0.024554390730871872, 0.11866843195091219, 1.9632576325544924, 
1.2646413642305825, 2.155179950498865, 0.01581167729278044, 2.544402790787023, 0.011883784671174378, 0.14682683497024251, 0.014799396019231316, 0.016969808531168, 0.0002806611450338811, 0.0027712499496886185, 0.1768016741246593, 1.0107010248365031, 1.1670828567876885, 2.0758158009107004, 3.9421882611486314, 0.44865444108316177, 0.41854806373328796, 0.6038449480901684, 0.048182822003452724, 3.872119837986092, 0.24572661055217634, 1.0824271647473727, 2.03779299856089, 2.24255529227242, 3.263758245708823, 0.6011732417563199, 0.7879168968089538, 1.5050527610036375, 0.025441763685205794, 0.04546771822433159, 1.0865854705464866, 0.5184737462324385, 0.910142321772268, 2.286511011661547, 0.03842941438688528, 1.7247351272464149, 5.824146531510543, 1.2275181867210987, 0.007635716199518582, 0.6376029845125641, 6.433776369887999, 2.6511260306327085, 4.7012118510676935, 0.004347054808087411, 1.3659555616858692, 0.24570121720140126, 4.073627148727174, 2.421056718500661, 1.9251272852839614, 1.1711801545005558, 0.27715925852263584, 5.565322930135341, 0.0009579624668431065, 3.481273317628789, 0.2516299228341998, 0.021401362682114608, 3.4939583654402, 1.983427037787052, 0.0035237228726767754, 0.0020569783634058533, 1.5677078198252883, 3.7527934289121045, 2.662250670402008, 2.710828525609125, 0.21585584855319767, 4.351194233101605e-6, 2.0510428949246498, 1.5360618675912747, 0.18582604017048446, 1.306272035777689, 0.03919095545102478, 0.6862750084437098, 5.573613903979422, 0.1703233717825925, 0.13130228089646268, 0.09800057883604318, 0.009598337681838763, 1.2360761565697687, 0.2301996571176748, 0.0012030825891027046, 2.5884984445887227, 0.014820775781172928, 0.03219035087883163, 0.3389309399247959, 0.40782563342972156, 0.174415708647101, 0.013136952864563189, 0.004999242998317165, 0.4249259448217729, 0.0013205614013213534, 0.0028123552076516883, 0.7868471672769553, 0.1833852057359362, 0.04349210760699473, 0.4056241084484814, 0.11084892073550408, 1.0130429443072781, 1.0914632328925065, 
1.4490325408381783, 0.07340161990758948, 0.07208089577358524, 0.9189331941156488, 0.9950856118029908, 1.4757545294127993e-5, 0.009985690118679568, 0.4440624343427734, 0.09396965438944942, 0.005941963128194226, 0.26524574628999026, 8.22865063234208e-6, 0.03219996480514713, 0.029022332981481352, 0.03705667977313132, 0.8565969851329813, 0.2667246033735577, 0.5873038079830065, 0.7544028206125967, 0.3534000062803314, 0.0010673040857031, 0.10641747032439862, 0.006743164041479742, 1.5151748468672672, 0.9082197477491177, 6.00611111378366e-5, 0.7592059305274265, 0.9514315044323035, 0.9155569359195882, 0.0005729044894809509, 0.13396099842071768, 0.35612588639461673, 0.03291967439965614, 1.7835250613851086, 0.46535213486548405, 2.2890401090842216e-5, 0.1697874694586328, 0.009181282586632885, 0.014428021431883162, 0.07327111169524764, 0.07719069688098099, 0.2386765746730553, 0.021151820703514634, 0.10117839668330411, 0.028587544356908454, 0.8442957886076867, 0.6669902604644344, 0.03311263636671677, 0.0260970648495329, 0.08500568162075645, 0.10965349882925712, 0.3466110038677883, 0.0662619529917042, 0.025608597520586286, 1.712442003477771, 13.300237353327269, 3.3009254902353966, 3.0610154169935098, 2.838291793764177, 1.8598323545243238, 0.030852749611046233, 0.4716075610867205, 0.29537007408977667, 0.008282943401236243, 2.2468064683584106, 0.1256023130608896, 0.49645792315995513, 5.527171786059049e-5, 0.27374446623212906, 0.00952083061718175, 0.06180425502658006, 0.01894107118695491, 0.1847286840616142, 0.1401438184149155, 0.20231760203480303, 0.08177228385295082, 0.00079104764983973, 0.010017859656537101, 0.01863291170930948, 0.02365535332003779, 0.014329635910613195, 0.39443315701474363, 0.08586140751153488, 0.0019274133276433627, 0.6851756940473963, 0.005311998850043209, 0.0017024959555922987, 0.079688894718958, 0.22008320587901115, 0.008130608536134967, 0.08510524420987187, 0.29682965621648394, 0.028338802589761864, 0.05011453282274803, 0.02104371353267891, 
0.0018935209969263623, 0.03713120970097753, 0.26128043499760933, 0.01942085910485509, 0.02409474450511849, 0.2586053664787466, 0.038606498086274865, 0.06127251225564271, 4.7751707769901346e-5, 0.1835721577393485, 0.09056041030997672, 0.00027112998387032923, 0.052429812659836525, 6.03971060870506, 2.073913523576319, 2.2353248178138454, 0.003027384071535517, 1.0051156181575258, 0.15144269337829147, 0.00010578097131303075, 0.00040398676844760964, 1.189210215892407, 0.4085511322933493, 0.3050386957011653, 0.729340228548671, 0.3861413223970852, 0.2689323689592784, 0.856462644552892, 0.6109684123883636, 0.10548780428401248, 0.2340838798352385, 0.1777146984711078, 0.002958576511911419, 0.10710288496433198, 0.2103252201669941, 1.539517691333209, 0.013022405694318602, 0.09721162451566145, 0.0004450999776792177, 0.10044411808050144, 0.37033849738013236, 0.053624759748461814, 0.02230205845284987, 7.067652115930854e-5, 0.21197286681136482, 0.1226659239582447, 0.0319877674614164, 0.08340411873281124, 0.10434005192264394, 0.00010704532120576496, 0.469542294582416, 0.42238872403469113, 0.19906499754072418, 0.025611051627757477, 4.7584503819435815, 0.15288208712720827, 1.1719774681176782, 0.046314819022991664, 0.02596355838457836, 0.00011293349290798197, 0.5438027191907789, 0.025513367954121367, 0.21552863937083835, 0.053438858962943384, 0.5447817034365408, 0.04341513767062041, 0.006306507791694165, 0.15943016116215442, 0.2675947791253387, 0.016672345318286222, 0.06227164917694632, 0.1402194735857296, 0.002166155678603786, 0.3300636020729472, 0.11718001922823885, 1.6385072924932298, 0.05414217650787199, 0.3617840605745264, 0.013052553097741575, 0.41975632687895537, 0.6506558695291546, 0.1451019274177716, 0.031169863735291094, 0.04177842691590757, 0.12613322282896455, 0.05627174041462594, 0.03593303679676961, 0.012905521172021369, 0.05407880270522265, 0.6953072159182692, 0.000619616509593883, 0.18444785797066962, 0.6182056858262527, 0.33837639055534086, 0.005093817751051703, 
0.12886772639154007, 0.122308865377569, 0.00013129235104194828, 0.0732684331815726, 0.04497864379020792, 0.03514296697730362, 0.08832896925862935, 0.031055691034788224, 0.13043429709465412, 0.10941541868394032, 0.06314815563956179, 0.41099443705370803, 0.649567483868346, 0.005868020937729122, 0.009268333268147229, 0.3735101732583879, 0.004563294063522422, 0.0002635501993347267, 0.004327989402044048, 0.5052475934321649, 0.039312233429900666, 0.00014915203351702864, 0.005954858596661586, 0.3230989285791853, 0.13772568488281842, 0.26392342935831414, 0.1558730238088185, 0.24761261238104038, 0.006686102464138949, 0.025214691642705556, 0.3555708368894157, 0.010309331346734675, 0.0025358538925498776, 0.018647625366932926, 0.013679714648336727, 0.060052297952910345, 1.7956473189152413, 0.35439058459450185, 0.0019429284643071047, 0.09083090787269694, 0.09447004403016364, 0.04575136490335676, 0.006459371442439489, 0.11261868468888157, 0.0008209421471136433, 0.12181384224474495, 0.6576428425169688, 0.020840193947308763, 0.018653988817572152, 0.03556092834428674, 1.5674535023655984, 0.03843923840553641, 0.011252969161252612, 0.005254626037166597, 0.013576570624271495, 0.5195953788449381, 0.012800912700290658, 0.08391698079169817, 0.05250545865232042, 0.024563216320698793, 0.001980376901751918, 0.0933614612590033, 0.041709311922108486, 0.00670696954123109, 0.003734452871888666, 0.017996718703772972, 0.1540165935245423, 0.4461858257869078, 0.7288837326380754, 0.0875387940731562, 0.03252182159753454, 0.594101714982668, 0.09108890408323, 1.1342438934197567, 0.3712756969567675, 0.0018965862018593643, 0.0031481901627399447, 0.04113192757760038, 0.043020824844486105, 0.005927464499773701, 0.01576473650159528, 0.004771527495940785, 0.16513905402476844, 0.000166970350633535, 0.007524462046500157, 0.011935856891056891, 0.04110830032928077, 0.027671082401395663, 0.23876646919202865, 0.005310444163454746, 3.2854037560380474, 0.12292849398403076, 0.44532821773541253, 0.2662274979472321, 
0.038634887727501496, 0.05594401038358628, 0.19755036943015952, 0.0004998730758698305, 0.012231449286693084, 0.0022726809126874096, 0.5652303300137818, 0.11488282090519102, 0.006013619799989586, 0.07881511743641124, 0.02266188087562861, 0.0008754202237340968, 0.011336656440702482, 0.00506698056456099, 0.19819404671256646, 0.010338183204109248, 0.047905709657831086, 0.0004568750550543027, 0.6708609989917624, 0.4217972007628798, 0.0026318892298838295, 0.002719595459366587, 0.020284471517469056, 0.0013320109303731043, 0.630492468309407, 0.7277083660370265, 0.7241872447638558, 0.023724489647558626, 0.05138674088550191, 0.021448912084270546, 0.8619674915569887, 0.38240964212404543, 0.011394573776611744, 0.006195031904820567, 0.5321505794035981, 0.03476180036672574, 0.20761893732966868, 6.614553720211809e-5, 0.0045540622128015075, 0.2700275145518658, 0.0004590234378390465, 0.004780601680297542, 0.003162747088893576, 0.06484165500543976, 0.0006062057863821437, 0.009312544262156252, 0.018338734134179264, 0.0024768190702265484, 0.04608470029150553, 0.0015657264045072893, 0.04264129020384843, 0.03034473852077423, 0.03137127332388284, 0.05535957456012246, 0.0016332519194396443, 2.104519814572286, 0.016457382385387628, 0.9458640190004752, 0.00044500592757794745, 0.01922651966030125, 2.3971676820772814, 0.038115265259816185, 0.012377547214999052, 0.9891011251988212, 0.11912349644921691, 0.043634576818667274, 0.03473849386968508, 0.0005251161795553847, 0.009394108093578026, 0.2012677700571407, 0.33105007341867876, 0.03670862278189504, 0.557441397840061, 0.10245342291065757, 0.0006738403062835575, 0.01995637730106744, 1.1293509658245315, 0.12438037953709719, 0.003009163689409014, 0.005973332697882615, 0.020769058359085234, 0.029675107098423546, 0.010852212268150191, 0.00289880220414353, 0.08264763970799936, 0.0011547941025428492, 0.04131703753618656, 0.0004851928442598332, 0.15876421970281124, 0.011499295615757537, 0.13069478215454425, 0.16362702728250397, 0.04753117394466431, 
0.014462090488644521, 0.3223872165627712, 0.01623668829463393, 0.028773205740287088, 0.05903144938946727, 0.027584016345301467, 0.03135092212005554, 0.01465998756732385, 0.023170153237071733, 0.005515099777242529, 0.004997741362145888, 0.0019802775112162108, 0.2394790336151675, 0.13777829486273757, 0.02355171060769022, 0.22357412371274132, 0.015639662820261795, 0.6477503708567071, 0.10550058390024825, 0.008912867718160461, 0.02655740743491055, 0.0003319678551520233, 0.09634330959638235, 0.01673604647160506, 0.0036267047995161283, 0.03330076447941435, 0.1385829568068526, 0.00792529183858699, 0.009362805469398846, 0.055454328812437115, 0.2959020601481385, 0.6536180197581768, 0.07140230259919765, 0.018291032043038915, 0.42956359413553497, 0.006847680885935154, 0.04203295066189019, 0.0012501854265941762, 0.9716207558868816, 0.0023378035023157266, 0.6465288454659239, 0.04200635699421544, 0.010768580670454402, 0.13253355422904062]
x1 = [2.6788212845035386, 0.002583702575676502, 0.3079890371414663, 0.2218233792549798, 0.07975000051131587, 0.22310573843696402, 0.0016145767310154937, 0.00021506066259480462, 0.1347348238545566, 0.7244128703617047, 0.0011828566361204594, 0.07002289701258912, 0.37366185926285417, 0.0007702828176743898, 0.002139278700879984, 0.053836464007526154, 1.650675513088037, 1.16748122425437, 0.27856093459188397, 0.019058258961000864, 0.16136521150410382, 0.08224579148911378, 0.004024360208805437, 0.7653817220257503, 4.4510953200179815, 0.25957822569124633, 0.37137008090011087, 1.0357988095863304, 1.2389842387287384, 0.4324607874947091, 5.269593589329902, 0.5621803938941623, 0.03477934257291081, 1.4619965090681062, 1.7365164047084427, 0.028582510379680535, 1.1864719119647948, 0.00017403544878260159, 0.32998873142007473, 0.23548033193902543, 0.019210839894924763, 0.4530222091064397, 0.36620714829237433, 0.033215060313819496, 0.36313208130177793, 0.01484899619985411, 4.698840747702183e-6, 0.22943861446916383, 0.0693734390306599, 0.5194246266697191, 2.254208363566848, 0.000711991818247004, 0.03227740032396568, 0.0002067872176035323, 0.0008581956571378268, 0.25524877190609313, 0.0006603091802297528, 1.362499153127377, 0.08577309821496772, 0.8919014915087333, 0.5230148340571447, 0.3562150939685192, 0.3562150939685192, 0.1096908647583281, 0.20407089519132424, 0.1968720725392859, 0.48739080592383005, 0.04061200146027687, 0.20350016132170584, 0.6452477732061614, 0.4935279378347359, 0.08269231898382176, 0.010974942559581268, 1.6116941048352218, 1.1928108856953, 0.15213049376092777, 1.1501296327247874, 4.425046102227064, 0.8991160278122725, 0.6456958173927121, 0.4580013476284833, 1.0730853116158547, 0.02257630017653147, 0.12632124277322926, 0.19019934287306395, 0.05196206322855161, 0.02692636386162153, 0.635005506129145, 0.0972247251099256, 0.22760365264589139, 0.07938909838989823, 0.00028864197438840976, 0.014932056591780294, 0.03840574490115696, 0.8190214201900288, 
0.30950765751926473, 0.01831369772284331, 0.021621005780058804, 0.9430252521456538, 0.0012103784072304167, 0.22155472706333956, 0.8627598292464865, 0.1347165223919831, 0.13961685134675358, 0.396375900478387, 0.6152459263002183, 0.061040907672904716, 0.17837935923383433, 0.3582749524196826, 0.011621160871505566, 0.2727183192294628, 0.03317357438251278, 0.005996550580435087, 0.0012854244709616827, 0.03435859450068371, 0.4084641127852834, 0.2156258703166822, 0.009693582456685656, 0.00017760650614569273, 0.11942646862758109, 0.5276094841987697, 0.09066065784318494, 0.009929957262853248, 0.052145347173973046, 0.5724172922392592, 0.014649269664895301, 0.03913792721866548, 0.0013455493423829188, 0.3953058983299156, 0.22439797677280424, 0.01227838224650002, 0.03605488081864467, 0.001879662186147144, 0.4409922694618947, 0.003946687216989506, 0.3092252935097444, 0.15935751264549847, 0.47063574021947513, 0.1922336294839871, 0.16143647680511083, 0.017894712219287973, 0.24489610764512534, 0.03229765751935813, 0.15918992781905314, 1.436885125027776, 1.0319324805303918, 0.04264171592814084, 0.2528556838239409, 0.033424334783545964, 0.0004900776090921828, 0.23388375032149153, 0.0009505823861797834, 0.1835680041079445, 5.796064815787655e-5, 4.0806986460067876, 0.08346313295260399, 0.5272289742049457, 0.9931513138005919, 1.0850581417353803e-6, 0.30014196696728745, 1.2968901911493473, 0.078242727820577, 0.02386976812780534, 0.4251328893584252, 0.1979793262794858, 2.6160527648452464e-7, 0.6926779466296006, 0.25803324018763996, 0.062248616253323265, 0.08765575761459778, 0.04120106917205882, 0.25067625966553186, 0.008562886172120192, 0.00022500000084360892, 0.020053628341366012, 0.1011039485999002, 0.004543950646515702, 0.004090312768752987, 0.024199736620066938, 0.23609140197946205, 0.08782228599374417, 0.4200776046483452, 0.1309926797100626, 0.006665392855877088, 0.34397553423331073, 0.006744069891373599, 0.5680539834354023, 0.015620473802026125, 0.24491747714212983, 
0.0021386494864975964, 0.6407900944133236, 0.3431802914429819, 0.6204188359136701, 2.63173564256491, 0.6698705449027024, 0.054904693752213116, 0.06797321779763552, 1.767005817528848, 0.0011152537338764798, 1.2128283813183613, 0.0184390507113931, 2.3766787146086807, 3.0079507122335087, 4.258404245487947, 1.3115650352289032, 2.8165805348731867, 0.025905717699369488, 0.664907800923179, 0.0011444411916685549, 1.7241447201716487, 0.8370970621184224, 3.7284109849477085, 0.5113090444749049, 1.4257963182829891, 0.5167011440300261, 0.02526566428379101, 1.3757506587868202, 0.012755883561513673, 0.37649443520629744, 1.3193112989834972, 4.815314748655017e-5, 0.06997476757293467, 0.2934423572024434, 0.14542305380085527, 0.0015900672702848092, 0.10233979942471935, 0.005630546906068476, 0.006469621166966418, 0.002964489964671273, 0.0006010368661893317, 0.005049890932956196, 0.2841651168698957, 0.03239227448552545, 0.0437699743603815, 0.25004113429080693, 0.09896237374382906, 0.008348297582788964, 0.06578790089468022, 0.06653433188654503, 0.4642751904757431, 0.39519374748374775, 0.13476408975837945, 0.014546447433743066, 0.03048312954732133, 0.4971181043026032, 0.0018662766285030344, 2.6278158118829213, 0.1835022400764073, 2.47514809706536, 0.4902999631428375, 0.6470801794417882, 3.786416800758203, 5.710830088843509, 0.20926417831010427, 0.1581341399810609, 0.027658022936031913, 0.00026667083846736194, 0.1238659117684014, 0.0063843628083706545, 0.2568548171899579, 1.0711977658880973, 0.0013630664009920536, 3.30352005277502, 0.879937452995484, 1.3685169188596928, 3.1345899818323706, 0.6720940644275492, 0.6578195141762373, 0.07532735309925823, 0.34805685820061716, 0.842015572726283, 1.7087250120723894, 0.029488252214740676, 0.24182332479734264, 2.284138680208943, 0.31894040973928284, 0.061151112591218436, 1.7538966941844074, 1.8268492450043812, 0.8870968872656356, 1.6888231118637285, 1.6087801518278428, 2.019083717365777, 0.1684290942528512, 1.0410453692042765, 0.10908074809402096, 
0.1908436042909946, 1.1155471867184612, 5.844220501773345e-8, 0.9173794532331626, 0.17207017942125807, 0.019631404095377558, 6.555129538971182e-5, 0.00982371335634011, 0.35698824532156515, 0.0007820153496087637, 0.08405923604797942, 0.008030119032879519, 0.019637432372691947, 0.08906498732956161, 0.34791881304822925, 0.1888647288187308, 0.18720383176890742, 0.013171865708443678, 2.044673633104564, 0.15459915234264454, 2.7340068188035773, 0.05613079127103884, 1.524501016813576, 0.3659967127896868, 1.767352893320553, 0.10153648063314294, 1.435364000364032, 0.2167276648091159, 0.8031751357843536, 0.032374505789632385, 0.3751207540409881, 2.149188191296807, 0.0569958712551922, 0.04589654437267971, 1.5084140556081869, 0.7492740902741857, 0.1656337374812022, 0.11493729848117477, 0.4580338241879098, 0.051352422239383536, 0.07530480863923807, 0.19846467652836752, 0.27377279396793475, 0.1987303527859933, 0.026353024881242783, 0.23472445370635528, 0.003034692953906944, 1.316931342061209, 0.8390005939171763, 0.01754342967885931, 0.2469866860516095, 0.057710642895835086, 0.051701050550059705, 0.17219583486604545, 0.07883443523184595, 0.15164278691054486, 1.021923484986976, 1.1332130435385828, 0.0956724321770484, 1.4033171247754346, 0.18694570389475104, 0.1313883092766454, 1.7455074139725115, 0.24915777761962332, 0.08740574923380957, 0.0006376809234389431, 1.0984572099386032, 0.00929021796449848, 0.08574866678963247, 0.0023646622324457658, 0.00929021785769423, 0.06095506448679574, 0.05073967134751741, 1.0752853369569975, 0.813834965372417, 0.013766648374547249, 0.4010969177159289, 0.03954033174282336, 0.0107168060720065, 0.051118343620808644, 0.7264977807132222, 0.0295394916335375, 0.40768478136598163, 0.0005107261219950305, 1.3835791642534696, 0.05082592741617635, 0.5182288196996737, 0.20847353016272271, 0.31457673259478647, 0.03854852878681378, 0.9355254491638486, 0.2808480238401504, 0.3979094654666121, 0.003576278721621333, 0.5286854122427138, 0.09093614772575773, 
0.0008980702651302059, 4.506337756442705, 0.0628419448477344, 0.5120521952985422, 0.005285361135176275, 0.1292140484977512, 0.3828250468549696, 2.8142650757929815, 0.04403330176973427, 1.4896474672190718, 1.206052602820401, 0.19957580371050726, 0.0024332886984693245, 0.6014342222138763, 0.01140756420168566, 0.005377842609848153, 0.17604244197404836, 0.05760422812917011, 0.31250381829377694, 1.1490894921751678, 0.32317018839483974, 1.477363382486283, 0.5145477322334638, 0.00044536297985431025, 0.03722728387531214, 0.10378974208240024, 0.04986984116156657, 0.10029440871562356, 0.5982456162589183, 0.07998428351762868, 1.5670089066470347, 0.8722840563055405, 0.0073673753922654595, 0.017523748116534052, 0.15166069792511527, 0.2647333138240556, 0.07087804020640305, 0.6665559662655085, 4.465561571362131, 10.40555472376894, 16.357949944570567, 1.8201377363046138, 14.44889082735589, 5.777626475633906, 0.0012396712566665454, 0.6883883094000172, 8.861699000902632, 3.1943960598716226, 0.011490096079574411, 2.3567955489581562, 6.115447204722121, 1.8989595298996975, 0.26058211927209585, 0.18129924094686936, 0.14887662516273448, 1.6450471835892526, 0.7236768307417346, 0.05850914274620028, 2.662184941920464, 0.21830373822980817, 1.5939020924298253, 0.03694131138650948, 0.10559458810404099, 0.0024689384133387868, 6.783973349246389, 0.010398398911134211, 3.3524729267239373, 0.068279431966427, 2.0162990366951843, 3.28019301227849, 0.11927888704886429, 0.6265796292912164, 0.7748629517746713, 0.006076061991189065, 0.013128366319410133, 0.4488822636762288, 0.2354825292028072, 2.209225500280821, 0.19817768183084417, 0.002926153051489738, 0.019247637042804473, 0.36352165683014365, 2.7112274772259863, 1.2192855982919537, 0.044840887285913816, 0.056805932987708904, 1.3494592480103418, 0.0013576460417352443, 0.2242100355383555, 1.3618848691363492, 0.07975331131097356, 0.11782991085393398, 0.014381096371652689, 0.0017168415935754928, 0.9583105878100994, 0.024554390730871872, 
0.11866843195091219, 1.9632576325544924, 1.2646413642305825, 2.155179950498865, 0.01581167729278044, 2.544402790787023, 0.011883784671174378, 0.14682683497024251, 0.014799396019231316, 0.016969808531168, 0.0002806611450338811, 0.0027712499496886185, 0.1768016741246593, 1.0107010248365031, 1.1670828567876885, 2.0758158009107004, 3.9421882611486314, 0.44865444108316177, 0.41854806373328796, 0.6038449480901684, 0.048182822003452724, 3.872119837986092, 0.24572661055217634, 1.0824271647473727, 2.03779299856089, 2.24255529227242, 3.263758245708823, 0.6011732417563199, 0.7879168968089538, 1.5050527610036375, 0.025441763685205794, 0.04546771822433159, 1.0865854705464866, 0.5184737462324385, 0.910142321772268, 2.286511011661547, 0.03842941438688528, 1.7247351272464149, 5.824146531510543, 1.2275181867210987, 0.007635716199518582, 0.6376029845125641, 6.433776369887999, 2.6511260306327085, 4.7012118510676935, 0.004347054808087411, 1.3659555616858692, 0.24570121720140126, 4.073627148727174, 2.421056718500661, 1.9251272852839614, 1.1711801545005558, 0.27715925852263584, 5.565322930135341, 0.0009579624668431065, 3.481273317628789, 0.2516299228341998, 0.021401362682114608, 3.4939583654402, 1.983427037787052, 0.0035237228726767754, 0.0020569783634058533, 1.5677078198252883, 3.7527934289121045, 2.662250670402008, 2.710828525609125, 0.21585584855319767, 4.351194233101605e-6, 2.0510428949246498, 1.5360618675912747, 0.18582604017048446, 1.306272035777689, 0.03919095545102478, 0.6862750084437098, 5.573613903979422, 0.1703233717825925, 0.13130228089646268, 0.09800057883604318, 0.009598337681838763, 1.2360761565697687, 0.2301996571176748, 0.0012030825891027046, 2.5884984445887227, 0.014820775781172928, 0.03219035087883163, 0.3389309399247959, 0.40782563342972156, 0.174415708647101, 0.013136952864563189, 0.004999242998317165, 0.4249259448217729, 0.0013205614013213534, 0.0028123552076516883, 0.7868471672769553, 0.1833852057359362, 0.04349210760699473, 0.4056241084484814, 
0.11084892073550408, 1.0130429443072781, 1.0914632328925065, 1.4490325408381783, 0.07340161990758948, 0.07208089577358524, 0.9189331941156488, 0.9950856118029908, 1.4757545294127993e-5, 0.009985690118679568, 0.4440624343427734, 0.09396965438944942, 0.005941963128194226, 0.26524574628999026, 8.22865063234208e-6, 0.03219996480514713, 0.029022332981481352, 0.03705667977313132, 0.8565969851329813, 0.2667246033735577, 0.5873038079830065, 0.7544028206125967, 0.3534000062803314, 0.0010673040857031, 0.10641747032439862, 0.006743164041479742, 1.5151748468672672, 0.9082197477491177, 6.00611111378366e-5, 0.7592059305274265, 0.9514315044323035, 0.9155569359195882, 0.0005729044894809509, 0.13396099842071768, 0.35612588639461673, 0.03291967439965614, 1.7835250613851086, 0.46535213486548405, 2.2890401090842216e-5, 0.1697874694586328, 0.009181282586632885, 0.014428021431883162, 0.07327111169524764, 0.07719069688098099, 0.2386765746730553, 0.021151820703514634, 0.10117839668330411, 0.028587544356908454, 0.8442957886076867, 0.6669902604644344, 0.03311263636671677, 0.0260970648495329, 0.08500568162075645, 0.10965349882925712, 0.3466110038677883, 0.0662619529917042, 0.025608597520586286, 1.712442003477771, 13.300237353327269, 3.3009254902353966, 3.0610154169935098, 2.838291793764177, 1.8598323545243238, 0.030852749611046233, 0.4716075610867205, 0.29537007408977667, 0.008282943401236243, 2.2468064683584106, 0.1256023130608896, 0.49645792315995513, 5.527171786059049e-5, 0.27374446623212906, 0.00952083061718175, 0.06180425502658006, 0.01894107118695491, 0.1847286840616142, 0.1401438184149155, 0.20231760203480303, 0.08177228385295082, 0.00079104764983973, 0.010017859656537101, 0.01863291170930948, 0.02365535332003779, 0.014329635910613195, 0.39443315701474363, 0.08586140751153488, 0.0019274133276433627, 0.6851756940473963, 0.005311998850043209, 0.0017024959555922987, 0.079688894718958, 0.22008320587901115, 0.008130608536134967, 0.08510524420987187, 0.29682965621648394, 
0.028338802589761864, 0.05011453282274803, 0.02104371353267891, 0.0018935209969263623, 0.03713120970097753, 0.26128043499760933, 0.01942085910485509, 0.02409474450511849, 0.2586053664787466, 0.038606498086274865, 0.06127251225564271, 4.7751707769901346e-5, 0.1835721577393485, 0.09056041030997672, 0.00027112998387032923, 0.052429812659836525, 6.03971060870506, 2.073913523576319, 2.2353248178138454, 0.003027384071535517, 1.0051156181575258, 0.15144269337829147, 0.00010578097131303075, 0.00040398676844760964, 1.189210215892407, 0.4085511322933493, 0.3050386957011653, 0.729340228548671, 0.3861413223970852, 0.2689323689592784, 0.856462644552892, 0.6109684123883636, 0.10548780428401248, 0.2340838798352385, 0.1777146984711078, 0.002958576511911419, 0.10710288496433198, 0.2103252201669941, 1.539517691333209, 0.013022405694318602, 0.09721162451566145, 0.0004450999776792177, 0.10044411808050144, 0.37033849738013236, 0.053624759748461814, 0.02230205845284987, 7.067652115930854e-5, 0.21197286681136482, 0.1226659239582447, 0.0319877674614164, 0.08340411873281124, 0.10434005192264394, 0.00010704532120576496, 0.469542294582416, 0.42238872403469113, 0.19906499754072418, 0.025611051627757477, 4.7584503819435815, 0.15288208712720827, 1.1719774681176782, 0.046314819022991664, 0.02596355838457836, 0.00011293349290798197, 0.5438027191907789, 0.025513367954121367, 0.21552863937083835, 0.053438858962943384, 0.5447817034365408, 0.04341513767062041, 0.006306507791694165, 0.15943016116215442, 0.2675947791253387, 0.016672345318286222, 0.06227164917694632, 0.1402194735857296, 0.002166155678603786, 0.3300636020729472, 0.11718001922823885, 1.6385072924932298, 0.05414217650787199, 0.3617840605745264, 0.013052553097741575, 0.41975632687895537, 0.6506558695291546, 0.1451019274177716, 0.031169863735291094, 0.04177842691590757, 0.12613322282896455, 0.05627174041462594, 0.03593303679676961, 0.012905521172021369, 0.05407880270522265, 0.6953072159182692, 0.000619616509593883, 0.18444785797066962, 
0.6182056858262527, 0.33837639055534086, 0.005093817751051703, 0.12886772639154007, 0.122308865377569, 0.00013129235104194828, 0.0732684331815726, 0.04497864379020792, 0.03514296697730362, 0.08832896925862935, 0.031055691034788224, 0.13043429709465412, 0.10941541868394032, 0.06314815563956179, 0.41099443705370803, 0.649567483868346, 0.005868020937729122, 0.009268333268147229, 0.3735101732583879, 0.004563294063522422, 0.0002635501993347267, 0.004327989402044048, 0.5052475934321649, 0.039312233429900666, 0.00014915203351702864, 0.005954858596661586, 0.3230989285791853, 0.13772568488281842, 0.26392342935831414, 0.1558730238088185, 0.24761261238104038, 0.006686102464138949, 0.025214691642705556, 0.3555708368894157, 0.010309331346734675, 0.0025358538925498776, 0.018647625366932926, 0.013679714648336727, 0.060052297952910345, 1.7956473189152413, 0.35439058459450185, 0.0019429284643071047, 0.09083090787269694, 0.09447004403016364, 0.04575136490335676, 0.006459371442439489, 0.11261868468888157, 0.0008209421471136433, 0.12181384224474495, 0.6576428425169688, 0.020840193947308763, 0.018653988817572152, 0.03556092834428674, 1.5674535023655984, 0.03843923840553641, 0.011252969161252612, 0.005254626037166597, 0.013576570624271495, 0.5195953788449381, 0.012800912700290658, 0.08391698079169817, 0.05250545865232042, 0.024563216320698793, 0.001980376901751918, 0.0933614612590033, 0.041709311922108486, 0.00670696954123109, 0.003734452871888666, 0.017996718703772972, 0.1540165935245423, 0.4461858257869078, 0.7288837326380754, 0.0875387940731562, 0.03252182159753454, 0.594101714982668, 0.09108890408323, 1.1342438934197567, 0.3712756969567675, 0.0018965862018593643, 0.0031481901627399447, 0.04113192757760038, 0.043020824844486105, 0.005927464499773701, 0.01576473650159528, 0.004771527495940785, 0.16513905402476844, 0.000166970350633535, 0.007524462046500157, 0.011935856891056891, 0.04110830032928077, 0.027671082401395663, 0.23876646919202865, 0.005310444163454746, 3.2854037560380474, 
0.12292849398403076, 0.44532821773541253, 0.2662274979472321, 0.038634887727501496, 0.05594401038358628, 0.19755036943015952, 0.0004998730758698305, 0.012231449286693084, 0.0022726809126874096, 0.5652303300137818, 0.11488282090519102, 0.006013619799989586, 0.07881511743641124, 0.02266188087562861, 0.0008754202237340968, 0.011336656440702482, 0.00506698056456099, 0.19819404671256646, 0.010338183204109248, 0.047905709657831086, 0.0004568750550543027, 0.6708609989917624, 0.4217972007628798, 0.0026318892298838295, 0.002719595459366587, 0.020284471517469056, 0.0013320109303731043, 0.630492468309407, 0.7277083660370265, 0.7241872447638558, 0.023724489647558626, 0.05138674088550191, 0.021448912084270546, 0.8619674915569887, 0.38240964212404543, 0.011394573776611744, 0.006195031904820567, 0.5321505794035981, 0.03476180036672574, 0.20761893732966868, 6.614553720211809e-5, 0.0045540622128015075, 0.2700275145518658, 0.0004590234378390465, 0.004780601680297542, 0.003162747088893576, 0.06484165500543976, 0.0006062057863821437, 0.009312544262156252, 0.018338734134179264, 0.0024768190702265484, 0.04608470029150553, 0.0015657264045072893, 0.04264129020384843, 0.03034473852077423, 0.03137127332388284, 0.05535957456012246, 0.0016332519194396443, 2.104519814572286, 0.016457382385387628, 0.9458640190004752, 0.00044500592757794745, 0.01922651966030125, 2.3971676820772814, 0.038115265259816185, 0.012377547214999052, 0.9891011251988212, 0.11912349644921691, 0.043634576818667274, 0.03473849386968508, 0.0005251161795553847, 0.009394108093578026, 0.2012677700571407, 0.33105007341867876, 0.03670862278189504, 0.557441397840061, 0.10245342291065757, 0.0006738403062835575, 0.01995637730106744, 1.1293509658245315, 0.12438037953709719, 0.003009163689409014, 0.005973332697882615, 0.020769058359085234, 0.029675107098423546, 0.010852212268150191, 0.00289880220414353, 0.08264763970799936, 0.0011547941025428492, 0.04131703753618656, 0.0004851928442598332, 0.15876421970281124, 0.011499295615757537, 
0.13069478215454425, 0.16362702728250397, 0.04753117394466431, 0.014462090488644521, 0.3223872165627712, 0.01623668829463393, 0.028773205740287088, 0.05903144938946727, 0.027584016345301467, 0.03135092212005554, 0.01465998756732385, 0.023170153237071733, 0.005515099777242529, 0.004997741362145888, 0.0019802775112162108, 0.2394790336151675, 0.13777829486273757, 0.02355171060769022, 0.22357412371274132, 0.015639662820261795, 0.6477503708567071, 0.10550058390024825, 0.008912867718160461, 0.02655740743491055, 0.0003319678551520233, 0.09634330959638235, 0.01673604647160506, 0.0036267047995161283, 0.03330076447941435, 0.1385829568068526, 0.00792529183858699, 0.009362805469398846, 0.055454328812437115, 0.2959020601481385, 0.6536180197581768, 0.07140230259919765, 0.018291032043038915, 0.42956359413553497, 0.006847680885935154, 0.04203295066189019, 0.0012501854265941762, 0.9716207558868816, 0.0023378035023157266, 0.6465288454659239, 0.04200635699421544, 0.010768580670454402]
x2 = [0.09610941706095852, 2.6788212845035386, 0.002583702575676502, 0.3079890371414663, 0.2218233792549798, 0.07975000051131587, 0.22310573843696402, 0.0016145767310154937, 0.00021506066259480462, 0.1347348238545566, 0.7244128703617047, 0.0011828566361204594, 0.07002289701258912, 0.37366185926285417, 0.0007702828176743898, 0.002139278700879984, 0.053836464007526154, 1.650675513088037, 1.16748122425437, 0.27856093459188397, 0.019058258961000864, 0.16136521150410382, 0.08224579148911378, 0.004024360208805437, 0.7653817220257503, 4.4510953200179815, 0.25957822569124633, 0.37137008090011087, 1.0357988095863304, 1.2389842387287384, 0.4324607874947091, 5.269593589329902, 0.5621803938941623, 0.03477934257291081, 1.4619965090681062, 1.7365164047084427, 0.028582510379680535, 1.1864719119647948, 0.00017403544878260159, 0.32998873142007473, 0.23548033193902543, 0.019210839894924763, 0.4530222091064397, 0.36620714829237433, 0.033215060313819496, 0.36313208130177793, 0.01484899619985411, 4.698840747702183e-6, 0.22943861446916383, 0.0693734390306599, 0.5194246266697191, 2.254208363566848, 0.000711991818247004, 0.03227740032396568, 0.0002067872176035323, 0.0008581956571378268, 0.25524877190609313, 0.0006603091802297528, 1.362499153127377, 0.08577309821496772, 0.8919014915087333, 0.5230148340571447, 0.3562150939685192, 0.3562150939685192, 0.1096908647583281, 0.20407089519132424, 0.1968720725392859, 0.48739080592383005, 0.04061200146027687, 0.20350016132170584, 0.6452477732061614, 0.4935279378347359, 0.08269231898382176, 0.010974942559581268, 1.6116941048352218, 1.1928108856953, 0.15213049376092777, 1.1501296327247874, 4.425046102227064, 0.8991160278122725, 0.6456958173927121, 0.4580013476284833, 1.0730853116158547, 0.02257630017653147, 0.12632124277322926, 0.19019934287306395, 0.05196206322855161, 0.02692636386162153, 0.635005506129145, 0.0972247251099256, 0.22760365264589139, 0.07938909838989823, 0.00028864197438840976, 0.014932056591780294, 0.03840574490115696, 
0.8190214201900288, 0.30950765751926473, 0.01831369772284331, 0.021621005780058804, 0.9430252521456538, 0.0012103784072304167, 0.22155472706333956, 0.8627598292464865, 0.1347165223919831, 0.13961685134675358, 0.396375900478387, 0.6152459263002183, 0.061040907672904716, 0.17837935923383433, 0.3582749524196826, 0.011621160871505566, 0.2727183192294628, 0.03317357438251278, 0.005996550580435087, 0.0012854244709616827, 0.03435859450068371, 0.4084641127852834, 0.2156258703166822, 0.009693582456685656, 0.00017760650614569273, 0.11942646862758109, 0.5276094841987697, 0.09066065784318494, 0.009929957262853248, 0.052145347173973046, 0.5724172922392592, 0.014649269664895301, 0.03913792721866548, 0.0013455493423829188, 0.3953058983299156, 0.22439797677280424, 0.01227838224650002, 0.03605488081864467, 0.001879662186147144, 0.4409922694618947, 0.003946687216989506, 0.3092252935097444, 0.15935751264549847, 0.47063574021947513, 0.1922336294839871, 0.16143647680511083, 0.017894712219287973, 0.24489610764512534, 0.03229765751935813, 0.15918992781905314, 1.436885125027776, 1.0319324805303918, 0.04264171592814084, 0.2528556838239409, 0.033424334783545964, 0.0004900776090921828, 0.23388375032149153, 0.0009505823861797834, 0.1835680041079445, 5.796064815787655e-5, 4.0806986460067876, 0.08346313295260399, 0.5272289742049457, 0.9931513138005919, 1.0850581417353803e-6, 0.30014196696728745, 1.2968901911493473, 0.078242727820577, 0.02386976812780534, 0.4251328893584252, 0.1979793262794858, 2.6160527648452464e-7, 0.6926779466296006, 0.25803324018763996, 0.062248616253323265, 0.08765575761459778, 0.04120106917205882, 0.25067625966553186, 0.008562886172120192, 0.00022500000084360892, 0.020053628341366012, 0.1011039485999002, 0.004543950646515702, 0.004090312768752987, 0.024199736620066938, 0.23609140197946205, 0.08782228599374417, 0.4200776046483452, 0.1309926797100626, 0.006665392855877088, 0.34397553423331073, 0.006744069891373599, 0.5680539834354023, 0.015620473802026125, 
0.24491747714212983, 0.0021386494864975964, 0.6407900944133236, 0.3431802914429819, 0.6204188359136701, 2.63173564256491, 0.6698705449027024, 0.054904693752213116, 0.06797321779763552, 1.767005817528848, 0.0011152537338764798, 1.2128283813183613, 0.0184390507113931, 2.3766787146086807, 3.0079507122335087, 4.258404245487947, 1.3115650352289032, 2.8165805348731867, 0.025905717699369488, 0.664907800923179, 0.0011444411916685549, 1.7241447201716487, 0.8370970621184224, 3.7284109849477085, 0.5113090444749049, 1.4257963182829891, 0.5167011440300261, 0.02526566428379101, 1.3757506587868202, 0.012755883561513673, 0.37649443520629744, 1.3193112989834972, 4.815314748655017e-5, 0.06997476757293467, 0.2934423572024434, 0.14542305380085527, 0.0015900672702848092, 0.10233979942471935, 0.005630546906068476, 0.006469621166966418, 0.002964489964671273, 0.0006010368661893317, 0.005049890932956196, 0.2841651168698957, 0.03239227448552545, 0.0437699743603815, 0.25004113429080693, 0.09896237374382906, 0.008348297582788964, 0.06578790089468022, 0.06653433188654503, 0.4642751904757431, 0.39519374748374775, 0.13476408975837945, 0.014546447433743066, 0.03048312954732133, 0.4971181043026032, 0.0018662766285030344, 2.6278158118829213, 0.1835022400764073, 2.47514809706536, 0.4902999631428375, 0.6470801794417882, 3.786416800758203, 5.710830088843509, 0.20926417831010427, 0.1581341399810609, 0.027658022936031913, 0.00026667083846736194, 0.1238659117684014, 0.0063843628083706545, 0.2568548171899579, 1.0711977658880973, 0.0013630664009920536, 3.30352005277502, 0.879937452995484, 1.3685169188596928, 3.1345899818323706, 0.6720940644275492, 0.6578195141762373, 0.07532735309925823, 0.34805685820061716, 0.842015572726283, 1.7087250120723894, 0.029488252214740676, 0.24182332479734264, 2.284138680208943, 0.31894040973928284, 0.061151112591218436, 1.7538966941844074, 1.8268492450043812, 0.8870968872656356, 1.6888231118637285, 1.6087801518278428, 2.019083717365777, 0.1684290942528512, 1.0410453692042765, 
0.10908074809402096, 0.1908436042909946, 1.1155471867184612, 5.844220501773345e-8, 0.9173794532331626, 0.17207017942125807, 0.019631404095377558, 6.555129538971182e-5, 0.00982371335634011, 0.35698824532156515, 0.0007820153496087637, 0.08405923604797942, 0.008030119032879519, 0.019637432372691947, 0.08906498732956161, 0.34791881304822925, 0.1888647288187308, 0.18720383176890742, 0.013171865708443678, 2.044673633104564, 0.15459915234264454, 2.7340068188035773, 0.05613079127103884, 1.524501016813576, 0.3659967127896868, 1.767352893320553, 0.10153648063314294, 1.435364000364032, 0.2167276648091159, 0.8031751357843536, 0.032374505789632385, 0.3751207540409881, 2.149188191296807, 0.0569958712551922, 0.04589654437267971, 1.5084140556081869, 0.7492740902741857, 0.1656337374812022, 0.11493729848117477, 0.4580338241879098, 0.051352422239383536, 0.07530480863923807, 0.19846467652836752, 0.27377279396793475, 0.1987303527859933, 0.026353024881242783, 0.23472445370635528, 0.003034692953906944, 1.316931342061209, 0.8390005939171763, 0.01754342967885931, 0.2469866860516095, 0.057710642895835086, 0.051701050550059705, 0.17219583486604545, 0.07883443523184595, 0.15164278691054486, 1.021923484986976, 1.1332130435385828, 0.0956724321770484, 1.4033171247754346, 0.18694570389475104, 0.1313883092766454, 1.7455074139725115, 0.24915777761962332, 0.08740574923380957, 0.0006376809234389431, 1.0984572099386032, 0.00929021796449848, 0.08574866678963247, 0.0023646622324457658, 0.00929021785769423, 0.06095506448679574, 0.05073967134751741, 1.0752853369569975, 0.813834965372417, 0.013766648374547249, 0.4010969177159289, 0.03954033174282336, 0.0107168060720065, 0.051118343620808644, 0.7264977807132222, 0.0295394916335375, 0.40768478136598163, 0.0005107261219950305, 1.3835791642534696, 0.05082592741617635, 0.5182288196996737, 0.20847353016272271, 0.31457673259478647, 0.03854852878681378, 0.9355254491638486, 0.2808480238401504, 0.3979094654666121, 0.003576278721621333, 0.5286854122427138, 
0.09093614772575773, 0.0008980702651302059, 4.506337756442705, 0.0628419448477344, 0.5120521952985422, 0.005285361135176275, 0.1292140484977512, 0.3828250468549696, 2.8142650757929815, 0.04403330176973427, 1.4896474672190718, 1.206052602820401, 0.19957580371050726, 0.0024332886984693245, 0.6014342222138763, 0.01140756420168566, 0.005377842609848153, 0.17604244197404836, 0.05760422812917011, 0.31250381829377694, 1.1490894921751678, 0.32317018839483974, 1.477363382486283, 0.5145477322334638, 0.00044536297985431025, 0.03722728387531214, 0.10378974208240024, 0.04986984116156657, 0.10029440871562356, 0.5982456162589183, 0.07998428351762868, 1.5670089066470347, 0.8722840563055405, 0.0073673753922654595, 0.017523748116534052, 0.15166069792511527, 0.2647333138240556, 0.07087804020640305, 0.6665559662655085, 4.465561571362131, 10.40555472376894, 16.357949944570567, 1.8201377363046138, 14.44889082735589, 5.777626475633906, 0.0012396712566665454, 0.6883883094000172, 8.861699000902632, 3.1943960598716226, 0.011490096079574411, 2.3567955489581562, 6.115447204722121, 1.8989595298996975, 0.26058211927209585, 0.18129924094686936, 0.14887662516273448, 1.6450471835892526, 0.7236768307417346, 0.05850914274620028, 2.662184941920464, 0.21830373822980817, 1.5939020924298253, 0.03694131138650948, 0.10559458810404099, 0.0024689384133387868, 6.783973349246389, 0.010398398911134211, 3.3524729267239373, 0.068279431966427, 2.0162990366951843, 3.28019301227849, 0.11927888704886429, 0.6265796292912164, 0.7748629517746713, 0.006076061991189065, 0.013128366319410133, 0.4488822636762288, 0.2354825292028072, 2.209225500280821, 0.19817768183084417, 0.002926153051489738, 0.019247637042804473, 0.36352165683014365, 2.7112274772259863, 1.2192855982919537, 0.044840887285913816, 0.056805932987708904, 1.3494592480103418, 0.0013576460417352443, 0.2242100355383555, 1.3618848691363492, 0.07975331131097356, 0.11782991085393398, 0.014381096371652689, 0.0017168415935754928, 0.9583105878100994, 
0.024554390730871872, 0.11866843195091219, 1.9632576325544924, 1.2646413642305825, 2.155179950498865, 0.01581167729278044, 2.544402790787023, 0.011883784671174378, 0.14682683497024251, 0.014799396019231316, 0.016969808531168, 0.0002806611450338811, 0.0027712499496886185, 0.1768016741246593, 1.0107010248365031, 1.1670828567876885, 2.0758158009107004, 3.9421882611486314, 0.44865444108316177, 0.41854806373328796, 0.6038449480901684, 0.048182822003452724, 3.872119837986092, 0.24572661055217634, 1.0824271647473727, 2.03779299856089, 2.24255529227242, 3.263758245708823, 0.6011732417563199, 0.7879168968089538, 1.5050527610036375, 0.025441763685205794, 0.04546771822433159, 1.0865854705464866, 0.5184737462324385, 0.910142321772268, 2.286511011661547, 0.03842941438688528, 1.7247351272464149, 5.824146531510543, 1.2275181867210987, 0.007635716199518582, 0.6376029845125641, 6.433776369887999, 2.6511260306327085, 4.7012118510676935, 0.004347054808087411, 1.3659555616858692, 0.24570121720140126, 4.073627148727174, 2.421056718500661, 1.9251272852839614, 1.1711801545005558, 0.27715925852263584, 5.565322930135341, 0.0009579624668431065, 3.481273317628789, 0.2516299228341998, 0.021401362682114608, 3.4939583654402, 1.983427037787052, 0.0035237228726767754, 0.0020569783634058533, 1.5677078198252883, 3.7527934289121045, 2.662250670402008, 2.710828525609125, 0.21585584855319767, 4.351194233101605e-6, 2.0510428949246498, 1.5360618675912747, 0.18582604017048446, 1.306272035777689, 0.03919095545102478, 0.6862750084437098, 5.573613903979422, 0.1703233717825925, 0.13130228089646268, 0.09800057883604318, 0.009598337681838763, 1.2360761565697687, 0.2301996571176748, 0.0012030825891027046, 2.5884984445887227, 0.014820775781172928, 0.03219035087883163, 0.3389309399247959, 0.40782563342972156, 0.174415708647101, 0.013136952864563189, 0.004999242998317165, 0.4249259448217729, 0.0013205614013213534, 0.0028123552076516883, 0.7868471672769553, 0.1833852057359362, 0.04349210760699473, 
0.4056241084484814, 0.11084892073550408, 1.0130429443072781, 1.0914632328925065, 1.4490325408381783, 0.07340161990758948, 0.07208089577358524, 0.9189331941156488, 0.9950856118029908, 1.4757545294127993e-5, 0.009985690118679568, 0.4440624343427734, 0.09396965438944942, 0.005941963128194226, 0.26524574628999026, 8.22865063234208e-6, 0.03219996480514713, 0.029022332981481352, 0.03705667977313132, 0.8565969851329813, 0.2667246033735577, 0.5873038079830065, 0.7544028206125967, 0.3534000062803314, 0.0010673040857031, 0.10641747032439862, 0.006743164041479742, 1.5151748468672672, 0.9082197477491177, 6.00611111378366e-5, 0.7592059305274265, 0.9514315044323035, 0.9155569359195882, 0.0005729044894809509, 0.13396099842071768, 0.35612588639461673, 0.03291967439965614, 1.7835250613851086, 0.46535213486548405, 2.2890401090842216e-5, 0.1697874694586328, 0.009181282586632885, 0.014428021431883162, 0.07327111169524764, 0.07719069688098099, 0.2386765746730553, 0.021151820703514634, 0.10117839668330411, 0.028587544356908454, 0.8442957886076867, 0.6669902604644344, 0.03311263636671677, 0.0260970648495329, 0.08500568162075645, 0.10965349882925712, 0.3466110038677883, 0.0662619529917042, 0.025608597520586286, 1.712442003477771, 13.300237353327269, 3.3009254902353966, 3.0610154169935098, 2.838291793764177, 1.8598323545243238, 0.030852749611046233, 0.4716075610867205, 0.29537007408977667, 0.008282943401236243, 2.2468064683584106, 0.1256023130608896, 0.49645792315995513, 5.527171786059049e-5, 0.27374446623212906, 0.00952083061718175, 0.06180425502658006, 0.01894107118695491, 0.1847286840616142, 0.1401438184149155, 0.20231760203480303, 0.08177228385295082, 0.00079104764983973, 0.010017859656537101, 0.01863291170930948, 0.02365535332003779, 0.014329635910613195, 0.39443315701474363, 0.08586140751153488, 0.0019274133276433627, 0.6851756940473963, 0.005311998850043209, 0.0017024959555922987, 0.079688894718958, 0.22008320587901115, 0.008130608536134967, 0.08510524420987187, 0.29682965621648394, 
0.028338802589761864, 0.05011453282274803, 0.02104371353267891, 0.0018935209969263623, 0.03713120970097753, 0.26128043499760933, 0.01942085910485509, 0.02409474450511849, 0.2586053664787466, 0.038606498086274865, 0.06127251225564271, 4.7751707769901346e-5, 0.1835721577393485, 0.09056041030997672, 0.00027112998387032923, 0.052429812659836525, 6.03971060870506, 2.073913523576319, 2.2353248178138454, 0.003027384071535517, 1.0051156181575258, 0.15144269337829147, 0.00010578097131303075, 0.00040398676844760964, 1.189210215892407, 0.4085511322933493, 0.3050386957011653, 0.729340228548671, 0.3861413223970852, 0.2689323689592784, 0.856462644552892, 0.6109684123883636, 0.10548780428401248, 0.2340838798352385, 0.1777146984711078, 0.002958576511911419, 0.10710288496433198, 0.2103252201669941, 1.539517691333209, 0.013022405694318602, 0.09721162451566145, 0.0004450999776792177, 0.10044411808050144, 0.37033849738013236, 0.053624759748461814, 0.02230205845284987, 7.067652115930854e-5, 0.21197286681136482, 0.1226659239582447, 0.0319877674614164, 0.08340411873281124, 0.10434005192264394, 0.00010704532120576496, 0.469542294582416, 0.42238872403469113, 0.19906499754072418, 0.025611051627757477, 4.7584503819435815, 0.15288208712720827, 1.1719774681176782, 0.046314819022991664, 0.02596355838457836, 0.00011293349290798197, 0.5438027191907789, 0.025513367954121367, 0.21552863937083835, 0.053438858962943384, 0.5447817034365408, 0.04341513767062041, 0.006306507791694165, 0.15943016116215442, 0.2675947791253387, 0.016672345318286222, 0.06227164917694632, 0.1402194735857296, 0.002166155678603786, 0.3300636020729472, 0.11718001922823885, 1.6385072924932298, 0.05414217650787199, 0.3617840605745264, 0.013052553097741575, 0.41975632687895537, 0.6506558695291546, 0.1451019274177716, 0.031169863735291094, 0.04177842691590757, 0.12613322282896455, 0.05627174041462594, 0.03593303679676961, 0.012905521172021369, 0.05407880270522265, 0.6953072159182692, 0.000619616509593883, 0.18444785797066962, 
0.6182056858262527, 0.33837639055534086, 0.005093817751051703, 0.12886772639154007, 0.122308865377569, 0.00013129235104194828, 0.0732684331815726, 0.04497864379020792, 0.03514296697730362, 0.08832896925862935, 0.031055691034788224, 0.13043429709465412, 0.10941541868394032, 0.06314815563956179, 0.41099443705370803, 0.649567483868346, 0.005868020937729122, 0.009268333268147229, 0.3735101732583879, 0.004563294063522422, 0.0002635501993347267, 0.004327989402044048, 0.5052475934321649, 0.039312233429900666, 0.00014915203351702864, 0.005954858596661586, 0.3230989285791853, 0.13772568488281842, 0.26392342935831414, 0.1558730238088185, 0.24761261238104038, 0.006686102464138949, 0.025214691642705556, 0.3555708368894157, 0.010309331346734675, 0.0025358538925498776, 0.018647625366932926, 0.013679714648336727, 0.060052297952910345, 1.7956473189152413, 0.35439058459450185, 0.0019429284643071047, 0.09083090787269694, 0.09447004403016364, 0.04575136490335676, 0.006459371442439489, 0.11261868468888157, 0.0008209421471136433, 0.12181384224474495, 0.6576428425169688, 0.020840193947308763, 0.018653988817572152, 0.03556092834428674, 1.5674535023655984, 0.03843923840553641, 0.011252969161252612, 0.005254626037166597, 0.013576570624271495, 0.5195953788449381, 0.012800912700290658, 0.08391698079169817, 0.05250545865232042, 0.024563216320698793, 0.001980376901751918, 0.0933614612590033, 0.041709311922108486, 0.00670696954123109, 0.003734452871888666, 0.017996718703772972, 0.1540165935245423, 0.4461858257869078, 0.7288837326380754, 0.0875387940731562, 0.03252182159753454, 0.594101714982668, 0.09108890408323, 1.1342438934197567, 0.3712756969567675, 0.0018965862018593643, 0.0031481901627399447, 0.04113192757760038, 0.043020824844486105, 0.005927464499773701, 0.01576473650159528, 0.004771527495940785, 0.16513905402476844, 0.000166970350633535, 0.007524462046500157, 0.011935856891056891, 0.04110830032928077, 0.027671082401395663, 0.23876646919202865, 0.005310444163454746, 3.2854037560380474, 
0.12292849398403076, 0.44532821773541253, 0.2662274979472321, 0.038634887727501496, 0.05594401038358628, 0.19755036943015952, 0.0004998730758698305, 0.012231449286693084, 0.0022726809126874096, 0.5652303300137818, 0.11488282090519102, 0.006013619799989586, 0.07881511743641124, 0.02266188087562861, 0.0008754202237340968, 0.011336656440702482, 0.00506698056456099, 0.19819404671256646, 0.010338183204109248, 0.047905709657831086, 0.0004568750550543027, 0.6708609989917624, 0.4217972007628798, 0.0026318892298838295, 0.002719595459366587, 0.020284471517469056, 0.0013320109303731043, 0.630492468309407, 0.7277083660370265, 0.7241872447638558, 0.023724489647558626, 0.05138674088550191, 0.021448912084270546, 0.8619674915569887, 0.38240964212404543, 0.011394573776611744, 0.006195031904820567, 0.5321505794035981, 0.03476180036672574, 0.20761893732966868, 6.614553720211809e-5, 0.0045540622128015075, 0.2700275145518658, 0.0004590234378390465, 0.004780601680297542, 0.003162747088893576, 0.06484165500543976, 0.0006062057863821437, 0.009312544262156252, 0.018338734134179264, 0.0024768190702265484, 0.04608470029150553, 0.0015657264045072893, 0.04264129020384843, 0.03034473852077423, 0.03137127332388284, 0.05535957456012246, 0.0016332519194396443, 2.104519814572286, 0.016457382385387628, 0.9458640190004752, 0.00044500592757794745, 0.01922651966030125, 2.3971676820772814, 0.038115265259816185, 0.012377547214999052, 0.9891011251988212, 0.11912349644921691, 0.043634576818667274, 0.03473849386968508, 0.0005251161795553847, 0.009394108093578026, 0.2012677700571407, 0.33105007341867876, 0.03670862278189504, 0.557441397840061, 0.10245342291065757, 0.0006738403062835575, 0.01995637730106744, 1.1293509658245315, 0.12438037953709719, 0.003009163689409014, 0.005973332697882615, 0.020769058359085234, 0.029675107098423546, 0.010852212268150191, 0.00289880220414353, 0.08264763970799936, 0.0011547941025428492, 0.04131703753618656, 0.0004851928442598332, 0.15876421970281124, 0.011499295615757537, 
0.13069478215454425, 0.16362702728250397, 0.04753117394466431, 0.014462090488644521, 0.3223872165627712, 0.01623668829463393, 0.028773205740287088, 0.05903144938946727, 0.027584016345301467, 0.03135092212005554, 0.01465998756732385, 0.023170153237071733, 0.005515099777242529, 0.004997741362145888, 0.0019802775112162108, 0.2394790336151675, 0.13777829486273757, 0.02355171060769022, 0.22357412371274132, 0.015639662820261795, 0.6477503708567071, 0.10550058390024825, 0.008912867718160461, 0.02655740743491055, 0.0003319678551520233, 0.09634330959638235, 0.01673604647160506, 0.0036267047995161283, 0.03330076447941435, 0.1385829568068526, 0.00792529183858699, 0.009362805469398846, 0.055454328812437115, 0.2959020601481385, 0.6536180197581768, 0.07140230259919765, 0.018291032043038915, 0.42956359413553497, 0.006847680885935154, 0.04203295066189019, 0.0012501854265941762, 0.9716207558868816, 0.0023378035023157266, 0.6465288454659239, 0.04200635699421544]
df = DataFrame(y=y, x1=x1, x2=x2)
f = @formula(y ~ x1 + x2)
lr2 = regress(f, df, cov="nw")
@test isapprox(0.14294512360920375, lr2.R2)
@test isapprox(0.141222400239574, lr2.ADJR2)
@test [] == lr2.white_types
@test [:nw] == lr2.hac_types
@test isapprox([0.04903317561314946, 0.06022964615347672, 0.07414158887070771], lr2.hac_stderrors[1])
@test isapprox([6.306088088824664, 4.169867149620186, 2.8751798612414263], lr2.hac_t_values[1])
@test isapprox([4.296835456090843e-10, 3.313459203977476e-5, 0.004124044653585188], lr2.hac_p_values[1])
@test isapprox([0.2129872220097398, 0.13295791474690624, 0.06767857989564119], lr2.hac_ci_low[1])
@test isapprox([0.40542782737291977, 0.3693413311103545, 0.35866222650735935], lr2.hac_ci_up[1])
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 3395 |
# Regression test for k-fold cross-validation: fit Price ~ 1 + Year + Mileage on a
# used-car dataset split into 5 folds, and compare every per-fold fit/error statistic
# against golden values recorded from a known-good run.
@testset "K-Fold cross-validation" begin
    # Golden per-fold statistics (one entry per fold, 5 folds).
    fold_r2 = [0.46405795117804827, 0.4314854720368352, 0.4471605188073827, 0.4650281295531955, 0.5353418610442862]
    fold_adjr2 = [0.45098619388970795, 0.41761926403773364, 0.433839085525633, 0.45213724110869413, 0.5241452793827027]
    fold_train_mse = [1.5167244045363427e6, 1.728038497400901e6, 1.766104797959807e6, 1.824818743281271e6, 1.6143508602587546e6]
    fold_train_rmse = [1231.5536547533536, 1314.548780913398, 1328.9487567095305, 1350.8585208234322, 1270.5710764293176]
    fold_test_mse = [2.430733983431065e6, 1.619864225581008e6, 1.447032419711302e6, 1.1981745824179968e6, 2.5641127272976018e6]
    fold_test_rmse = [1559.0811343323558, 1272.7388677890717, 1202.926606119967, 1094.61161259051, 1601.28471150436]
    # Observations: model year, odometer reading, and sale price of used cars.
    model_year = [2011, 2011, 2011, 2011, 2012, 2010, 2011, 2010, 2011, 2010, 2010, 2011, 2011, 2010, 2011, 2011, 2010, 2010, 2011, 2011, 2010, 2010, 2011, 2010, 2009, 2010, 2010, 2010, 2010, 2009, 2010, 2009, 2011, 2011, 2009, 2010, 2011, 2010, 2010, 2010, 2010, 2010, 2010, 2009, 2010, 2010, 2010, 2010, 2009, 2009, 2010, 2010, 2010, 2009, 2010, 2010, 2010, 2009, 2009, 2009, 2010, 2010, 2009, 2009, 2009, 2010, 2010, 2010, 2009, 2009, 2009, 2009, 2010, 2009, 2010, 2009, 2009, 2010, 2010, 2010, 2009, 2009, 2007, 2010, 2010, 2010, 2009, 2009, 2009, 2008, 2009, 2009, 2009, 2010, 2010, 2008, 2008, 2009, 2009, 2008, 2009, 2010, 2008, 2009, 2010, 2008, 2008]
    odometer = [7413, 10926, 7351, 11613, 8367, 25125, 27393, 21026, 32655, 36116, 40539, 9199, 9388, 32058, 15367, 16368, 19926, 36049, 11662, 32069, 16035, 39943, 36685, 24920, 20019, 29338, 7784, 35636, 22029, 33107, 36306, 34419, 4867, 18948, 24030, 33036, 23967, 37905, 28955, 11165, 44813, 36469, 22143, 34046, 32703, 35894, 38275, 24855, 29501, 35394, 36447, 35318, 24929, 23785, 15167, 13541, 20278, 46126, 53733, 21108, 21721, 26716, 26887, 36252, 9450, 31414, 37185, 48174, 50533, 36713, 34888, 38380, 35574, 27528, 33302, 43369, 64055, 41342, 34503, 16573, 32403, 34846, 39665, 21325, 32743, 40058, 42325, 44518, 53902, 127327, 27136, 45813, 31538, 29517, 35871, 49787, 36323, 39211, 44789, 45996, 54988, 49720, 36322, 39222, 44089, 45993, 50988]
    sale_price = [21992, 20995, 19995, 17809, 17500, 17495, 17000, 16995, 16995, 16995, 16995, 16992, 16950, 16950, 16000, 15999, 15999, 15995, 15992, 15992, 15988, 15980, 15899, 15889, 15688, 15500, 15499, 15499, 15298, 14999, 14999, 14995, 14992, 14992, 14992, 14990, 14989, 14906, 14900, 14893, 14761, 14699, 14677, 14549, 14499, 14495, 14495, 14480, 14477, 14355, 14299, 14275, 14000, 13999, 13997, 13995, 13995, 13995, 13995, 13992, 13992, 13992, 13992, 13991, 13950, 13950, 13950, 13895, 13888, 13845, 13799, 13742, 13687, 13663, 13599, 13584, 13425, 13384, 13383, 13350, 12999, 12998, 12997, 12995, 12995, 12995, 12995, 12995, 12995, 12995, 12992, 12990, 12988, 12849, 12780, 12777, 12704, 12595, 12507, 12500, 12500, 12773, 12704, 12595, 12519, 12333, 12399]
    cars = DataFrame(Year = model_year, Mileage = odometer, Price = sale_price)
    # 5 folds, seed 1, no shuffling — must match the settings the golden values were
    # recorded with.
    result = kfold(@formula(Price ~ 1 + Year + Mileage), cars, 5, 1, false)
    # Compare each recorded statistic vector against the corresponding result field.
    for (reference, observed) in (
            (fold_r2, result.R2),
            (fold_adjr2, result.ADJR2),
            (fold_train_mse, result.TRAIN_MSE),
            (fold_train_rmse, result.TRAIN_RMSE),
            (fold_test_mse, result.TEST_MSE),
            (fold_test_rmse, result.TEST_RMSE),
        )
        @test isapprox(reference, observed)
    end
end
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 3242 | using StatsBase: isapprox
@testset "Less than full rank regression behaviour" begin
# The package does not remove collinear terms (when there is a less than full rank matrix).
# This test only checks that the behaviour is similar to other software package (such as R).
year = [2011, 2011, 2011, 2011, 2012, 2010, 2011, 2010, 2011, 2010, 2010, 2011, 2011, 2010, 2011, 2011, 2010, 2010, 2011, 2011, 2010, 2010, 2011, 2010, 2009, 2010, 2010, 2010, 2010, 2009, 2010, 2009, 2011, 2011, 2009, 2010, 2011, 2010, 2010, 2010, 2010, 2010, 2010, 2009, 2010, 2010, 2010, 2010, 2009, 2009, 2010, 2010, 2010, 2009, 2010, 2010, 2010, 2009, 2009, 2009, 2010, 2010, 2009, 2009, 2009, 2010, 2010, 2010, 2009, 2009, 2009, 2009, 2010, 2009, 2010, 2009, 2009, 2010, 2010, 2010, 2009, 2009, 2007, 2010, 2010, 2010, 2009, 2009, 2009, 2008, 2009, 2009, 2009, 2010, 2010, 2008, 2008, 2009, 2009, 2008, 2009, 2010, 2008, 2009, 2010, 2008, 2008]
mileage = [7413, 10926, 7351, 11613, 8367, 25125, 27393, 21026, 32655, 36116, 40539, 9199, 9388, 32058, 15367, 16368, 19926, 36049, 11662, 32069, 16035, 39943, 36685, 24920, 20019, 29338, 7784, 35636, 22029, 33107, 36306, 34419, 4867, 18948, 24030, 33036, 23967, 37905, 28955, 11165, 44813, 36469, 22143, 34046, 32703, 35894, 38275, 24855, 29501, 35394, 36447, 35318, 24929, 23785, 15167, 13541, 20278, 46126, 53733, 21108, 21721, 26716, 26887, 36252, 9450, 31414, 37185, 48174, 50533, 36713, 34888, 38380, 35574, 27528, 33302, 43369, 64055, 41342, 34503, 16573, 32403, 34846, 39665, 21325, 32743, 40058, 42325, 44518, 53902, 127327, 27136, 45813, 31538, 29517, 35871, 49787, 36323, 39211, 44789, 45996, 54988, 49720, 36322, 39222, 44089, 45993, 50988]
price = [21992, 20995, 19995, 17809, 17500, 17495, 17000, 16995, 16995, 16995, 16995, 16992, 16950, 16950, 16000, 15999, 15999, 15995, 15992, 15992, 15988, 15980, 15899, 15889, 15688, 15500, 15499, 15499, 15298, 14999, 14999, 14995, 14992, 14992, 14992, 14990, 14989, 14906, 14900, 14893, 14761, 14699, 14677, 14549, 14499, 14495, 14495, 14480, 14477, 14355, 14299, 14275, 14000, 13999, 13997, 13995, 13995, 13995, 13995, 13992, 13992, 13992, 13992, 13991, 13950, 13950, 13950, 13895, 13888, 13845, 13799, 13742, 13687, 13663, 13599, 13584, 13425, 13384, 13383, 13350, 12999, 12998, 12997, 12995, 12995, 12995, 12995, 12995, 12995, 12995, 12992, 12990, 12988, 12849, 12780, 12777, 12704, 12595, 12507, 12500, 12500, 12773, 12704, 12595, 12519, 12333, 12399]
df = DataFrame(Year = year, Mileage = mileage , Price = price)
clm = regress(@formula( Price ~ 1 + Year + Mileage ), df)
@test isapprox([-2.1875899220039356e6, 1096.1497198779816, -0.023836779513595294] , clm.coefs)
@test isapprox([352441.774385393, 175.28249088168033, 0.009841450356317822] , clm.stderrors)
@test isapprox([-6.2069541155238275, 6.2536178848457125, -2.4220799425455644], clm.t_values)
@test isapprox(3, clm.p)
@test isapprox(1.697508841124793e6, clm.MSE)
@test true == clm.intercept
@test isapprox(0.4643179971701117, clm.R2)
@test isapprox(0.4540164201926138, clm.ADJR2)
@test isapprox(1302.884814987416, clm.RMSE)
@test isapprox([1.1199586251463344e-8, 9.016454363548383e-9, 0.017161942174616873], clm.p_values)
end | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 9673 |
@testset "no intercept model1" begin
x = [-5.0, -4.8, -4.6, -4.4, -4.2, -4.0, -3.8, -3.6, -3.4, -3.2, -3.0, -2.8, -2.6, -2.4, -2.2, -2.0, -1.8, -1.6, -1.4, -1.2, -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0]
y = [32.76095293186829, 32.114646881731815, 30.482249790567508, 30.172132123809526, 28.858546806301607, 27.55525364876047, 26.748316399220183, 25.47632057519211, 24.178757213602406, 23.115113146604088, 21.67768685139419, 20.667430892918777, 19.25030326250073, 17.969338480523586, 16.92778066296735, 16.01093498430364, 14.972217418295738, 14.129273625410654, 12.528548495622791, 11.995655292690893, 10.437691863433523, 9.04875056404323, 8.352545547062054, 6.595027421481294, 5.769353744384407, 4.656766397706274, 3.2769464299313293, 2.557398138612538, 1.8697655690559674, 0.1758423094007262, -1.1812180900645513, -2.4595328718334226, -3.224902727206241, -4.52850811714283, -5.620262454732497, -6.65705617649476, -8.109350054557144, -8.964028166630365, -9.764063784932889, -11.49260887582183, -12.144819717782108, -13.843175083307838, -14.603379688823846, -15.335111221096993, -16.28627855774275, -17.73382957392828, -19.156926371425502, -20.641255379884615, -21.17362575264, -22.5708673184868, -23.072613984429083]
df = DataFrame(x=x, y=y)
lrnoint = regress(@formula(y ~ 0 + x), df, req_stats=[:default, :aic, :vif])
@test isapprox(0.9252939081, lrnoint.R2)
@test isapprox(0.9237997863, lrnoint.ADJR2)
@test isapprox(160.90768623, lrnoint.AIC)
@test isapprox([0.], lrnoint.VIF)
@test isapprox(619.2894615, lrnoint.f_value)
@test leaq(lrnoint.f_pvalue, 0.001)
results = predict_in_sample(lrnoint, df, Ξ±=0.05, req_stats=["default", "rstudent"])
@test isapprox([28.385804339677044, 27.25037216608996, 26.114939992502876, 24.9795078189158, 23.84407564532872, 22.708643471741635, 21.57321129815455, 20.43777912456747, 19.30234695098039, 18.16691477739331, 17.031482603806225, 15.896050430219143, 14.760618256632062, 13.62518608304498, 12.4897539094579, 11.354321735870817, 10.218889562283735, 9.083457388696655, 7.948025215109571, 6.81259304152249, 5.677160867935409, 4.541728694348327, 3.406296520761245, 2.2708643471741636, 1.1354321735870818, 0.0, -1.1354321735870818, -2.2708643471741636, -3.406296520761245, -4.541728694348327, -5.677160867935409, -6.81259304152249, -7.948025215109571, -9.083457388696655, -10.218889562283735, -11.354321735870817, -12.4897539094579, -13.62518608304498, -14.760618256632062, -15.896050430219143, -17.031482603806225, -18.16691477739331, -19.30234695098039, -20.43777912456747, -21.57321129815455, -22.708643471741635, -23.84407564532872, -24.9795078189158, -26.114939992502876, -27.25037216608996, -28.385804339677044]
, results.predicted)
@test isapprox([0.938033258871313, 1.0426179367896207, 0.9319648731276958, 1.1097378448312425, 1.0685340559242513, 1.0299419326232258, 1.0993609703148552, 1.0677740527805426, 1.0309373482675432, 1.0448144116724511, 0.978326230602586, 1.0038487427946268, 0.9422793707140289, 0.9101464457061582, 0.9291611309816967, 0.9748389070719975, 0.9946108960924838, 1.0563303532318353, 0.9563424123040439, 1.0843302339300107, 0.993564396349128, 0.9392838550370636, 1.032388388246509, 0.9000336054567951, 0.9655586062937045, 0.97036522335683, 0.9185810103197727, 1.007013102837162, 1.1028816259631355, 0.9840106274248026, 0.9373227029099432, 0.9074862070811145, 0.9866973636129569, 0.9515650780179017, 0.9616191528659762, 0.9835165279774034, 0.9168886277207625, 0.9778135787709513, 1.0509330791271387, 0.9250288747899885, 1.0300270784500691, 0.910564302144073, 0.992654638302146, 1.0816862270131862, 1.1237223658970572, 1.0578001520839455, 0.9973186697257519, 0.9236477076314706, 1.0570827343881202, 1.0021858611232244, 1.1440369678715858]
, results.rstudent)
y = [3.547744106900422, 9.972950249405148, 16.471345464154027, 22.46768807351274, 20.369933318011807, 21.18590757820348, 29.962620198209024, 30.684400502954748, 29.28429078492597, 34.272759386588824, 33.05504692986838, 45.09273876302829, 45.28374262744938, 54.54563960566191, 46.86173948296966, 46.85926120310666, 67.54337216713414, 66.0400205145086, 64.77001443647681, 63.98759256558095, 15.016939388490687, 12.380885701920008, 10.221963402745288, 24.826987790646672, 22.231511892187548, 18.05125492502642, 21.809661866284717, 28.533306224702308, 24.70476800009685, 39.592181057942916, 37.425282624708906, 33.89372811236659, 44.07442335927311, 42.61943719318272, 52.32531447897055, 44.12096163880534, 49.19965880584543, 52.304114239221036, 59.79488104919937, 65.00419916894333, 10.978506745605673, 12.944637189081874, 16.96525561080181, 15.93489355798633, 33.559135307088305, 28.887571687420433, 28.296418324622593, 32.2405215537489, 31.466024917490223, 37.78855308849255, 48.09725215994402, 40.70190542438069, 43.2525436240948, 44.75159242105558, 52.553066965996074, 53.265851121437095, 60.092700204726015, 62.78014347092756, 69.07895282754228, 74.27890668517966, 10.57283140105129, 7.290600131361426, 16.10299402050687, 14.428954773831242, 22.418180226673464, 28.21022852582732, 29.60436622143203, 33.648588929669636, 37.451930576147994, 43.85548900583812, 41.44168340404242, 43.48815266671309, 54.72835160956764, 53.55177468229062, 50.62088969800169, 50.863408563713335, 47.40251347184729, 64.29413401802115, 64.20687412126479, 73.66653216085827, 15.820723594639162, 21.973463928234743, 23.804440715385162, 15.139822408433913, 30.015089890369985, 28.08454394421385, 33.041880065463566, 28.49429418531324, 38.33763660905526, 34.503176013521724, 48.748946870573235, 45.45351516824085, 53.522908096188765, 45.95216131836423, 63.13018379633756, 63.4236036208151, 65.28265579397677, 55.43500146374922, 76.50187470137375, 67.22421998359037, 19.289152114521762, 27.63557010588222, 17.700031078096686, 
25.462368278660957, 22.94613580060685, 36.19731649621813, 35.22216995579936, 25.526434727578838, 45.882864925557726, 38.71433797181679, 50.617762276434554, 41.96951039650285, 52.265123328453214, 45.383991138243765, 62.7923270665056, 64.40670612696276, 74.89775274405821, 72.89056716347118, 66.37071343209973, 76.32913721410918, 29.923781174452596, 16.465832617450324, 30.530915733275403, 30.512411156491954, 28.04012351007911, 26.315140074869376, 37.18491928428231, 41.958551085353626, 48.370736387628895, 49.419917216561835, 48.67296268029715, 55.484477112881166, 51.120639229597025, 54.797092949987224, 61.92608065418044, 69.79495109420618, 64.43521939892794, 74.35280205157255, 77.22355341160723, 68.90654715174155, 10.87752158238755, 20.25006748725279, 31.71957876614495, 31.42898857400639, 32.505052996368256, 34.1225641368903, 35.66496173153403, 31.76371648643483, 42.97107454773545, 47.41827763710622]
x2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8]
x3 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
df = DataFrame(y=y, x2=x2, x3=x3)
lrnoint = regress(@formula(y ~ 0 + x2 +x3), df, req_stats=["default", "pcorr1", "pcorr2", "scorr1", "scorr2", "vif"])
@test isapprox([207017.27329467665, 84364.93565847247], lrnoint.Type1SS)
@test isapprox([6502.255171530228, 84364.93565847244], lrnoint.Type2SS)
@test isapprox([0.9814816505541067, 0.9557504161506739], lrnoint.pcorr1)
@test isapprox([0.6247239646866073, 0.9557504161506739], lrnoint.pcorr2)
@test isapprox([0.7010686580206047, 0.28570375449728647], lrnoint.scorr1)
@test isapprox([0.02202003356851988, 0.28570375449728636], lrnoint.scorr2)
@test isapprox([1.0099808403511108, 1.0099808403511117], lrnoint.VIF)
@test isapprox(0.9867724125178912, lrnoint.R2)
@test isapprox(0.9865936613357005, lrnoint.ADJR2)
@test isapprox(5520.3685951, lrnoint.f_value)
@test leaq(lrnoint.f_pvalue, 0.001)
end
@testset "no intercept weighted regression" begin
tw = [
2.3 7.4 0.058
3.0 7.6 0.073
2.9 8.2 0.114
4.8 9.0 0.144
1.3 10.4 0.151
3.6 11.7 0.119
2.3 11.7 0.119
4.6 11.8 0.114
3.0 12.4 0.073
5.4 12.9 0.035
6.4 14.0 0
] # data from https://blogs.sas.com/content/iml/2016/10/05/weighted-regression.html
df = DataFrame(tw, [:y,:x,:w])
f = @formula(y ~ 0 + x)
lm = regress(f, df, weights="w")
@test isapprox([0.3056575637534476], lm.coefs)
@test isapprox(0.8626294380695502, lm.R2)
@test isapprox(0.8473660422995002, lm.ADJR2)
@test isapprox([0.040658241629241754], lm.stderrors)
@test isapprox([3.6247531175150984e-5], lm.p_values)
@test isapprox(56.51622031, lm.f_value)
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 4175 | @testset "ridge regression" begin
M = [ 1. 1. 1. 1.
1. 2. 1. 3.
1. 3. 1. 3.
1. 1. -1. 2.
1. 2. -1. 2.
1. 3. -1. 1. ]
df = DataFrame(M, [:x0, :x1, :x2, :y])
# Simple
t_coefs = [1.5454545454545454, 0.22727272727272727, 0.30303030303030304]
t_mse = 1.03030303030303
t_rmse = 1.0150384378451045
t_vifs = [0.0, 0.8264462809917354, 0.8264462809917352]
t_r2 = 0.2272727272727274
# coefs, mse, rmse, r2, vifs = ridge(@formula(y ~ x1 + x2), df, 0.1)
rr = ridge(@formula(y ~ x1 + x2), df, 0.1)
@test isapprox(t_coefs, rr.coefs)
@test isapprox(t_mse, rr.MSE)
@test isapprox(t_rmse, rr.RMSE)
@test isapprox(t_vifs, rr.VIF)
@test isapprox(t_r2, rr.R2)
# series of simple ridge regression
t_intercepts = [1.5, 1.5454545454545454, 1.5833333333333333]
t_x1s = [0.25, 0.22727272727272727, 0.20833333333333334]
t_x2s = [0.3333333333333333, 0.30303030303030304, 0.2777777777777778]
t_vifintercepts = [0.0, 0.0, 0.0]
t_vifx1s = [1.0, 0.8264462809917354, 0.6944444444444445]
t_vifx2s = [0.9999999999999998, 0.8264462809917352, 0.6944444444444445]
t_mse = [1.0277777777777777, 1.03030303030303, 1.0362654320987652]
t_rmse = [1.0137937550497031, 1.0150384378451045, 1.0179712334338162]
t_r2 = [0.22916666666666674, 0.2272727272727274, 0.22280092592592604]
t_adjr2 = [-0.2847222222222221, -0.2878787878787876, -0.2953317901234567]
rdf = ridge(@formula(y ~ x1 + x2), df, 0:0.1:0.2)
coefs_names = ["(Intercept)", "x1" , "x2" ]
vifs_names = "vif " .* coefs_names
@test isapprox(t_intercepts, rdf[!, coefs_names[1]])
@test isapprox(t_x1s, rdf[!, coefs_names[2]])
@test isapprox(t_x2s, rdf[!, coefs_names[3]])
@test isapprox(t_vifintercepts, rdf[!, vifs_names[1]])
@test isapprox(t_vifx1s, rdf[!, vifs_names[2]])
@test isapprox(t_vifx2s, rdf[!, vifs_names[3]])
@test isapprox(t_mse, rdf[!, "MSE"])
@test isapprox(t_rmse, rdf[!, "RMSE"])
@test isapprox(t_r2, rdf[!, "R2"])
@test isapprox(t_adjr2, rdf[!, "ADJR2"])
# test weighted ridge regression
using LinearRegressionKit
using Test, DataFrames, StatsModels
tw = [
2.3 7.4 0.058
3.0 7.6 0.073
2.9 8.2 0.114
4.8 9.0 0.144
1.3 10.4 0.151
3.6 11.7 0.119
2.3 11.7 0.119
4.6 11.8 0.114
3.0 12.4 0.073
5.4 12.9 0.035
12. 11. -0.1
]
df = DataFrame(tw, [:y,:x,:w])
f = @formula(y ~ x)
lm, ps = regress(f, df, "all", weights="w", req_stats=["default", "vif"])
lm = regress(f, df, weights="w", req_stats=["default", "vif"])
rr = ridge(f, df, 0., weights="w")
@test isapprox(lm.coefs, rr.coefs)
@test isapprox(lm.VIF, rr.VIF)
@test isapprox(lm.R2, rr.R2)
@test isapprox(lm.ADJR2, rr.ADJR2)
@test isapprox(lm.MSE, rr.MSE)
@test isapprox(lm.RMSE, rr.RMSE)
t_predis = predict_in_sample(lm, df, req_stats=req_stats=[:predicted, :residuals])
predis = predict_in_sample(rr, df)
@test isapprox(t_predis.predicted, predis.predicted)
@test isapprox(t_predis.residuals, predis.residuals)
predos = predict_out_of_sample(rr, df)
@test isapprox(t_predis.predicted, predos.predicted)
wrdf = ridge(f, df, 0:0.1:0.2, weights="w")
@test isapprox(0.:0.1:0.2 , wrdf.k)
@test isapprox([0.1828582250674794, 0.18288116845535146, 0.18293534034338269], wrdf.MSE)
@test isapprox([0.42761925245185045, 0.42764607849874114, 0.42770941109985255], wrdf.RMSE)
@test isapprox([0.014954934572438905, 0.014831340071840282, 0.014539519723204886], wrdf.R2)
@test isapprox([-0.10817569860600629, -0.10831474241917971, -0.10864304031139449], wrdf.ADJR2)
@test isapprox([2.3282371768678907, 2.4079428880617186, 2.4743643140565754], wrdf[!, "(Intercept)"])
@test isapprox([0.08535712911515224, 0.07759739010468385, 0.07113094092929353], wrdf[!, "x"])
@test isapprox([0. , 0. , 0.], wrdf[!, "vif (Intercept)"])
@test isapprox([1.0, 0.8264462809917354, 0.6944444444444445], wrdf[!, "vif x"])
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 3811 | include("../src/sweep_operator.jl")
@testset "Sweep Operator Correctness" begin
correct_result = [1.1666666666666667 -0.5 -0.0 1.5; -0.5 0.25 -0.0 0.25; 0.0 -0.0 0.16666666666666666 0.3333333333333333; -1.5 -0.25 -0.3333333333333333 3.0833333333333335]
correct_T1SS = [24.0, 0.25, 0.6666666666666665]
correct_T2SS = [1.9285714285714284, 0.25, 0.6666666666666666]
correct_last_see = 3.0833333333
M = [ 1. 1. 1. 1.
1. 2. 1. 3.
1. 3. 1. 3.
1. 1. -1. 2.
1. 2. -1. 2.
1. 3. -1. 1. ]
M0 = M' * M
sweepedM0 = sweep_op_full!(M0')
@test isapprox(correct_result, M0)
M0 = M' * M
SSE, TypeISS = sweep_op_fullT1SS!(M0')
@test isapprox(correct_last_see, SSE)
@test isapprox(correct_result, M0)
@test isapprox(correct_T1SS, TypeISS)
@test isapprox(correct_T2SS, get_TypeIISS(M0))
A = [0.6694830121789994; 0.21314246720926422; 0.6529660250873977; 0.6385918132605166; 0.037358200179362755; 0.2875097674400453; 0.21811874684632737; 0.4232050450926038; 0.9734084003457419; 0.4895213360877587; 0.22107794000504877;;]
B = [0.1092789425428552 0.7682468044279679 0.9375459449272519 0.3016008879920138 0.6350731353296186 0.6391931975313466 0.7931988086444425 0.1547756065020972 0.7551009672990262 0.2043390054901021 0.37466999909368104; 0.7518732467227479 0.17404612464595814 0.761770033509471 0.2770864478415873 0.7543141370258291 0.32310676567293395 0.33347421997349236 0.4534191137643454 0.5128610834497598 0.9341505398911903 0.7536289774998648; 0.8217480529578403 0.34093452577281813 0.962711302240167 0.749580537971113 0.6119080109928845 0.4898631756099011 0.22873586091225762 0.7556388096116227 0.5482875796771497 0.3019759415002481 0.24383237803532276; 0.9205908572601128 0.9838016151616238 0.9380559252830013 0.33900574573219244 0.0583887229946799 0.4679092112776474 0.39026963404013393 0.5418546773143993 0.6935042985856147 0.3282403487260446 0.6961565982332839; 0.1414240797001498 0.3412940519296852 0.8408526865689825 0.462294772065576 0.31393723531549245 0.8748575543600328 0.14699174692940986 0.39306278859334387 0.10912929212661415 0.9141524544576278 0.29686130512125486; 0.37906779731717744 0.8475450403582677 0.2038618175296657 0.4683061428672235 0.9297014862588634 0.5466002831094359 0.6751069695895096 0.9743862251742357 0.9564283751044231 0.6116585273083751 0.7909339981249403; 0.5359109396842612 0.012988843952200346 0.9835824622845665 0.8686665313226049 0.45336455279489085 0.5839037022748381 0.9157470648204125 0.7116357064138576 0.5183459854159834 0.08679668241144856 0.2799014486961946; 0.7554124198898702 0.9323865537553858 0.8678647805277917 0.038064063816100724 0.3429878578591379 0.31534343578500357 0.475130227302884 0.36996903727864516 0.030115361864833545 0.34244322477966027 0.4523144307547967; 0.8336756262881949 0.5216976063941485 0.28203838317254926 0.7373358446522794 0.5620614410329805 0.057089029616333886 0.2616440484472927 0.7340293213619165 0.7384112237773132 0.7537099230023115 0.6503159099088369; 0.5551618374419353 0.03118050811467732 0.23476295981821782 0.917221770950081 
0.223897520609306 0.5216802455519959 0.365440442059882 0.7394898203765863 0.09923845623220229 0.4348799692321885 0.10475084732823181; 0.9893186829830513 0.5621839305113805 0.278541900532278 0.10896953976431789 0.31531406921972904 0.5869378617853271 0.4820934500666002 0.9326376745113245 0.8775590942514956 0.7887445296205713 0.6274169456020134]
correct_coefs = [0.8626528683885901; 0.7799120469808962; -0.3648600566802049; 1.0021645622078064; 0.2748318846779194; -0.16712897766519763; 0.09387657451235487; -1.251063803103481; 0.46242701312864226; 0.27040571287723963; -0.8997388730388888;;]
@test isapprox(sweep_linreg(B, A), correct_coefs)
end | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | code | 11395 | include("../src/utilities.jl")
@testset "Plots request massaging" begin
wanted = "all"
needed = Set([:fit, :residuals, :normal_checks, :cooksd, :leverage, :homoscedasticity])
@test needed == get_needed_plots(wanted)
wanted = []
needed = Set([])
@test needed == get_needed_plots(wanted)
wanted = :none
needed = Set()
@test needed == get_needed_plots(wanted)
wanted = Set()
needed = Set()
@test needed == get_needed_plots(wanted)
wanted = ["fit", "cooksd", "homoscedasticity"]
needed = Set([:fit, :cooksd, :homoscedasticity])
@test needed == get_needed_plots(wanted)
wanted = [:residuals, :normal_checks]
needed = Set([:residuals, :normal_checks])
@test needed == get_needed_plots(wanted)
wanted = :leverage
needed = Set([:leverage])
@test needed == get_needed_plots(wanted)
end
@testset "Covariance estimator stats massaging" begin
wanted = [:white, :white , :bogus , :nw]
needed = ([:white], [:nw])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = [:hc0, :hc1, :nw]
needed = ([:hc0, :hc1], [:nw])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = ["White", "HC0" , "nw" , "nothing"]
needed = ([:white, :hc0], [:nw])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = ["Hc1", "HC0" , "nothing"]
needed = ([:hc0 , :hc1], [])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = "Nw"
needed = ([], [:nw])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = :hc0
needed = ([:hc0], [])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = :hc0
needed = ([:hc0], [])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = []
needed = ([], [])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = :none
needed = ([], [])
@test needed == get_needed_robust_cov_stats(wanted)
wanted = :all
needed = ([:white, :hc0, :hc1, :hc2, :hc3], [:nw])
@test needed == get_needed_robust_cov_stats(wanted)
end
@testset "model stats massaging" begin
wanted = ["default"]
needed = Set([:coefs, :sse, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = [:default, :vif]
needed = Set([:coefs, :sse, :vif, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = [:default, :diag_ks]
needed = Set([:coefs, :sse, :diag_ks, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = [:default, :diag_normality]
needed = Set([:coefs, :sse, :diag_ks, :diag_ad, :diag_jb, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = [:default, :diag_heteroskedasticity]
needed = Set([:coefs, :sse, :diag_white, :diag_bp, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = ["r2", "rmse"]
needed = Set([:r2, :rmse, :coefs, :sse, :mse, :sst])
@test needed == get_needed_model_stats(wanted)
wanted = ["aic"]
needed = Set([:coefs, :mse, :sse, :aic])
@test needed == get_needed_model_stats(wanted)
wanted = ["Ci"]
needed = Set([:coefs, :mse, :sse, :ci, :sigma, :stderror, :t_statistic])
@test needed == get_needed_model_stats(wanted)
wanted = ["Adjr2"]
needed = Set([:coefs, :mse, :sse, :r2, :adjr2, :sst])
@test needed == get_needed_model_stats(wanted)
wanted = ["stderror"]
needed = Set([:coefs, :mse, :sse, :stderror, :sigma])
@test needed == get_needed_model_stats(wanted)
wanted = ["t_values"]
needed = Set([:coefs, :mse, :sse, :t_values, :stderror, :sigma])
@test needed == get_needed_model_stats(wanted)
wanted = ["p_values"]
needed = Set([:coefs, :mse, :sse, :p_values, :t_values, :stderror, :sigma])
@test needed == get_needed_model_stats(wanted)
wanted = ["ci", "P_values"]
needed = Set([:coefs, :mse, :sse, :p_values, :t_values, :stderror, :sigma, :ci, :t_statistic])
@test needed == get_needed_model_stats(wanted)
wanted = ["none"]
needed = Set([:coefs, :mse, :sse])
@test needed == get_needed_model_stats(wanted)
wanted = ["all"]
needed = Set([:coefs, :sse, :mse, :sst, :rmse, :aic, :sigma, :t_statistic, :vif, :r2, :adjr2, :stderror, :t_values, :p_values, :ci,
:diag_normality, :diag_ks, :diag_ad, :diag_jb, :diag_heteroskedasticity, :diag_white, :diag_bp, :press,
:t1ss, :t2ss, :pcorr1, :pcorr2, :scorr1, :scorr2, :cond, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = [ ]
needed = Set([:coefs, :mse, :sse])
@test needed == get_needed_model_stats(wanted)
wanted = ["press" ]
needed = Set([:coefs, :mse, :sse, :press])
@test needed == get_needed_model_stats(wanted)
wanted = ["t1ss" ]
needed = Set([:coefs, :mse, :sse, :t1ss])
@test needed == get_needed_model_stats(wanted)
wanted = ["t2ss", "t1ss" ]
needed = Set([:coefs, :mse, :sse, :t1ss, :t2ss])
@test needed == get_needed_model_stats(wanted)
wanted = ["pcorr1" ]
needed = Set([:coefs, :mse, :sse, :t1ss, :pcorr1])
@test needed == get_needed_model_stats(wanted)
wanted = ["pcorr1", "pcorr2" ]
needed = Set([:coefs, :mse, :sse, :t1ss, :t2ss, :pcorr1, :pcorr2])
@test needed == get_needed_model_stats(wanted)
wanted = [:scorr1 ]
needed = Set([:coefs, :mse, :sse, :sst, :t1ss, :scorr1])
@test needed == get_needed_model_stats(wanted)
wanted = [:scorr1, :scorr2 ]
needed = Set([:coefs, :mse, :sse, :sst, :t1ss, :t2ss, :scorr1, :scorr2])
@test needed == get_needed_model_stats(wanted)
wanted = [ "bogus"]
needed = Set([:coefs, :mse, :sse])
@test needed == get_needed_model_stats(wanted)
wanted = [ "cond"]
needed = Set([:coefs, :mse, :sse, :cond])
@test needed == get_needed_model_stats(wanted)
wanted = ["stderror", "Bogus"]
needed = Set([:coefs, :mse, :sse, :stderror, :sigma])
@test needed == get_needed_model_stats(wanted)
wanted = [ "f_stats"]
needed = Set([:coefs, :mse, :sse, :sst, :f_stats])
@test needed == get_needed_model_stats(wanted)
wanted = Set([:stderror, :Bogus])
needed = Set([:coefs, :mse, :sse, :stderror, :sigma])
@test needed == get_needed_model_stats(wanted)
wanted = Set()
needed = Set([:coefs, :mse, :sse])
@test needed == get_needed_model_stats(wanted)
end
@testset "prediction stats massaging" begin
wanted = ["none"]
target_need = Set([:predicted])
target_present = Set([:predicted])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["all"]
target_need = Set([:predicted, :residuals, :leverage, :stdp, :stdi, :stdr, :student, :rstudent, :lcli, :ucli, :lclp, :uclp, :press, :cooksd])
target_present = Set([:predicted, :residuals, :leverage, :stdp, :stdi, :stdr, :student, :rstudent, :lcli, :ucli, :lclp, :uclp, :press, :cooksd])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["CooksD"]
target_need = Set([:predicted, :residuals, :leverage, :stdp, :stdr, :student, :cooksd])
target_present = Set([:predicted, :cooksd])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["CooksD", "lcli"]
target_need = Set([:predicted, :residuals, :leverage, :stdp, :stdr, :student, :cooksd, :stdi, :lcli])
target_present = Set([:predicted, :cooksd, :lcli])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["CooksD", "Bogus", "lcli"]
target_need = Set([:predicted, :residuals, :leverage, :stdp, :stdr, :student, :cooksd, :stdi, :lcli])
target_present = Set([:predicted, :cooksd, :lcli])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["PRESS"]
target_need = Set([:predicted, :residuals, :leverage, :press])
target_present = Set([:predicted, :press])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["lclp"]
target_need = Set([:predicted, :stdp, :leverage, :lclp])
target_present = Set([:predicted, :lclp])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["uclp"]
target_need = Set([:predicted, :stdp, :leverage, :uclp])
target_present = Set([:predicted, :uclp])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["ucli"]
target_need = Set([:predicted, :stdi, :leverage, :ucli])
target_present = Set([:predicted, :ucli])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["ucli", "LCLP"]
target_need = Set([:predicted, :stdi, :leverage, :ucli, :lclp, :stdp])
target_present = Set([:predicted, :ucli, :lclp])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = ["student"]
target_need = Set([:predicted, :residuals, :leverage, :stdr, :student])
target_present = Set([:predicted, :student])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = [:rstudent]
target_need = Set([:predicted, :residuals, :leverage, :stdr, :student, :rstudent])
target_present = Set([:predicted, :rstudent])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = [:stdp]
target_need = Set([:predicted, :leverage, :stdp])
target_present = Set([:predicted, :stdp])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = [:stdp, :stdi]
target_need = Set([:predicted, :leverage, :stdp, :stdi])
target_present = Set([:predicted, :stdp, :stdi])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = [:stdpp, :stdr]
target_need = Set([:predicted, :leverage, :stdr])
target_present = Set([:predicted, :stdr])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = []
target_need = Set([:predicted])
target_present = Set([:predicted])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
wanted = Set()
target_need = Set([:predicted])
target_present = Set([:predicted])
need, present = get_prediction_stats(wanted)
@test target_need == need
@test target_present == present
end
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | docs | 13813 | [](https://ericqu.github.io/LinearRegressionKit.jl/dev/)
[](https://ericqu.github.io/LinearRegressionKit.jl/stable)
# LinearRegressionKit.jl
LinearRegressionKit.jl implements linear regression using the least-squares algorithm (relying on the sweep operator). This package is in the beta stage. Hence it is likely that some bugs exist. Furthermore, the API might change in future versions. User's or prospective users' feedback is welcome.
# Installation
Enter the Pkg REPL by pressing ] from the Julia REPL. Then install the package with:
``` pkg> add LinearRegressionKit ``` or ```pkg> add https://github.com/ericqu/LinearRegressionKit.jl.git ```.
To uninstall use ``` pkg> rm LinearRegressionKit```
# Usage
The following is a simple usage:
```julia
using LinearRegressionKit, DataFrames, StatsModels
x = [0.68, 0.631, 0.348, 0.413, 0.698, 0.368, 0.571, 0.433, 0.252, 0.387, 0.409, 0.456, 0.375, 0.495, 0.55, 0.576, 0.265, 0.299, 0.612, 0.631]
y = [15.72, 14.86, 6.14, 8.21, 17.07, 9.07, 14.68, 10.37, 5.18, 9.36, 7.61, 10.43, 8.93, 10.33, 14.46, 12.39, 4.06, 4.67, 13.73, 14.75]
df = DataFrame(y=y, x=x)
lr = regress(@formula(y ~ 1 + x), df)
```
which outputs the following information:
```
Model definition: y ~ 1 + x
Used observations: 20
Model statistics:
RΒ²: 0.938467 Adjusted RΒ²: 0.935049
MSE: 1.01417 RMSE: 1.00706
ΟΜΒ²: 1.01417
F Value: 274.526 with degrees of freedom 1 and 18, Pr > F (p-value): 2.41337e-12
Confidence interval: 95%
Coefficients statistics:
Terms β² Stats β Coefs Std err t Pr(>|t|) code low ci high ci
βββββββββββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
(Intercept) β -2.44811 0.819131 -2.98867 0.007877 ** -4.16904 -0.727184
x β 27.6201 1.66699 16.5688 2.41337e-12 *** 24.1179 31.1223
Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
```
# Contrasts with Julia Stats GLM package
First, the GLM package provides more than linear regression with Ordinary Least-Squares through the Generalized Linear Model with Maximum Likelihood Estimation.
LinearRegressionKit accepts models without an intercept. As with models made with GLM, the intercept is implicit, and to disable it the user must specify so in the formula (for instance ```y ~ 0 + x```).
LinearRegressionKit supports analytical weights; GLM supports frequency weights.
Both LinearRegressionKit and GLM rely on StatsModels.jl for the model's description (@formula); hence it is easy to move between the two packages. Similarly, contrasts and categorical variables are defined in the same way facilitating moving from one to the other when needed.
LinearRegressionKit relies on the Sweep operator to estimate the coefficients, and GLM depends on Cholesky and QR factorizations.
The Akaike information criterion (AIC) is calculated with the formula relevant only for Linear Regression hence enabling comparison between linear regressions (AIC=n log(SSE / n) + 2p; where SSE is the Sum of Squared Errors and p is the number of predictors). On the other hand, the AIC calculated with GLM is more general (based on log-likelihood), enabling comparison between a broader range of models.
LinearRegressionKit package provides access to some robust covariance estimators (for Heteroscedasticity: White, HC0, HC1, HC2 and HC3 and for HAC: Newey-West)
Ridge Regression (potentially with analytical weights) is implemented in the LinearRegressionKit package.
# List of Statistics
## List of Statistics calculated about the linear regression model:
- AIC: Akaike information criterion with the formula AIC=n log(SSE / n) + 2p; where SSE is the Sum of Squared Errors and p is the number of predictors.
- SSE Sum of Squared Errors as the output from the sweep operator.
- SST as the Total Sum of Squares as the sum over all squared differences between the observations and their overall mean.
- RΒ² as 1 - SSE/SST.
- Adjusted RΒ².
- ΟΜΒ² (sigma) Estimate of the error variance.
- Variance Inflation Factor.
- CI: the confidence interval based on the α default value of 0.05, giving the 95% confidence interval.
- The t-statistic.
- The mean squared error.
- The root of the mean squared error.
- The standard errors and their equivalent with a Heteroscedasticity or HAC covariance estimator
- The t values and their equivalent with a Heteroscedasticity or HAC covariance estimator
- P values for each predictor and their equivalent with a Heteroscedasticity or HAC covariance estimator
- Type 1 & 2 Sum of squares
- Squared partial correlation coefficient, squared semi-partial correlation coefficient.
- PRESS as the sum of square of predicted residuals errors
- F Value (SAS naming) F Statistic (R naming) is presented with its p-value
## List of Statistics about the predicted values:
- The predicted values
- The residuals values (as the actual values minus the predicted ones)
- The Leverage or the i-th diagonal element of the projection matrix.
- STDI is the standard error of the individual predicted value.
- STDP is the standard error of the mean predicted value
- STDR is the standard error of the residual
- Student as the studentized residuals, also known as the standardized residuals or internally studentized residuals.
- Rstudent is the studentized residual with the current observation deleted.
- LCLI is the lower bound of the confidence interval for the individual prediction.
- UCLI is the upper bound of the confidence interval for the individual prediction.
- LCLP is the lower bound of the confidence interval for the expected (mean) value.
- UCLP is the upper bound of the confidence interval for the expected (mean) value.
- Cook's Distance
- PRESS as predicted residual errors
# Questions and Feedback
Please post your questions, feedback, or issues in the Issues tab. As much as possible, please provide relevant contextual information.
# Credits and additional information
- Goodnight, J. (1979). "A Tutorial on the SWEEP Operator." The American Statistician.
- Gordon, R. A. (2015). Regression Analysis for the Social Sciences. New York and London: Routledge.
- https://blogs.sas.com/content/iml/2021/07/14/performance-ls-regression.html
- https://github.com/joshday/SweepOperator.jl
- http://hua-zhou.github.io/teaching/biostatm280-2019spring/slides/12-sweep/sweep.html
- https://github.com/mcreel/Econometrics for the Newey-West implementation
- https://blogs.sas.com/content/iml/2013/03/20/compute-ridge-regression.html
- Code from StatsModels https://github.com/JuliaStats/StatsModels.jl/blob/master/test/extension.jl (in December 2021)
# Examples
The following is a short example illustrating some statistics about the predicted data.
First, a simulation of some data with a polynomial function.
```julia
using LinearRegressionKit, DataFrames, StatsModels
using Distributions # for the data generation with Normal() and Uniform()
using VegaLite
# Data simulation
f(x) = @. (x^3 + 2.2345x - 1.2345 + rand(Normal(0, 20)))
xs = [x for x in -2:0.1:8]
ys = f(xs)
vdf = DataFrame(y=ys, x=xs)
```
Then we can make the first model and look at the results:
```julia
lr, ps = regress(@formula(y ~ 1 + x), vdf, "all",
req_stats=["default", "vif", "AIC"],
plot_args=Dict("plot_width" => 200))
lr
```
```
Model definition: y ~ 1 + x
Used observations: 101
Model statistics:
RΒ²: 0.758985 Adjusted RΒ²: 0.75655
MSE: 5660.28 RMSE: 75.2348
ΟΜΒ²: 5660.28 AIC: 874.744
F Value: 311.762 with degrees of freedom 1 and 99, Pr > F (p-value): 2.35916e-32
Confidence interval: 95%
Coefficients statistics:
Terms β² Stats β Coefs Std err t Pr(>|t|) code low ci high ci VIF
βββββββββββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
(Intercept) β -26.6547 10.7416 -2.48145 0.0147695 * -47.9683 -5.34109 0.0
x β 45.3378 2.56773 17.6568 2.35916e-32 *** 40.2429 50.4327 1.0
Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
```
This is okay, so let's further review some diagnostic plots.
```julia
[[ps["fit"] ps["residuals"]]
[ps["histogram density"] ps["qq plot"]]]
```

Please note that for the fit plot, the orange line shows the regression line, in dark grey the confidence interval for the mean, and in light grey the interval for the individuals predictions.
Plots are indicating the potential presence of a polynomial component. Hence one might try to add one by doing the following:
```julia
lr, ps = regress(@formula(y ~ 1 + x^3 ), vdf, "all",
req_stats=["default", "vif", "AIC"],
plot_args=Dict("plot_width" => 200 ))
```
Giving:
```
Model definition: y ~ 1 + :(x ^ 3)
Used observations: 101
Model statistics:
RΒ²: 0.984023 Adjusted RΒ²: 0.983861
MSE: 375.233 RMSE: 19.3709
ΟΜΒ²: 375.233 AIC: 600.662
F Value: 6097.23 with degrees of freedom 1 and 99, Pr > F (p-value): 9.55196e-91
Confidence interval: 95%
Coefficients statistics:
Terms β² Stats β Coefs Std err t Pr(>|t|) code low ci high ci VIF
βββββββββββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
(Intercept) β -0.0637235 2.38304 -0.0267404 0.978721 -4.7922 4.66475 0.0
x ^ 3 β 1.05722 0.0135394 78.0847 9.55196e-91 *** 1.03036 1.08409 1.0
Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
```

Further, in addition to the diagnostic plots helping confirm if the residuals are normally distributed, a few tests can be requested:
```julia
# Data simulation
f(x) = @. (x^3 + 2.2345x - 1.2345 + rand(Uniform(0, 20)))
xs = [x for x in -2:0.001:8]
ys = f(xs)
vdf = DataFrame(y=ys, x=xs)
lr = regress(@formula(y ~ 1 + x^3 ), vdf,
req_stats=["default", "vif", "AIC", "diag_normality"])
```
Giving:
```
Model definition: y ~ 1 + :(x ^ 3)
Used observations: 10001
Model statistics:
RΒ²: 0.99795 Adjusted RΒ²: 0.99795
MSE: 43.4904 RMSE: 6.59472
ΟΜΒ²: 43.4904 AIC: 37731.2
F Value: 4.868e+06 with degrees of freedom 1 and 9999, Pr > F (p-value): 0
Confidence interval: 95%
Coefficients statistics:
Terms β² Stats β Coefs Std err t Pr(>|t|) code low ci high ci VIF
βββββββββββββββΌβββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
(Intercept) β 11.3419 0.0816199 138.96 0.0 *** 11.1819 11.5019 0.0
x ^ 3 β 1.04021 0.000471459 2206.35 0.0 *** 1.03928 1.04113 1.0
Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
Diagnostic Tests:
Kolmogorov-Smirnov test (Normality of residuals):
KS statistic: 3.05591 observations: 10001 p-value: 0.0
with 95.0% confidence: reject null hyposthesis.
AndersonβDarling test (Normality of residuals):
AΒ² statistic: 25.508958 observations: 10001 p-value: 0.0
with 95.0% confidence: reject null hyposthesis.
Jarque-Bera test (Normality of residuals):
JB statistic: 240.520153 observations: 10001 p-value: 0.0
with 95.0% confidence: reject null hyposthesis.
```
Here is how to request the robust covariance estimators:
```julia
lr = regress(@formula(y ~ 1 + x^3 ), vdf, cov=["white", "nw"])
```
Giving:
```
Model definition: y ~ 1 + :(x ^ 3)
Used observations: 10001
Model statistics:
RΒ²: 0.99795 Adjusted RΒ²: 0.99795
MSE: 43.4904 RMSE: 6.59472
PRESS: 435034
F Value: 4.868e+06 with degrees of freedom 1 and 9999, Pr > F (p-value): 0
Confidence interval: 95%
White's covariance estimator (HC0):
Terms β² Stats β Coefs Std err t Pr(>|t|) code low ci high ci
βββββββββββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
(Intercept) β 11.3419 0.0828903 136.83 0.0 *** 11.1794 11.5044
x ^ 3 β 1.04021 0.000471604 2205.67 0.0 *** 1.03928 1.04113
Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
Newey-West's covariance estimator:
Terms β² Stats β Coefs Std err t Pr(>|t|) code low ci high ci
βββββββββββββββΌββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
(Intercept) β 11.3419 0.158717 71.46 0.0 *** 11.0308 11.653
x ^ 3 β 1.04021 0.000863819 1204.19 0.0 *** 1.03851 1.0419
Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
```
Finally if you would like more examples I encourage you to go to the documentation as it gives a few more examples.
## Notable changes since version 0.7.10
- allow more recent dependent packages | LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | docs | 2695 | ## Tutorial Linear Regression Basics
This tutorial details a simple regression analysis based on the "Formaldehyde" dataset.
### First, creating the dataset
This is done relying on the `DataFrames.jl` package.
```@example basic1
using DataFrames
df = DataFrame(Carb=[0.1,0.3,0.5,0.6,0.7,0.9], OptDen=[0.086,0.269,0.446,0.538,0.626,0.782])
```
### Second, the model is defined
We want OptDen as the dependent variable (the response) and Carb as the independent variable (the predictor). Our model will have an intercept; however, the package will implicitly add the intercept to the model. We define the model as `OptDen ~ Carb`; the variables' names need to be column names from the DataFrame, which is the second argument to the `regress` function. The `lm` object will then present essential information from the regression.
```@example basic1
using LinearRegressionKit
using StatsModels # this is requested to use the @formula
lm = regress(@formula(OptDen ~ Carb), df)
```
### Third, some illustration about the model is created
Here we will only look at the fit-plot. To obtain it, we only need to add a third argument to the `regress` function. Namely, the name of the plot requested ("fit"). When at least one plot is requested, the `regress` function will return a pair of objects: the information about the regression (as before), and an object (`Dict`) to access the requested plot(s).
```@example basic1
using VegaLite # this is the package use for plotting
lm, ps = regress(@formula(OptDen ~ Carb), df, "fit")
ps["fit"]
```
The response is plotted on the y-axis, and the predictor is plotted on the x-axis. The dark orange line represents the regression equation. The dark grey band represents the confidence interval given the α (which defaults to 0.05 and gives a 95% confidence interval). The light grey band represents the individual prediction interval. Finally, the blue circles represent the actual observations from the dataset.
#### Fourth, generate the predictions from the model
Here we get the predicted values from the model using the same Dataframe.
```@example basic1
results = predict_in_sample(lm, df)
```
#### Fifth, generate the others statistics about the model
In order to get all the statistics, one can use the "all" keyword as an argument of the `req_stats` argument.
```@example basic1
results = predict_in_sample(lm, df, req_stats="all")
```
#### Sixth, generate prediction for new data
We first create a new DataFrame that needs to use the same column names used in the model. In our case, there is only one column: "Carb".
```@example basic1
ndf = DataFrame(Carb= [0.11, 0.22, 0.55, 0.77])
predictions = predict_out_of_sample(lm, ndf)
```
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | docs | 15294 | # LinearRegressionKit.jl Documentation
LinearRegressionKit.jl implements linear regression using the least-squares algorithm (relying on the sweep operator). This package is in the alpha stage. Hence it is likely that some bugs exist. Furthermore, the API might change in future versions.
The usage aims to be straightforward, a call to ```regress``` to build a linear regression model, and a call to ```predict_in_sample``` to predict data using the built linear regression model.
When predicting on data not present during the regression, use the ```predict_out_of_sample``` function as this does not require a response value (consequently, statistics that need a response, as the residuals, are not available.)
The regress call will compute some statistics about the fitted model in addition to the coefficients. The statistics computed depend on the value of the ```req_stats``` argument.
The prediction functions compute predicted values together with some statistics. Like for the regress calls, the statistics computed depend on the value of the ```req_stats``` argument.
When some analytical positive weights are used, a weighted regression is performed.
### Statistics related to the regression (the fitting)
Fitting the model generates some statistics dependent on the `req_stats` argument of the `regress` function.
- ``n``, ``p``, `"coefs"` and `"sse"` are always computed
- `"mse"`, `"sst"`, `"rmse"`, `"aic"`, `"sigma"`, `"t_statistic"`, `"vif"`, `"r2"`, `"adjr2"`, `"stderror"`, `"t_values"`, `"p_values"`, `"ci"`, `"press"`, and `"cond"` are computed upon request.
- some diagnostics can be requested as well. Here is the full list as Symbols `[:diag_normality, :diag_ks, :diag_ad, :diag_jb, :diag_heteroskedasticity, :diag_white, :diag_bp ]`, `"diag_normality"` is a shortcut for `[:diag_ks, :diag_ad, :diag_jb]` and `:diag_heteroskedasticity` is a shortcut for `[:diag_white, :diag_bp]`.
- "default", includes the mandatory stats, and some of the optional statistics here as Symbols: `[:coefs, :sse, :mse, :sst, :rmse, :sigma, :t_statistic, :r2, :adjr2, :stderror, :t_values, :p_values, :ci, :f_stats]`
- `"all"` includes all available statistics
- `"none"` includes only the mandatory statistics
The meaning for these statistics is given below.
#### Number of observations and variables
The number of observations ``n`` used to fit the model.
The number of independent variables ``p`` used in the model.
#### Total Sum of Squares
The Total Sum of Squares (SST) is calculated but not presented to the user. In case of model with intercept the SST is computed with the following:
```math
\mathrm{SST}=\sum_{i=1}^{n}\left(y_{i}-\bar{y}\right)^2
```
And when there is no intercept with the following:
```math
\mathrm{SST}=\sum_{i=1}^{n} y_{i}^2
```
#### Error Sum of Squares
The Error Sum of Squares (or SSE) also known as Residual Sum of Square (RSS). This package uses the sweep operator (Goodnight, J. (1979). "A Tutorial on the SWEEP Operator." The American Statistician.) to compute the SSE.
#### Mean Squared Error
The Mean Squared Error (MSE) is calculated as
```math
\mathrm{MSE} = \displaystyle{\frac{{\mathrm{SSE}}}{{n - p}}}
```
The Root Mean Squared Error (RMSE) is calculated as
```math
\mathrm{RMSE} = \sqrt{\mathrm{MSE}}
```
The MSE is the estimator of ΟΜΒ² unless at least one robust covariance estimator is requested.
#### RΒ² and Adjusted RΒ²
The RΒ² (R2 or R-squared) see (https://en.wikipedia.org/wiki/Coefficient_of_determination) is calculated with the following formula:
```math
\mathrm{R}^2 = 1 - \displaystyle{\frac{{\mathrm{SSE}}}{{\mathrm{SST}}}}
```
The Adjusted RΒ² (ADJR2) is computed with the following formulas:
when it is a model with an intercept:
```math
\mathrm{ADJR}^2 = 1 - \displaystyle \frac{(n-1)(1-\mathrm{R}^2)}{n-p}
```
And when there is no intercept:
```math
\mathrm{ADJR}^2 = 1 - \displaystyle \frac{(n)(1-\mathrm{R}^2)}{n-p}
```
#### Akaike information criterion
The Akaike information criterion is calculated with the Linear Regression specific formula:
```math
\mathrm{AIC} = \displaystyle n \ln \left( \frac{\mathrm{SSE}}{n} \right) + 2p
```
#### t\_statistic and confidence interval
The t\_statistic is computed by using the inverse cumulative t-distribution (with ```quantile()```) with parameter (``n - p``) at ``1 - \frac{\alpha}{2}``.
The standard errors of the coefficients are calculated by multiplying the Sigma (estimated by the MSE) with the pseudo inverse matrix (resulting from the sweep operator), out of which the square root of the diagonal elements are extracted.
The t-values are calculated as the coefficients divided by their standard deviation.
The upper bound of the confidence interval for each coefficient is calculated as the coefficient + the coefficient's standard error * t\_statistic.
The lower bound of the confidence interval for each coefficient is calculated as the coefficient - the coefficient's standard error * t\_statistic.
#### p-values
The p-values are computed using the F-distribution with the degrees of freedom for each coefficient.
#### F Value (F Statistic)
The F Value (F Statistic) is computed using the F-distribution with the degrees of freedom for the explained and unexplained variance.
#### Variance inflation factor
Variance inflation factor (VIF) is calculated by taking the diagonal elements of the inverse of the correlation matrix formed by the independent variables.
#### PRESS predicted residual error sum of squares
The predicted residual error sum of squares is calculated by taking the sum of squares from the `PRESS` (see below the statistics related to predictions) of each observations.
#### Condition number (cond)
The condition number of the design matrix is computed using the function `cond` from `LinearAlgebra`. It gives some information indicating how severely ill-conditioned the problem is. See the [Wikipedia page](https://en.wikipedia.org/wiki/Condition_number) and the [Julia documentation](https://docs.julialang.org/en/v1/stdlib/LinearAlgebra/#LinearAlgebra.cond) for more information.
### Robust covariance estimators
Robust Covariance estimator can be requested through the `cov` argument of the `regress` function.
The options are (as Symbols):
- `:white`: Heteroscedasticity
- `:hc0`: Heteroscedasticity
- `:hc1`: Heteroscedasticity
- `:hc2`: Heteroscedasticity
- `:hc3`: Heteroscedasticity
- `:nw`: HAC (Heteroskedasticity and Autocorrelation Consistent estimator)
#### Heteroscedasticity estimators
The user can select estimators from the above list. If the user selects `:white` as an estimator, then HC3 will be selected for a small sample size (n <= 250); otherwise HC0 will be selected (see "Using Heteroscedasticity Consistent Standard Errors in the Linear Regression Model", J. Scott Long and Laurie H. Ervin (1998-2000)).
If another estimator is requested, it is provided. A list of estimators can be requested, as in, for instance, `cov=[:hc2, :hc3]`.
Comprehensive descriptions of the estimators and their applications should be found in a textbook; here only a brief description of the implementation is provided.
##### HC0
Having InvMat the pseudo inverse resulting from the sweep operator. And having ``xe`` being the matrix of the independent variables times the residuals. Then HC0 is calculated as:
```math
\textup{HC0} = \sqrt{diag(\textup{InvMat } \textup{xe}' \textup{xe} \textup{ InvMat})}
```
##### HC1
Having n being the number of observations and p the number of variables. Then HC1 is calculated as:
```math
\textup{HC1} = \sqrt{diag(\textup{InvMat } \textup{xe}' \textup{xe} \textup{ InvMat } \frac{n}{n-p})}
```
##### HC2
The leverage or hat matrix is calculated as:
```math
\textup{H} = \textup{X} (\textup{X'X})^{-1}\textup{X'}
```
``xe`` is scaled by ``\frac{1}{1 - H}`` then
```math
\textup{HC2} = \sqrt{diag(\textup{InvMat } \textup{xe}' \textup{xe} \textup{ InvMat } )}
```
##### HC3
``xe`` is scaled by ``\frac{1}{{\left( 1 - H \right)^2}}`` then
```math
\textup{HC3} = \sqrt{diag(\textup{InvMat } \textup{xe}' \textup{xe} \textup{ InvMat } )}
```
#### Heteroskedasticity and autocorrelation consistent estimator (HAC)
Newey-West estimator calculation is not documented yet.
See [reference implementation](https://github.com/mcreel/Econometrics/blob/508aee681ca42ff1f361fd48cd64de6565ece221/src/NP/NeweyWest.jl) [current implementation](https://github.com/ericqu/LinearRegressionKit.jl/blob/docu/src/newey_west.jl) for details.
### Statistics related to the prediction
Predicting values using independent variables and a model will generate predicted values and some additional statistics dependent on the value of the `req_stats` argument of the `predict*` functions.
Here is a list of the available statistics:
[:predicted, :residuals, :leverage, :stdp, :stdi, :stdr, :student, :rstudent, :lcli, :ucli, :lclp, :uclp, :press, :cooksd]
#### Predicted
The predicted value is the sum of the independent variable(s) multiplied by the coefficients from the regression, plus the intercept (if the model has one). The predicted value is also known as the Y-hat.
#### Residuals
The residuals are here defined as the known responses variables minus the predicted values.
#### Leverage
The leverage for the i-th independent observation x_i when it is not a weighted regression is calculated as:
```math
\mathrm{h_i} = \mathrm{x_i' (X' X)^{-1} x_i}
```
And as per below when it is a weighted regression with a vector of weights ``W`` with the i-th weight being ``w_i`` then the i-th leverage is calculated as such:
```math
\mathrm{h_i} = \mathrm{w_i \cdot x_i' (X' W X)^{-1} x_i}
```
#### STDP
STDP is the standard error of the mean predicted value, and is calculated as
```math
\textup{STDP} = \sqrt{\hat{\sigma}^2 h_i }
```
and for a weighted regression as:
```math
\textup{STDP} = \sqrt{\hat{\sigma}^2 h_i / w_i}
```
#### STDI
STDI is the standard error of the individual predicted value, and is calculated as
```math
\textup{STDI} = \sqrt{\hat{\sigma}^2 (1 + h_i)}
```
and for a weighted regression as:
```math
\textup{STDI} = \sqrt{\hat{\sigma}^2 (1 + h_i) / w_i}
```
#### STDR
STDR is the standard error of the residual, and is calculated as
```math
\textup{STDR} = \sqrt{\hat{\sigma}^2 (1 - h_i) }
```
and for a weighted regression as:
```math
\textup{STDR} = \sqrt{\hat{\sigma}^2 (1 - h_i) / w_i}
```
#### Student
Student represents the standardized residuals, and is calculated by using the residuals over the standard error of the residuals.
#### RStudent
RStudent is the studentized residual with the current observation deleted, calculated as
```math
\textup{RSTUDENT} = \textup{student} \sqrt{ \frac{n - p - 1}{n - p - \textup{student}^2}}
```
#### LCLI
LCLI is the lower bound of the prediction interval and is calculated as:
```math
\textup{LCLI} = \mathrm{predicted} - ( \mathrm{t\_statistic} \cdot \mathrm{STDI} )
```
#### UCLI
UCLI is the upper bound of the prediction interval and is calculated as:
```math
\textup{UCLI} = \mathrm{predicted} + ( \mathrm{t\_statistic} \cdot \mathrm{STDI} )
```
#### LCLP
LCLP is the lower bound of the predicted mean confidence interval and is calculated as:
```math
\textup{LCLP} = \mathrm{predicted} - ( \mathrm{t\_statistic} \cdot \mathrm{STDP} )
```
#### UCLP
UCLP is the upper bound of the predicted mean confidence interval and is calculated as:
```math
\textup{UCLP} = \mathrm{predicted} + ( \mathrm{t\_statistic} \cdot \mathrm{STDP} )
```
#### COOKSD
COOKSD is the Cook's Distance for each predicted value, and is calculated as
```math
\textup{COOKSD} = \frac{\textup{student}^2}{p} \cdot \frac{\textup{STDP}^2}{\textup{STDR}^2}
```
#### PRESS
PRESS is the predicted residual for each observation (the model-level PRESS statistic is the sum of squares of these values) and is calculated as
```math
\textup{PRESS} = \frac{\textup{residuals}}{1 - \textup{leverage}}
```
#### Type 1 SS
Type 1 Sum of squares, are calculated as a by-product of the sweep operator.
#### Type 2 SS
Type 2 Sum of squares, are calculated using the pseudo-inverse matrix. The Type 2 SS of the ith independent variable is the square of the coefficient of the independent variable divided by the ith element of the diagonal from the pseudo-inverse matrix.
#### Pcorr 1 and 2
`pcorr1` and `pcorr2` are the squared partial correlation coefficient calculated as:
```math
\textup{pcorr1} = \frac{\textup{Type 1 SS}}{\textup{Type 1 SS}+ \textup{SSE}}
```
```math
\textup{pcorr2} = \frac{\textup{Type 2 SS}}{\textup{Type 2 SS}+ \textup{SSE}}
```
When there is an intercept in the model the `pcorr1` and `pcorr2` are considered `missing` for the intercept.
#### Scorr 1 and 2
`scorr1` and `scorr2` are the squared semi-partial correlation coefficient calculated as:
```math
\textup{scorr1} = \frac{\textup{Type 1 SS}}{\textup{SST}}
```
```math
\textup{scorr2} = \frac{\textup{Type 2 SS}}{\textup{SST}}
```
When there is an intercept in the model the `scorr1` and `scorr2` are considered `missing` for the intercept.
### Ridge Regression and Weighted Ridge Regression
Ridge regression and weighted ridge regression are possible using the `ridge` functions; please note that only the following statistics are available: `MSE`, `RMSE`, `R2`, `ADJR2`, and `VIF`.
The ridge constant `k` can be specified as scalar or as a range (`AbstractRange`).
The coefficients calculation is inspired by the details given in [SAS blog post](https://blogs.sas.com/content/iml/2013/03/20/compute-ridge-regression.html).
>Let ``X`` be the matrix of the independent variables after centering [and scaling] the data, and let ``Y`` be a vector corresponding to the [centered] dependent variable. Let ``D`` be a diagonal matrix with diagonal elements as in ``X`X``. The ridge regression estimate corresponding to the ridge constant k can be computed as ``D^{-1/2} * (Z^{T}Z + k*I)^{-1} * Z^{T}Y ``.
### General remarks
For all options and parameters they can be passed as a `Vector{String}` or a `Vector{Symbol}` or alternatively if only options is needed as a single `String` or `Symbol`. For instance `"all"`, `:all` or `["R2", "VIF"]` or `[:r2, :vif]`.
## Functions
```@docs
regress(f::StatsModels.FormulaTerm, df::AbstractDataFrame, req_plots; Ξ±::Float64=0.05, req_stats=["default"], weights::Union{Nothing,String}=nothing, remove_missing=false, cov=[:none], contrasts=nothing, plot_args=Dict("plot_width" => 400, "loess_bw" => 0.6, "residuals_with_density" => false))
regress(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame; Ξ±::Float64=0.05, req_stats=["default"], weights::Union{Nothing,String}=nothing, remove_missing=false, cov=[:none], contrasts=nothing)
predict_out_of_sample(lr::linRegRes, df::AbstractDataFrame; Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)
predict_in_sample(lr::linRegRes, df::AbstractDataFrame; Ξ±=0.05, req_stats=["none"], dropmissingvalues=true)
kfold(f, df, k, r = 1, shuffle=true; kwargs...)
ridge(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame, k::Float64; weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing)
ridge(f::StatsModels.FormulaTerm, df::DataFrames.AbstractDataFrame, ks::AbstractRange ; weights::Union{Nothing,String}=nothing, remove_missing=false, contrasts=nothing, traceplots = false)
predict_in_sample(rr::ridgeRegRes, df::AbstractDataFrame; dropmissingvalues=true)
predict_out_of_sample(rr::ridgeRegRes, df::AbstractDataFrame; dropmissingvalues=true)
```
## Index
```@index
```
## Content
```@contents
```
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | docs | 3393 | ## Tutorial multiple linear regression with categorical variables
This tutorial details a multiple regression analysis based on the "carseat" dataset (Information about car seat sales in 400 stores). This tutorial follows roughly the same steps done the datasets in the "An Introduction to Statistical Learning" book (https://www.statlearning.com/), from pages 119, 120, and 124.
### First, creating the dataset.
We create the dataset with the help of the `DataFrames.jl` and `Download` packages.
```@example multir
using Downloads, DataFrames, CSV
df = DataFrame(CSV.File(Downloads.download("https://raw.githubusercontent.com/Kulbear/ISLR-Python/master/data/Carseats.csv")))
describe(df)
```
### Second, basic analysis.
We make a model with all variables and a couple of interactions.
```@example multir
using LinearRegressionKit, StatsModels
lm = regress(@formula(Sales ~ CompPrice + Income + Advertising + Population + Price +
ShelveLoc + Age + Education + Urban + US + Income & Advertising + Price & Age), df)
```
To have better explainability, we choose to set the base for the Shelve Location (ShelveLoc) as "Medium" so that the results highlight what happens when it is "Bad" or "Good". Furthermore, to form an idea about how collinear the predictors are, we request the Variance inflation factor (VIF).
```@example multir
lm = regress(@formula(Sales ~ CompPrice + Income + Advertising + Population + Price +
ShelveLoc + Age + Education + Urban + US + Income & Advertising + Price & Age), df,
req_stats=["default", "vif"],
contrasts= Dict(:ShelveLoc => DummyCoding(base="Medium"), :Urban => DummyCoding(base="No"), :US => DummyCoding(base="No") ))
```
Now let's assume we want our response to be Sales and the predictors to be Price, Urban, and US:
```@example multir
lm = regress(@formula(Sales ~ Price + Urban + US), df,
contrasts= Dict(:ShelveLoc => DummyCoding(base="Medium"), :Urban => DummyCoding(base="No"), :US => DummyCoding(base="No") ))
```
Indeed, we note that "Urban:Yes" appears to have a low significance. Hence we could decide to make our model without this predictor:
```@example multir
lm = regress(@formula(Sales ~ Price + US), df,
contrasts= Dict(:ShelveLoc => DummyCoding(base="Medium"), :Urban => DummyCoding(base="No"), :US => DummyCoding(base="No") ))
```
To identify potential outliers and high leverage variables, we choose to plot the Cook's Distance and the leverage plot.
```@example multir
using VegaLite
lm, ps = regress(@formula(Sales ~ Price + US), df, "all",
req_stats=["default", "vif"],
contrasts= Dict(:ShelveLoc => DummyCoding(base="Medium"), :Urban => DummyCoding(base="No"), :US => DummyCoding(base="No") ))
p = [ps["leverage"]
ps["cooksd"]]
```
Alternatively, we can also use the predicted values and their statistics to create a new data frame with the entries of interest (here, we show only the first three entries).
```@example multir
results = predict_in_sample(lm, df, req_stats="all")
threshold_cooksd = 4 / lm.observations
potential_outliers = results[ results.cooksd .> threshold_cooksd , :]
potential_outliers[1:3, 1:3]
```
```@example multir
threshold_leverage = 2 * lm.p / lm.observations
potential_highleverage = results[ abs.(results.leverage) .> threshold_leverage , : ]
potential_highleverage[1:3, 1:3]
```
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | docs | 2934 | ## Tutorial ridge regression
This tutorial gives a brief introduction to ridge regression. The tutorial makes use of the acetylene dataset from Marquardt, D. W., and Snee, R. D. (1975), "Ridge Regression in Practice," American Statistician 29:3-20, and follows the same outline as the [SAS documentation](https://documentation.sas.com/doc/en/statug/15.2/statug_reg_examples05.htm)
### First, creating the dataset.
We create the dataset with the help of the `DataFrames.jl` package.
```@example ridgeregression
using DataFrames
x1 = [1300, 1300, 1300, 1300, 1300, 1300, 1200, 1200, 1200, 1200, 1200, 1200, 1100, 1100, 1100, 1100]
x2 = [7.5, 9.0, 11.0, 13.5, 17.0, 23.0, 5.3, 7.5, 11.0, 13.5, 17.0, 23.0, 5.3, 7.5, 11.0, 17.0]
x3 = [0.012, 0.012, 0.0115, 0.013, 0.0135, 0.012, 0.04, 0.038, 0.032, 0.026, 0.034, 0.041, 0.084, 0.098, 0.092, 0.086]
y = [49.0, 50.2, 50.5, 48.5, 47.5, 44.5, 28.0, 31.5, 34.5, 35.0, 38.0, 38.5, 15.0, 17.0, 20.5, 29.5]
df = DataFrame(x1= x1, x2= x2, x3= x3, y= y)
```
### Second, make a least square regression
We make a ordinary least squares (OLS) regression for comparison.
```@example ridgeregression
using LinearRegressionKit, StatsModels
using VegaLite
f = @formula(y ~ x1 + x2 + x3 + x1 & x2 + x1^2)
lm, ps = regress(f, df, "all", req_stats=["default", "vif"])
lm
```
We observe that the VIFs for the coefficients are high, which indicates likely multicollinearity.
### Ridge regression
Ridge regression requires a parameter (k), while there are methods to numerically identify k. It is also possible to trace the coefficients and VIFs values to let the analyst choose a k. Here we are going to trace for the k between 0.0 and 0.1 by increment of 0.0005. We display only the results for the first 5 k.
```@example ridgeregression
rdf, ps = ridge(f, df, 0.0:0.0005:0.1, traceplots=true)
rdf[1:5 , :]
```
Here is the default trace plot for the coefficients:
```@example ridgeregression
ps["coefs traceplot"]
```
!!! note
    Note that the `&` in variable names is replaced by ` ampersand `, as it would otherwise cause issues with either the web display or the SVG display. This only affects the plots generated with the library; one can use the DataFrame directly to generate custom plots.
And here is the default trace plot for the VIFs:
```@example ridgeregression
ps["vifs traceplot"]
```
As it is difficult to see the VIF traces, it is also possible to request a version of the plot with the y-axis log-scaled.
```@example ridgeregression
ps["vifs traceplot log"]
```
Once a `k` has been selected (in this case 0.004), a regular ridge regression with this value can be executed.
```@example ridgeregression
rlm = ridge(f, df, 0.004)
```
From there the regular `predict_*` functions can be used, although only the `predicted` and potentially the `residuals` statistics will be calculated.
```@example ridgeregression
res = predict_in_sample(rlm, df)
```
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 0.7.11 | 779527a9e0d0979ff6804f39721ed7afa52930c6 | docs | 2037 | ## Tutorial weighted regression
This tutorial gives a brief introduction to simple weighted regression using analytical weights. The tutorial makes use of the short dataset available on this [sas blog post](https://blogs.sas.com/content/iml/2016/10/05/weighted-regression.html).
### First, creating the dataset.
We create the dataset with the help of the `DataFrames.jl` package.
```@example weightedregression
using DataFrames
tw = [
2.3 7.4 0.058
3.0 7.6 0.073
2.9 8.2 0.114
4.8 9.0 0.144
1.3 10.4 0.151
3.6 11.7 0.119
2.3 11.7 0.119
4.6 11.8 0.114
3.0 12.4 0.073
5.4 12.9 0.035
6.4 14.0 0
]
df = DataFrame(tw, [:y,:x,:w])
```
### Second, make a basic analysis
We make a simple linear regression.
```@example weightedregression
using LinearRegressionKit, StatsModels
using VegaLite
f = @formula(y ~ x)
lms, pss = regress(f, df, "fit")
lms
```
And then the weighted regression version:
```@example weightedregression
lmw, psw = regress(f, df, "fit", weights="w")
lmw
```
The output of the model indicates that this is a weighted regression.
We also note that the number of observations is 10 instead of 11 for the simple regression. This is because the last observation has a weight of 0, and as the package only uses positive weights, it is not used to fit the regression model.
For comparison, we fit the simple regression with only the first 10 observations.
```@example weightedregression
df = first(df, 10)
lms, pss = regress(f, df, "fit")
lms
```
We can now see that the coefficients are indeed different in the weighted regression.
We can then contrast the fit plot from both regressions.
```@example weightedregression
[pss["fit"] psw["fit"]]
```
We note that the regression line is indeed "flatter" in the weighted regression case.
We also note that the prediction interval is presented differently (using error bars), and it shows a different shape, reflecting the weights' importance.
| LinearRegressionKit | https://github.com/ericqu/LinearRegressionKit.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 49 | using Nevanlinna; Nevanlinna.comonicon_install()
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 652 | using Nevanlinna
using Documenter
# Attach a doctest setup so doctests in docstrings run with the package loaded.
DocMeta.setdocmeta!(Nevanlinna, :DocTestSetup, :(using Nevanlinna); recursive=true)
# Build the HTML documentation for Nevanlinna.jl.
makedocs(;
    modules=[Nevanlinna],
    authors="Hiroshi Shinaoka <[email protected]> and contributors",
    repo="https://github.com/shinaoka/Nevanlinna.jl/blob/{commit}{path}#{line}",
    sitename="Nevanlinna.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI; plain file links are easier to browse locally.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://shinaoka.github.io/Nevanlinna.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
# Push the built documentation (deployed from the `main` dev branch).
deploydocs(;
    repo="github.com/shinaoka/Nevanlinna.jl",
    devbranch="main",
)
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 4938 | module Nevanlinna
using LinearAlgebra
using GenericLinearAlgebra
using Optim
using Zygote
using Comonicon
using TOML
# Some hack
using MultiFloats
function Base.convert(t::Type{Float64x2}, ::Irrational{:Ο})
return Float64x2(BigFloat(Ο, precision=128))
end
function Float64x2(::Irrational{:Ο})
return Float64x2(BigFloat(Ο, precision=128))
end
#==
# log2 is not implemented in MultiFloats.
# This will use the BigFloat implementation of
# log2, which will not be as fast as a pure-MultiFloat implementation.
MultiFloats.use_bigfloat_transcendentals()
==#
include("export.jl")
include("util.jl")
include("data.jl")
include("solver.jl")
include("ham_solver.jl")
include("hardy.jl")
#include("nevanlinna_impl.jl")
include("core.jl")
include("schur.jl")
include("optimize.jl")
@cast function bare(input_data::String, input_param::String, output_data::String)
    # Analytic continuation of bare Green's-function data with the Nevanlinna
    # algorithm.  `input_data` holds tab-separated rows (wn, Re G, Im G),
    # `input_param` is a TOML file with solver settings, and the resulting
    # spectrum Im G(w)/pi is written to `output_data`.
    f = open(input_data, "r")
    data = split.(readlines(f), '\t')
    close(f)
    # Column 1: Matsubara frequencies (made purely imaginary); columns 2/3: Re/Im of G.
    wn = im.*parse.(BigFloat, collect(Iterators.flatten(data))[1:3:end])
    gw = parse.(BigFloat,collect(Iterators.flatten(data))[2:3:end]) .+ im.*parse.(BigFloat,collect(Iterators.flatten(data))[3:3:end])

    param = TOML.parsefile(input_param)

    # Required settings from the [basic] table.
    N_real::Int64    = param["basic"]["N_real"]
    w_max::Float64   = param["basic"]["w_max"]
    eta::Float64     = param["basic"]["eta"]
    sum_rule::Float64 = param["basic"]["sum_rule"]
    H_max::Int64     = param["basic"]["H_max"]
    iter_tol::Int64  = param["basic"]["iter_tol"]
    lambda::Float64  = param["basic"]["lambda"]

    # Optional settings from the [option] table, with defaults.
    verbose::Bool       = false
    pick_check::Bool    = true
    optimization::Bool  = true
    ini_iter_tol::Int64 = 500
    mesh::Symbol        = :linear

    if haskey(param, "option")
        opt = param["option"]
        if haskey(opt, "verbose")
            verbose = opt["verbose"]
        end
        if haskey(opt, "pick_check")
            pick_check = opt["pick_check"]
        end
        if haskey(opt, "optimization")
            optimization = opt["optimization"]
        end
        if haskey(opt, "ini_iter_tol")
            ini_iter_tol = opt["ini_iter_tol"]
        end
        if haskey(opt, "mesh")
            # Bug fix: TOML stores the mesh choice as a String; assigning a
            # String to the Symbol-typed local `mesh` would throw a
            # MethodError, so convert explicitly.
            mesh = Symbol(opt["mesh"])
        end
    end

    sol = NevanlinnaSolver(wn, gw, N_real, w_max, eta, sum_rule, H_max, iter_tol, lambda, verbose=verbose, pick_check = pick_check, optimization=optimization, ini_iter_tol=ini_iter_tol, mesh=mesh)

    if optimization
        solve!(sol)
    end

    # One "omega <TAB> A(omega)" row per real-frequency mesh point.
    open(output_data, "w") do f
        for i in 1:N_real
            println(f, Float64(real(sol.reals.freq[i])), "\t", Float64(imag(sol.reals.val[i]))/pi)
        end
    end
end
@cast function hamburger(input_data::String, input_moment::String, input_param::String, output_data::String)
    # Analytic continuation constrained by moments (Hamburger moment problem).
    # `input_data` holds tab-separated rows (wn, Re G, Im G), `input_moment`
    # one real moment per line (an odd number of them), `input_param` the TOML
    # solver settings; the spectrum Im G(w)/pi is written to `output_data`.
    f = open(input_data, "r")
    data = split.(readlines(f), '\t')
    close(f)
    # Column 1: Matsubara frequencies (made purely imaginary); columns 2/3: Re/Im of G.
    wn = im.*parse.(BigFloat, collect(Iterators.flatten(data))[1:3:end])
    gw = parse.(BigFloat,collect(Iterators.flatten(data))[2:3:end]) .+ im.*parse.(BigFloat,collect(Iterators.flatten(data))[3:3:end])

    f = open(input_moment, "r")
    moments = Complex{BigFloat}.(parse.(Float64, readlines(f)))
    close(f)

    param = TOML.parsefile(input_param)

    # Required settings from the [basic] table.
    N_real::Int64    = param["basic"]["N_real"]
    w_max::Float64   = param["basic"]["w_max"]
    eta::Float64     = param["basic"]["eta"]
    sum_rule::Float64 = param["basic"]["sum_rule"]
    H_max::Int64     = param["basic"]["H_max"]
    iter_tol::Int64  = param["basic"]["iter_tol"]
    lambda::Float64  = param["basic"]["lambda"]

    # Optional settings from the [option] table, with defaults.
    verbose::Bool       = false
    pick_check::Bool    = true
    optimization::Bool  = true
    ini_iter_tol::Int64 = 500
    mesh::Symbol        = :linear

    if haskey(param, "option")
        opt = param["option"]
        if haskey(opt, "verbose")
            verbose = opt["verbose"]
        end
        if haskey(opt, "pick_check")
            pick_check = opt["pick_check"]
        end
        if haskey(opt, "optimization")
            optimization = opt["optimization"]
        end
        if haskey(opt, "ini_iter_tol")
            ini_iter_tol = opt["ini_iter_tol"]
        end
        if haskey(opt, "mesh")
            # Bug fix: TOML stores the mesh choice as a String; assigning a
            # String to the Symbol-typed local `mesh` would throw a
            # MethodError, so convert explicitly.
            mesh = Symbol(opt["mesh"])
        end
    end

    sol = HamburgerNevanlinnaSolver(moments, wn, gw, N_real, w_max, eta, sum_rule, H_max, iter_tol, lambda, verbose=verbose, pick_check = pick_check, optimization = optimization, ini_iter_tol=ini_iter_tol, mesh = mesh)

    if optimization
        solve!(sol)
    end

    # One "omega <TAB> A(omega)" row per real-frequency mesh point.
    open(output_data, "w") do f
        for i in 1:N_real
            println(f, Float64(real(sol.nev_st.reals.freq[i])), "\t", Float64(imag(sol.val[i]))/pi)
        end
    end
end
@main
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 3615 | function calc_phis(imags::ImagDomainData{T})::Vector{Complex{T}} where {T<:Real}
phis = Array{Complex{T}}(undef, imags.N_imag)
abcds = Array{Complex{T}}(undef, 2, 2, imags.N_imag)
phis[1] = imags.val[1]
for i in 1:imags.N_imag
view(abcds,:,:,i) .= Matrix{Complex{T}}(I, 2, 2)
end
for j in 1:imags.N_imag-1
for k in j+1:imags.N_imag
prod = Array{Complex{T}}(undef, 2, 2)
prod[1,1] = (imags.freq[k] - imags.freq[j]) / (imags.freq[k] - conj(imags.freq[j]))
prod[1,2] = phis[j]
prod[2,1] = conj(phis[j]) * (imags.freq[k] - imags.freq[j]) / (imags.freq[k] - conj(imags.freq[j]))
prod[2,2] = one(T)
view(abcds,:,:,k) .= view(abcds,:,:,k)*prod
end
phis[j+1] = (-abcds[2,2,j+1]*imags.val[j+1] + abcds[1,2,j+1]) / (abcds[2,1,j+1]*imags.val[j+1] - abcds[1,1,j+1])
end
return phis
end
"""
    calc_abcd(imags, reals, phis)

Evaluate, at every real-axis point `reals.freq[i]`, the 2x2 matrix
[[a b]; [c d]] obtained by multiplying the elementary Schur factors built
from the interpolation nodes `imags.freq` and the Schur parameters `phis`.
Returns a 2 x 2 x N_real array; slice `i` maps the free Hardy function to
the contractive interpolant at `reals.freq[i]`.
"""
function calc_abcd(imags::ImagDomainData{T},
                   reals::RealDomainData{T},
                   phis::Vector{Complex{T}}
                   )::Array{Complex{T},3} where {T<:Real}
    abcd = Array{Complex{T}}(undef, 2, 2, reals.N_real)
    for i in 1:reals.N_real
        # Start from the identity and multiply one factor per interpolation node,
        # in node order (the product is order-dependent).
        result = Matrix{Complex{T}}(I, 2, 2)
        z::Complex{T} = reals.freq[i]
        for j in 1:imags.N_imag
            prod = Array{Complex{T}}(undef, 2, 2)
            prod[1,1] = (z - imags.freq[j]) / (z - conj(imags.freq[j]))
            prod[1,2] = phis[j]
            prod[2,1] = conj(phis[j])*(z - imags.freq[j]) / (z - conj(imags.freq[j]))
            prod[2,2] = one(T)
            result *= prod
        end
        abcd[:,:,i] .= result
    end
    return abcd
end
"""
    check_causality(hardy_matrix, ab_coeff; verbose=false) -> Bool

Return `true` when the Hardy-basis expansion `hardy_matrix * ab_coeff` stays
inside the closed unit disk at every sampled point (the causality condition
|theta| <= 1), and `false` otherwise. When `verbose` is set, the largest
modulus and the outcome are printed.
"""
function check_causality(hardy_matrix::Array{Complex{T},2},
                         ab_coeff::Vector{Complex{S}};
                         verbose::Bool=false
                        )::Bool where {S<:Real, T<:Real}
    theta_vals = hardy_matrix * ab_coeff
    peak = maximum(abs.(theta_vals))
    causal = peak <= 1.0
    if verbose
        println("max_theta=", peak)
        if causal
            println("hardy optimization was success.")
        else
            println("hardy optimization was failure.")
        end
    end
    return causal
end
"""
    evaluation!(sol; verbose=false) -> Bool

Check causality of the current Hardy coefficients; when causal, evaluate the
interpolant on the real axis and store it in `sol.reals.val`. Returns the
causality flag.
"""
function evaluation!(sol::NevanlinnaSolver{T};
                     verbose::Bool=false
                    )::Bool where {T<:Real}
    is_causal = check_causality(sol.hardy_matrix, sol.ab_coeff, verbose=verbose)
    if !is_causal
        return false
    end
    # Contractive function theta from the Moebius transform of the Hardy expansion.
    hardy_param = sol.hardy_matrix * sol.ab_coeff
    numer = sol.abcd[1,1,:] .* hardy_param .+ sol.abcd[1,2,:]
    denom = sol.abcd[2,1,:] .* hardy_param .+ sol.abcd[2,2,:]
    contractive = numer ./ denom
    # Map the unit disk back to the upper half-plane (Nevanlinna function).
    sol.reals.val .= im .* (one(T) .+ contractive) ./ (one(T) .- contractive)
    return true
end
"""
    hamburger_evaluation!(sol; verbose=false) -> Bool

Check causality of the current Hardy coefficients and, if causal, evaluate the
embedded Nevanlinna function on the real axis (`sol.nev_st.reals.val`) and
then recover the physical function `sol.val` through the polynomial data
(P, Q, G, D) of the Hamburger problem. Returns the causality flag.
"""
function hamburger_evaluation!(
                sol ::HamburgerNevanlinnaSolver{T};
                verbose::Bool=false
                )::Bool where {T<:Real}
    causality = check_causality(sol.nev_st.hardy_matrix, sol.nev_st.ab_coeff, verbose=verbose)
    if causality
        param = sol.nev_st.hardy_matrix*sol.nev_st.ab_coeff
        # Moebius transform of the Hardy expansion, then map the contractive
        # theta back to the Nevanlinna function on the upper half-plane.
        theta = (sol.nev_st.abcd[1,1,:].* param .+ sol.nev_st.abcd[1,2,:]) ./ (sol.nev_st.abcd[2,1,:].*param .+ sol.nev_st.abcd[2,2,:])
        sol.nev_st.reals.val .= im * (one(T) .+ theta) ./ (one(T) .- theta)
        # Undo the moment embedding to obtain the physical function.
        P, Q, G, D = calc_PQGD(sol.mat_real_omega, sol.p, sol.q, sol.gamma, sol.delta)
        sol.val .= (- G .- sol.nev_st.reals.val .* D) ./ (P .+ sol.nev_st.reals.val .* Q)
    end
    return causality
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 3375 | struct ImagDomainData{T<:Real}
N_imag::Int64 #The number of points used in Nevanlinna algorithm
freq ::Array{Complex{T},1} #The values of Matsubara frequencies
val ::Array{Complex{T},1} #The values of negative of Green function
end
"""
    ImagDomainData(wn, gw, N_imag; verbose=false)

Build the imaginary-domain data used by the Schur algorithm from the first
`N_imag` Matsubara frequencies `wn` and Green-function values `gw`. The data
are Moebius-mapped to the unit disk, the Pick matrix is checked for positive
semi-definiteness (diagnostic only; printed when `verbose`), and the points
are stored in reversed order.
"""
function ImagDomainData(wn     ::Array{Complex{T},1},
                        gw     ::Array{Complex{T},1},
                        N_imag ::Int64;
                        verbose::Bool = false
                       )::ImagDomainData{T} where {T<:Real}
    val  = Array{Complex{T}}(undef, N_imag)
    freq = Array{Complex{T}}(undef, N_imag)
    # Map the Nevanlinna values -gw from the upper half-plane to the unit disk.
    for i in 1:N_imag
        freq[i] = wn[i]
        val[i]  = (-gw[i] - im) / (-gw[i] + im)
    end
    # Pick matrix in disk coordinates; PSD iff a solution of the interpolation
    # problem exists.
    Pick = Array{Complex{T}}(undef, N_imag, N_imag)
    for j in 1:N_imag
        for i in 1:N_imag
            freq_i = (freq[i] - im) / (freq[i] + im)
            freq_j = (freq[j] - im) / (freq[j] + im)
            nom = one(T) - val[i] * conj(val[j])
            den = one(T) - freq_i * conj(freq_j)
            Pick[i,j] = nom / den
        end
        # Tiny diagonal shift so exact zeros do not spoil the Cholesky test.
        Pick[j,j] += T(1e-250)
    end
    success = issuccess(cholesky(Pick,check = false))
    if verbose
        if success
            println("Pick matrix is positive semi-definite.")
        else
            println("Pick matrix is non positive semi-definite matrix in Schur method.")
        end
    end
    # Stored in reversed (descending-frequency) order for the Schur recursion.
    freq = reverse(freq)
    val  = reverse(val)
    return ImagDomainData(N_imag, freq, val)
end
"""
    RealDomainData{T<:Real}

Real-frequency mesh and function values for the continued (retarded) function.
"""
struct RealDomainData{T<:Real}
    N_real  ::Int64            # Number of mesh points on the real axis
    w_max   ::Float64          # Energy cutoff of the real axis
    eta     ::Float64          # Broadening: the retarded function is evaluated at omega + i*eta
    sum_rule::Float64          # Target value of the integrated spectral function
    freq    ::Array{Complex{T},1}  # Frequencies (omega + i*eta) of the retarded Green function
    val     ::Array{Complex{T},1}  # Values of the negative of the retarded Green function
end
"""
    RealDomainData(N_real, w_max, eta, sum_rule; T=BigFloat, small_omega=1e-5, mesh=:linear)

Construct the real-frequency mesh shifted by `i*eta`. `mesh` selects the grid:
`:linear` (uniform on [-w_max, w_max]), `:log` (logarithmic from `small_omega`
to `w_max`, mirrored about zero), or `:test` (uniform grid built by explicit
accumulation). Throws `ArgumentError` for any other symbol.
"""
function RealDomainData(N_real  ::Int64,
                        w_max   ::Float64,
                        eta     ::Float64,
                        sum_rule::Float64
                        ;
                        T::Type=BigFloat,
                        small_omega::Float64 = 1e-5,
                        mesh::Symbol=:linear
                       )::RealDomainData{T}
    if mesh === :linear
        # `val` is initialized with the mesh itself; it is overwritten once the
        # interpolant is evaluated.
        val = Array{Complex{T}}(collect(LinRange(-w_max, w_max, N_real)))
        freq = val .+ eta * im
        return RealDomainData(N_real, w_max, eta, sum_rule, freq, val)
    elseif mesh === :log
        half_N = N_real ÷ 2
        # NOTE(review): the local `mesh` (grid vector) shadows the `mesh::Symbol`
        # keyword from here on — harmless but worth renaming.
        mesh = exp.(LinRange(log.(small_omega), log.(w_max), half_N))
        val = Array{Complex{T}}([reverse(-mesh); mesh])
        freq = val .+ eta * im
        return RealDomainData(N_real, w_max, eta, sum_rule, freq, val)
    elseif mesh === :test
        # NOTE(review): `val` is returned uninitialized (undef) in this branch —
        # presumably acceptable for testing; confirm callers overwrite it.
        val = Array{Complex{T}}(undef, N_real)
        freq = Array{Complex{T}}(undef, N_real)
        inter::T = big(2.0*w_max) / (N_real-1)
        temp ::T = big(-w_max)
        freq[1] = -big(w_max) + big(eta)*im
        for i in 2:N_real
            temp += inter
            freq[i] = temp + big(eta)*im
        end
        return RealDomainData(N_real, w_max, eta, sum_rule, freq, val)
    else
        throw(ArgumentError("Invalid mesh"))
    end
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 71 | export NevanlinnaSolver
export HamburgerNevanlinnaSolver
export solve!
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 10191 | mutable struct HamburgerNevanlinnaSolver{T<:Real}
moments ::Vector{Complex{T}}
N_moments_ ::Int64
N ::Int64
n1 ::Int64
n2 ::Int64
isPSD ::Bool
isSingular ::Bool
isProper ::Bool
isDegenerate ::Bool
p ::Vector{Complex{T}}
q ::Vector{Complex{T}}
gamma ::Vector{Complex{T}}
delta ::Vector{Complex{T}}
hankel ::Array{Complex{T},2}
mat_real_omega::Array{Complex{T},2}
val ::Vector{Complex{T}}
nev_st ::NevanlinnaSolver{T}
verbose ::Bool
end
"""
    HamburgerNevanlinnaSolver(moments, wn, gw, N_real, w_max, eta, sum_rule,
                              H_max, iter_tol, lambda; kwargs...)

Set up the moment-constrained (Hamburger) Nevanlinna solver: build the Hankel
matrix from the (odd number of) `moments`, verify the existence conditions,
compute the polynomial data (p, q, gamma, delta), embed the Matsubara data
into an auxiliary Nevanlinna problem, and either run the initial Hardy
optimization (`optimization=true`) or just evaluate the interpolant.
"""
function HamburgerNevanlinnaSolver(
                  moments     ::Vector{Complex{T}},
                  wn          ::Vector{Complex{T}},
                  gw          ::Vector{Complex{T}},
                  N_real      ::Int64,
                  w_max       ::Float64,
                  eta         ::Float64,
                  sum_rule    ::Float64,
                  H_max       ::Int64,
                  iter_tol    ::Int64,
                  lambda      ::Float64
                  ;
                  verbose     ::Bool=false,
                  pick_check  ::Bool=true,
                  optimization::Bool=true,
                  ini_iter_tol::Int64=500,
                  mesh        ::Symbol=:linear
                  )::HamburgerNevanlinnaSolver{T} where {T<:Real}
    N_moments_ = length(moments)
    if N_moments_ % 2 == 0
        error("invalid moment number. Moment number should be odd.")
    end
    N = div((N_moments_ + 1) , 2)
    #generate hankel matrix
    hankel = Array{Complex{T}}(undef, N, N)
    for i in 1:N, j in 1:N
        hankel[i,j] = moments[i+j-1]
    end
    # Existence conditions (rank, degeneracy, PSD, singularity, properness).
    n1, n2, isDegenerate, isPSD, isSingular, isProper = existence_condition(hankel, verbose)
    # Orthogonal-polynomial coefficients for the moment embedding.
    p, q, gamma, delta = coefficient_lists(moments, hankel, n1, n2, isDegenerate, isPSD, isSingular, isProper)
    if N_real%2 == 1
        error("N_real must be even number!")
    end
    @assert length(wn) == length(gw)
    N_imag = length(wn)
    # Embed each Matsubara data point into the auxiliary Nevanlinna problem.
    embed_nev_val = Vector{Complex{T}}(undef, N_imag)
    for i in 1:N_imag
        z::Complex{T} = wn[i]
        P, Q, G, D = calc_PQGD(z, p, q, gamma, delta)
        nev_val = -gw[i]
        embed_nev_val[i] = (- nev_val * P - G) / (nev_val * Q + D)
    end
    # `ham_option=true` defers the Hardy optimization to this outer solver.
    nev_sol = NevanlinnaSolver(wn, -embed_nev_val, N_real, w_max, eta, sum_rule, H_max, iter_tol, lambda, ini_iter_tol=ini_iter_tol, verbose=verbose, ham_option=true)
    # Vandermonde-type matrix of real-axis frequency powers, reused by calc_PQGD.
    mat_real_omega = Array{Complex{T}}(undef, N_real, n2+1)
    for i in 1:N_real, j in 1:(n2 + 1)
        mat_real_omega[i,j] = nev_sol.reals.freq[i]^(j-1)
    end
    val = zeros(Complex{T}, N_real)
    ham_nev_sol = HamburgerNevanlinnaSolver(moments, N_moments_, N, n1, n2, isPSD, isSingular, isProper, isDegenerate, p, q, gamma, delta, hankel, mat_real_omega, val, nev_sol, verbose)
    if optimization
        calc_H_min(ham_nev_sol)
    else
        hamburger_evaluation!(ham_nev_sol)
    end
    return ham_nev_sol
end
"""
    calc_H_min(sol::HamburgerNevanlinnaSolver)

Find the smallest Hardy-space cutoff H (searching H = 1, 2, ..., 50) for which
the optimization converges while preserving causality, and record it in
`sol.nev_st.H_min`. Errors if no such H exists within the search bound.
"""
function calc_H_min(sol::HamburgerNevanlinnaSolver{T})::Nothing where {T<:Real}
    H_bound::Int64 = 50
    for iH in 1:H_bound
        if sol.verbose
            println("H=$(iH)")
        end
        # Each candidate H starts from all-zero Hardy coefficients.
        zero_ab_coeff = zeros(ComplexF64, 2*iH)
        causality, optim = hardy_optim!(sol, iH, zero_ab_coeff, iter_tol=500)
        #break if we find optimal H in which causality is preserved and optimize is successful
        if causality && optim
            sol.nev_st.H_min = sol.nev_st.H
            break
        end
        # Reset IJulia's output counter so long runs do not hit the display limit.
        if isdefined(Main, :IJulia)
            Main.IJulia.stdio_bytes[] = 0
        end
        if iH == H_bound
            error("H_min does not exist")
        end
    end
end
"""
    solve!(sol::HamburgerNevanlinnaSolver)

Run the Hardy optimization for increasing cutoff H from `H_min` to `H_max`,
warm-starting each run with the coefficients found for the previous H (padded
with one extra zero coefficient pair). Stops as soon as an H breaks causality
or the optimizer fails to converge.
"""
function solve!(sol::HamburgerNevanlinnaSolver{T})::Nothing where {T<:Real}
    ab_coeff = copy(sol.nev_st.ab_coeff)
    for iH in sol.nev_st.H_min:sol.nev_st.H_max
        if sol.verbose
            println("H=$(iH)")
        end
        causality, optim = hardy_optim!(sol, iH, ab_coeff)
        #break if we face instability of optimization
        if !(causality && optim)
            break
        end
        # Warm start for the next H: reuse the optimum and append a zero pair
        # for the two new Hardy coefficients.
        ab_coeff = copy(sol.nev_st.ab_coeff)
        push!(ab_coeff, 0.0+0.0*im)
        push!(ab_coeff, 0.0+0.0*im)
        # Reset IJulia's output counter so long runs do not hit the display limit.
        if isdefined(Main, :IJulia)
            Main.IJulia.stdio_bytes[] = 0
        end
    end
end
"""
    existence_condition(hankel, verbose) -> (n1, n2, isDegenerate, isPSD, isSingular, isProper)

Check the solvability conditions of the Hamburger moment problem on the
Hankel matrix of moments: rank split `n1 + n2 == 2N`, non-degeneracy,
positive semi-definiteness, singularity, and properness. Fatal violations
(zero matrix, degeneracy, non-PSD, non-properness) raise `error`; the
returned booleans describe the surviving configuration.
"""
function existence_condition(
                             hankel::Matrix{Complex{T}},
                             verbose::Bool
                            )::Tuple{Int64, Int64, Bool, Bool, Bool, Bool} where {T<:Real}
    N = size(hankel, 1)

    # Rank fixes the polynomial-degree split used downstream: n1 + n2 == 2N.
    n1::Int64 = rank(hankel)
    n2::Int64 = 2*N - n1
    if verbose
        println("Rank of Hankel matrix:$(n1)")
    end
    if n1 == 0
        error("Meeting degenerate 0 matrix.")
    end

    # Degeneracy (fatal): first row and first column both identically zero.
    # (Bug fix: the original assigned `isDegenerate = true` AFTER `error`,
    # which was unreachable dead code.)
    if hankel[1,:] == zeros(Complex{T}, N) && hankel[:,1] == zeros(Complex{T}, N)
        error("Degenerate")
    end
    isDegenerate = false
    if verbose
        println("Non-degenerate")
    end

    # Positive semi-definiteness via Cholesky on a minutely regularized copy.
    PSD_test = hankel .+ T(1e-250) .* Matrix{Complex{T}}(I, N, N)
    isPSD = issuccess(cholesky(PSD_test, check = false))
    if isPSD
        if verbose
            println("Positive semi-definite")
        end
    else
        error("Meeting non positive semi-definite matrix in moment calculation.")
    end

    # Singularity: rank deficiency of the full Hankel matrix.
    isSingular = n1 < N
    if verbose
        if isSingular
            println("Singular")
        else
            println("Non-singular")
            if isPSD
                println("Positive definite")
            end
        end
    end

    # Properness (fatal if violated): the leading n1 x n1 block must have full rank.
    tl_hankel = hankel[1:n1, 1:n1]
    if rank(tl_hankel) < n1
        error("Non-proper")
    end
    isProper = true
    if verbose
        println("Proper")
    end

    return n1, n2, isDegenerate, isPSD, isSingular, isProper
end
"""
    coefficient_lists(moments, hankel, n1, n2, isDegenerate, isPSD, isSingular, isProper)

Compute the polynomial coefficient vectors (p, q, gamma, delta) of the
Hamburger embedding from the moments and the (possibly extended) Hankel
matrix. `p` and `q` are monic orthogonal-polynomial coefficients; `gamma` and
`delta` follow by multiplying the symmetrized coefficient matrices with the
leading moments.
"""
function coefficient_lists(
                          moments     ::Vector{Complex{T}},
                          hankel      ::Matrix{Complex{T}},
                          n1          ::Int64,
                          n2          ::Int64,
                          isDegenerate::Bool,
                          isPSD       ::Bool,
                          isSingular  ::Bool,
                          isProper    ::Bool
                          )::Tuple{Vector{Complex{T}}, Vector{Complex{T}}, Vector{Complex{T}}, Vector{Complex{T}}} where {T<:Real}
    N = size(hankel,1)
    #fill extended hankel matrix for calculation
    # Non-singular case: pad with the higher moments plus a unit entry so the
    # cofactor construction below has one extra row/column to work with.
    if !(isSingular)
        extended_hankel = zeros(Complex{T}, N+1, N+1)
        extended_hankel[1:N,1:N] .= hankel
        for i in 1:(N-1)
            extended_hankel[i, N+1] = moments[i+N]
        end
        for j in 1:(N-1)
            extended_hankel[N+1, j] = moments[j+N]
        end
        extended_hankel[N, N+1] = Complex{T}(1.0)
    else
        extended_hankel = copy(hankel)
    end
    #p, q
    p = zeros(Complex{T},n1+1)
    q = zeros(Complex{T},n2+1)
    orthogonal_polynomial(extended_hankel, p, n1)
    # NOTE(review): `q` is allocated with n2+1 slots but filled as a
    # degree-(n1-1) polynomial; the trailing entries stay zero — confirm intended.
    orthogonal_polynomial(extended_hankel, q, n1 - 1)
    #gamma, delta
    sym_p = symmetrizer(p[2:(n1+1)])
    sym_q = symmetrizer(q[2:(n2+1)])
    gamma = sym_p * moments[1:n1]
    delta = sym_q * moments[1:n2]
    return p, q, gamma, delta
end
"""
    symmetrizer(vec) -> Matrix

Build the square Hankel-type matrix `M[i,j] = vec[i+j-1]` on and above the
anti-diagonal, with zeros below it.
"""
function symmetrizer(vec::Vector{Complex{T}}
                    )::Matrix{Complex{T}} where {T<:Real}
    dim = length(vec)
    mat = zeros(Complex{T}, dim, dim)
    for i in 1:dim, j in 1:(dim - i + 1)
        mat[i, j] = vec[i + j - 1]
    end
    return mat
end
"""
    removeColumn(matrix, colToRemove) -> Matrix

Return a copy of `matrix` with column `colToRemove` deleted.
"""
function removeColumn(
                      matrix     ::Matrix{Complex{T}},
                      colToRemove::Int64
                     )::Matrix{Complex{T}} where {T<:Real}
    ncols = size(matrix, 2)
    # Keep every column index except the one being dropped; indexing with the
    # vector produces a fresh Matrix.
    keep = [1:(colToRemove - 1); (colToRemove + 1):ncols]
    return matrix[:, keep]
end
"""
    orthogonal_polynomial(mat, vec, order)

Fill `vec` in place with the coefficients of the degree-`order` orthogonal
polynomial: each coefficient is a signed cofactor (minor determinant) of the
top `order x (order+1)` slice of `mat`, normalized so the leading coefficient
is one. Returns `nothing`.
"""
function orthogonal_polynomial(
                               mat  ::Matrix{Complex{T}},
                               vec  ::Vector{Complex{T}},
                               order::Int64
                              )::Nothing where {T<:Real}
    block = mat[1:order, 1:(order + 1)]
    for i in 1:(order + 1)
        # Minor obtained by deleting column i of the slice.
        minor = block[:, [1:(i - 1); (i + 1):(order + 1)]]
        vec[i] = (-1)^(i + order) * det(minor)
    end
    # Normalize to a monic polynomial.
    leading = vec[order + 1]
    for i in 1:(order + 1)
        vec[i] /= leading
    end
    return nothing
end
"""
    calc_PQGD(z, p, q, gamma, delta) -> (P, Q, G, D)

Evaluate the four embedding polynomials at the complex point `z`. The power
basis is sized by `delta` (the longest coefficient list); each polynomial uses
as many powers as it has coefficients.
"""
function calc_PQGD(z    ::Complex{T},
                   p    ::Vector{Complex{T}},
                   q    ::Vector{Complex{T}},
                   gamma::Vector{Complex{T}},
                   delta::Vector{Complex{T}}
                  )::Tuple{Complex{T},Complex{T},Complex{T},Complex{T}} where {T<:Real}
    powers = [z^(i - 1) for i in 1:(length(delta) + 1)]
    evalpoly_at(coeffs) = sum(powers[1:length(coeffs)] .* coeffs)
    return evalpoly_at(p), evalpoly_at(q), evalpoly_at(gamma), evalpoly_at(delta)
end
"""
    calc_PQGD(matz, p, q, gamma, delta) -> (P, Q, G, D)

Vectorized polynomial evaluation: row `i` of `matz` holds the powers
`z_i^0, z_i^1, ...`, so multiplying the leading columns by a coefficient
vector evaluates that polynomial at every point at once.
"""
function calc_PQGD(matz ::Matrix{Complex{T}},
                   p    ::Vector{Complex{T}},
                   q    ::Vector{Complex{T}},
                   gamma::Vector{Complex{T}},
                   delta::Vector{Complex{T}}
                  )::Tuple{Vector{Complex{T}},Vector{Complex{T}},Vector{Complex{T}},Vector{Complex{T}}} where {T<:Real}
    evalcols(coeffs) = matz[:, 1:length(coeffs)] * coeffs
    return evalcols(p), evalcols(q), evalcols(gamma), evalcols(delta)
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 660 | function hardy_basis(z::Complex{T}, k::Int64) where {T<:Real}
w = (z-im)/(z+im)
0.5*im*(w^(k+1)-w^k)/(sqrt(pi))
end
#function hardy_basis(x::Float64, y::Float64, k::Int64)
# z::Complex{T} = T(x) +im*T(y)
# return hardy_basis(z, k)
#end
"""
    calc_hardy_matrix(reals, H) -> Matrix

Assemble the (N_real x 2H) design matrix of Hardy basis functions evaluated
on `reals.freq`: column 2k-1 holds basis function k-1 and column 2k its
complex conjugate.
"""
function calc_hardy_matrix(reals::RealDomainData{T},
                           H::Int64
                          )::Array{Complex{T}, 2} where {T<:Real}
    hardy_matrix = Array{Complex{T}}(undef, reals.N_real, 2*H)
    for k in 1:H
        # Evaluate the basis column once and reuse it for the conjugate column
        # (previously the full broadcast was recomputed twice per k).
        basis_col = hardy_basis.(reals.freq, k-1)
        hardy_matrix[:,2*k-1] .= basis_col
        hardy_matrix[:,2*k]   .= conj(basis_col)
    end
    return hardy_matrix
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 4124 | function calc_functional(
sol::NevanlinnaSolver{T},
H::Int64,
ab_coeff::Vector{Complex{S}},
hardy_matrix::Array{Complex{T},2};
)::Float64 where {S<:Real, T<:Real}
param = hardy_matrix*ab_coeff
theta = (sol.abcd[1,1,:].* param .+ sol.abcd[1,2,:]) ./ (sol.abcd[2,1,:].*param .+ sol.abcd[2,2,:])
green = im * (one(T) .+ theta) ./ (one(T) .- theta)
A = Float64.(imag(green)./pi)
tot_int = integrate(sol.reals.freq, A)
second_der = integrate_squared_second_deriv(sol.reals.freq, A)
max_theta = findmax(abs.(param))[1]
func = abs(sol.reals.sum_rule-tot_int)^2 + sol.lambda*second_der
return func
end
"""
    hardy_optim!(sol::NevanlinnaSolver, H, ab_coeff; iter_tol=sol.iter_tol)

Run BFGS on the Hardy coefficients for cutoff `H`, starting from `ab_coeff`.
On success (converged AND causal) the solver state (`H`, `ab_coeff`,
`hardy_matrix`, real-axis values) is updated in place. Returns
`(causality, converged)`.
"""
function hardy_optim!(
                sol::NevanlinnaSolver{T},
                H::Int64,
                ab_coeff::Array{ComplexF64,1};
                iter_tol::Int64=sol.iter_tol,
                )::Tuple{Bool, Bool} where {T<:Real}
    loc_hardy_matrix = calc_hardy_matrix(sol.reals, H)
    # Objective closure over the current solver state and Hardy matrix.
    function functional(x::Vector{ComplexF64})::Float64
        return calc_functional(sol, H, x, loc_hardy_matrix)
    end
    # Gradient via automatic differentiation (Zygote).
    function jacobian(J::Vector{ComplexF64}, x::Vector{ComplexF64})
        J .= gradient(functional, x)[1]
    end
    res = optimize(functional, jacobian, ab_coeff, BFGS(),
                   Optim.Options(iterations = iter_tol,
                                 show_trace = sol.verbose))
    if  !(Optim.converged(res)) && sol.verbose
        println("Faild to optimize!")
    end
    causality = check_causality(loc_hardy_matrix, Optim.minimizer(res), verbose=sol.verbose)
    # Commit the new state only when the optimum is both causal and converged.
    if causality && (Optim.converged(res))
        sol.H = H
        sol.ab_coeff = Optim.minimizer(res)
        sol.hardy_matrix = loc_hardy_matrix
        evaluation!(sol, verbose=false)
    end
    return causality, (Optim.converged(res))
end
"""
    calc_functional(sol::HamburgerNevanlinnaSolver, H, ab_coeff, hardy_matrix)

Objective for the Hardy optimization of the Hamburger problem:
(sum-rule deviation)^2 + lambda * integral of |A''|^2, where the spectral
function A is obtained from the Hardy coefficients through the Moebius
transform and the moment embedding.
"""
function calc_functional(
                    sol         ::HamburgerNevanlinnaSolver{T},
                    H           ::Int64,
                    ab_coeff    ::Vector{Complex{S}},
                    hardy_matrix::Array{Complex{T},2};
                    )::Float64 where {S<:Real, T<:Real}
    param = hardy_matrix*ab_coeff
    # Contractive theta -> Nevanlinna function -> physical function via (P,Q,G,D).
    theta = (sol.nev_st.abcd[1,1,:].* param .+ sol.nev_st.abcd[1,2,:]) ./ (sol.nev_st.abcd[2,1,:].*param .+ sol.nev_st.abcd[2,2,:])
    nev_val = im * (one(T) .+ theta) ./ (one(T) .- theta)
    P, Q, G, D = calc_PQGD(sol.mat_real_omega, sol.p, sol.q, sol.gamma, sol.delta)
    val = (- G .- nev_val .* D) ./ (P .+ nev_val .* Q)
    A = Float64.(imag(val)./pi)
    # Sum-rule deviation plus second-derivative (smoothness) penalty.
    tot_int = integrate(sol.nev_st.reals.freq, A)
    second_der = integrate_squared_second_deriv(sol.nev_st.reals.freq, A)
    func = abs(sol.nev_st.reals.sum_rule-tot_int)^2 + sol.nev_st.lambda*second_der
    return func
end
"""
    hardy_optim!(sol::HamburgerNevanlinnaSolver, H, ab_coeff; iter_tol=sol.nev_st.iter_tol)

Run BFGS on the Hardy coefficients for cutoff `H`, starting from `ab_coeff`.
On success (converged AND causal) the embedded solver state is updated in
place and the physical function re-evaluated. Returns `(causality, converged)`.
"""
function hardy_optim!(
                sol     ::HamburgerNevanlinnaSolver{T},
                H       ::Int64,
                ab_coeff::Array{ComplexF64,1};
                iter_tol::Int64=sol.nev_st.iter_tol,
                )::Tuple{Bool, Bool} where {T<:Real}
    loc_hardy_matrix = calc_hardy_matrix(sol.nev_st.reals, H)
    # Objective closure over the current solver state and Hardy matrix.
    function functional(x::Vector{ComplexF64})::Float64
        return calc_functional(sol, H, x, loc_hardy_matrix)
    end
    # Gradient via automatic differentiation (Zygote).
    function jacobian(J::Vector{ComplexF64}, x::Vector{ComplexF64})
        J .= gradient(functional, x)[1]
    end
    res = optimize(functional, jacobian, ab_coeff, BFGS(),
                   Optim.Options(iterations = iter_tol,
                                 show_trace = sol.verbose))
    if  !(Optim.converged(res)) && sol.verbose
        println("Faild to optimize!")
    end
    causality = check_causality(loc_hardy_matrix, Optim.minimizer(res), verbose=sol.verbose)
    #causality = evaluation!(reals, abcd, H, Optim.minimizer(res), hardy_matrix, verbose=verbose)
    # Commit the new state only when the optimum is both causal and converged.
    if causality && (Optim.converged(res))
        sol.nev_st.H = H
        sol.nev_st.ab_coeff = Optim.minimizer(res)
        sol.nev_st.hardy_matrix = loc_hardy_matrix
        hamburger_evaluation!(sol, verbose=false)
    end
    return causality, (Optim.converged(res))
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 1126 | function calc_opt_N_imag(N ::Int64,
wn ::Array{Complex{T},1},
gw ::Array{Complex{T},1};
verbose::Bool=false
)::Int64 where {T<:Real}
@assert N == length(wn)
@assert N == length(gw)
freq = (wn .- im) ./ (wn .+ im)
val = (-gw .- im) ./ (-gw .+ im)
k::Int64 = 0
success::Bool = true
while success
k += 1
Pick = Array{Complex{T}}(undef, k, k)
for j in 1:k
for i in 1:k
num = one(T) - val[i] * conj(val[j])
den = one(T) - freq[i] * conj(freq[j])
Pick[i,j] = num / den
end
Pick[j,j] += T(1e-250)
end
success = issuccess(cholesky(Pick,check = false))
if k == N
break
end
end
if verbose
if !(success)
println("N_imag is setted as $(k-1)")
else
println("N_imag is setted as $(N)")
end
end
if !(success)
return (k-1)
else
return (N)
end
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
] | 1.0.0 | 8d3e26855ba913052401e1da8597aab375e85bed | code | 3817 | mutable struct NevanlinnaSolver{T<:Real}
imags::ImagDomainData{T} #imaginary domain data
reals::RealDomainData{T} #real domain data
phis::Vector{Complex{T}} #phis in schur algorithm
abcd::Array{Complex{T},3} #continued fractions
H_max::Int64 #upper cut off of H
H_min::Int64 #lower cut off of H
H::Int64 #current value of H
ab_coeff::Vector{ComplexF64} #current solution for H
hardy_matrix::Array{Complex{T},2} #hardy_matrix for H
iter_tol::Int64 #upper bound of iteration
lambda::Float64 #regularization parameter for second derivative term
ini_iter_tol::Int64 #upper bound of iteration for H_min
verbose::Bool
end
"""
    NevanlinnaSolver(wn, gw, N_real, w_max, eta, sum_rule, H_max, iter_tol, lambda; kwargs...)

Set up the Nevanlinna analytic-continuation solver: optionally prune the
input points with the Pick criterion, build the imaginary- and real-domain
data, precompute the Schur parameters and the abcd matrices, and (unless
`ham_option` is set) either run the initial Hardy optimization
(`optimization=true`) or just evaluate the interpolant.
"""
function NevanlinnaSolver(
                  wn          ::Vector{Complex{T}},
                  gw          ::Vector{Complex{T}},
                  N_real      ::Int64,
                  w_max       ::Float64,
                  eta         ::Float64,
                  sum_rule    ::Float64,
                  H_max       ::Int64,
                  iter_tol    ::Int64,
                  lambda      ::Float64
                  ;
                  verbose     ::Bool=false,
                  pick_check  ::Bool=true,
                  optimization::Bool=true,
                  ini_iter_tol::Int64=500,
                  mesh        ::Symbol=:linear,
                  ham_option  ::Bool=false #option for using in Hamburger moment problem
                  )::NevanlinnaSolver{T} where {T<:Real}
    if N_real%2 == 1
        error("N_real must be even number!")
    end
    @assert length(wn) == length(gw)
    N_imag = length(wn)
    # Pick criterion selects the largest usable number of input points.
    if pick_check
        opt_N_imag = calc_opt_N_imag(N_imag, wn, gw, verbose=verbose)
    else
        opt_N_imag = N_imag
    end
    imags = ImagDomainData(wn, gw, opt_N_imag)
    reals = RealDomainData(N_real, w_max, eta, sum_rule, T=T, mesh=mesh)
    # Schur parameters and continued-fraction matrices precomputed once.
    phis = calc_phis(imags)
    abcd = calc_abcd(imags, reals, phis)
    # Start the Hardy expansion at the minimal cutoff H = 1.
    H_min::Int64 = 1
    ab_coeff = zeros(ComplexF64, 2*H_min)
    hardy_matrix = calc_hardy_matrix(reals, H_min)
    sol = NevanlinnaSolver(imags, reals, phis, abcd, H_max, H_min, H_min, ab_coeff, hardy_matrix, iter_tol, lambda, ini_iter_tol, verbose)
    # In the Hamburger setting the outer solver drives the optimization.
    if ham_option
        return sol
    end
    if optimization
        calc_H_min(sol)
    else
        evaluation!(sol)
    end
    return sol
end
"""
    calc_H_min(sol::NevanlinnaSolver)

Find the smallest Hardy-space cutoff H (searching H = 1, 2, ..., 50) for which
the optimization converges while preserving causality, and record it in
`sol.H_min`. Errors if no such H exists within the search bound.
"""
function calc_H_min(sol::NevanlinnaSolver{T},)::Nothing where {T<:Real}
    H_bound::Int64 = 50
    for iH in 1:H_bound
        if sol.verbose
            println("H=$(iH)")
        end
        # Each candidate H starts from all-zero Hardy coefficients.
        zero_ab_coeff = zeros(ComplexF64, 2*iH)
        causality, optim = hardy_optim!(sol, iH, zero_ab_coeff, iter_tol=sol.ini_iter_tol)
        #break if we find optimal H in which causality is preserved and optimize is successful
        if causality && optim
            sol.H_min = sol.H
            break
        end
        # Reset IJulia's output counter so long runs do not hit the display limit.
        if isdefined(Main, :IJulia)
            Main.IJulia.stdio_bytes[] = 0
        end
        if iH == H_bound
            error("H_min does not exist")
        end
    end
end
"""
    solve!(sol::NevanlinnaSolver)::Nothing

Refine the Hardy-coefficient solution by sweeping the basis cutoff from
`sol.H_min` up to `sol.H_max`, warm-starting each optimization from the
previous solution padded with two zero coefficients. The sweep stops as
soon as an optimization fails to converge or breaks causality.
"""
function solve!(sol::NevanlinnaSolver{T})::Nothing where {T<:Real}
    warm_start = copy(sol.ab_coeff)
    for cutoff in sol.H_min:sol.H_max
        sol.verbose && println("H=$(cutoff)")

        causality, converged = hardy_optim!(sol, cutoff, warm_start)

        # Abort the sweep at the first sign of optimizer instability.
        (causality && converged) || break

        # Warm start for the next cutoff: current solution plus two zeros.
        warm_start = copy(sol.ab_coeff)
        append!(warm_start, (0.0 + 0.0im, 0.0 + 0.0im))

        # Keep IJulia's output byte counter from overflowing in notebooks.
        if isdefined(Main, :IJulia)
            Main.IJulia.stdio_bytes[] = 0
        end
    end
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
"""
    second_deriv(x::AbstractVector, y::AbstractVector)

Estimate the second derivative of `y` with respect to `x` at the interior
grid points, using the three-point finite-difference stencil for a
(possibly non-uniform) mesh. For inputs of length `N` the returned vector
has length `N-2`.
"""
function second_deriv(x::AbstractVector, y::AbstractVector)
    length(x) == length(y) || throw(ArgumentError("x and y must be the same length"))
    npts = length(x)
    return [
        begin
            hb = x[k] - x[k-1]   # backward spacing
            hf = x[k+1] - x[k]   # forward spacing
            num = hb * y[k+1] + hf * y[k-1] - (hf + hb) * y[k]
            den = hf^2 * hb + hb^2 * hf
            2 * num / den
        end
        for k in 2:(npts - 1)
    ]
end
"""
    integrate(x::AbstractVector, y::AbstractVector)

Integrate samples `y` over the (possibly non-uniform) grid `x` with the
composite trapezoidal rule.
"""
function integrate(x::AbstractVector, y::AbstractVector)
    npts = length(x)
    widths = diff(x)
    heights = (view(y, 2:npts) .+ view(y, 1:(npts - 1))) ./ 2
    return sum(heights .* widths)
end
"""
    integrate_squared_second_deriv(x::AbstractVector, y::AbstractVector)

Integrate the squared modulus of the second derivative of `y` over the
interior of the grid `x` (the two endpoints carry no second-derivative
estimate).
"""
function integrate_squared_second_deriv(x::AbstractVector, y::AbstractVector)
    curv = second_deriv(x, y)
    interior = view(x, 2:(length(x) - 1))
    return integrate(interior, abs.(curv) .^ 2)
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
"""
    calc_H_min(p, q, gamma, delta, mat_real_omega, reals, abcd, lambda[, verbose])

Hamburger-problem variant: search upward from H=1 for the smallest Hardy
cutoff at which `Nevanlinna_Schur` converges while preserving causality.
Returns `(opt_val, opt_reals, H, ab_coeff)`. Errors if no causal,
converged solution is found for any H up to 50.
"""
function calc_H_min(p::Vector{Complex{T}},
                    q::Vector{Complex{T}},
                    gamma::Vector{Complex{T}},
                    delta::Vector{Complex{T}},
                    mat_real_omega::Array{Complex{T},2},
                    reals::RealDomainData,
                    abcd::Array{Complex{T},3},
                    lambda::Float64,
                    verbose::Bool=false
                    )::Tuple{Vector{Complex{T}}, RealDomainData{T}, Int64, Array{ComplexF64,1}} where {T<:Real}
    for iH in 1:50
        println("H=$(iH)")
        # Each trial cutoff starts from a fresh zero Hardy expansion.
        zero_ab_coeff = zeros(ComplexF64, 2*iH)
        hardy_matrix = calc_hardy_matrix(reals, iH)
        opt_val, opt_reals, ab_coeff, causality, optim = Nevanlinna_Schur(p, q, gamma, delta, mat_real_omega, reals, abcd, iH, zero_ab_coeff, hardy_matrix, 500, lambda, verbose)

        # Return at the first H for which the optimization converged and
        # the result is causal.
        if causality && optim
            return opt_val, opt_reals, iH, ab_coeff
        end

        # Reset IJulia's printed-byte counter so long searches do not hit
        # the notebook output limit.
        if isdefined(Main, :IJulia)
            Main.IJulia.stdio_bytes[] = 0
        end
    end
    error("H_min does not exist")
end
"""
    Nevanlinna_Schur(p, q, gamma, delta, mat_real_omega, reals, abcd, H,
                     ab_coeff, hardy_matrix, iter_tol, lambda[, verbose])

Hamburger-problem variant: optimize the Hardy coefficients for a fixed
basis cutoff `H` by minimizing the regularized functional with BFGS, then
evaluate the result on the real grid. Returns
`(val, reals, coeff, causality, converged)`.
"""
function Nevanlinna_Schur(p::Vector{Complex{T}},
                          q::Vector{Complex{T}},
                          gamma::Vector{Complex{T}},
                          delta::Vector{Complex{T}},
                          mat_real_omega::Array{Complex{T},2},
                          reals::RealDomainData{T},
                          abcd::Array{Complex{T},3},
                          H::Int64,
                          ab_coeff::Array{ComplexF64,1},
                          hardy_matrix::Array{Complex{T},2},
                          iter_tol::Int64,
                          lambda::Float64,
                          verbose::Bool=false
                          )::Tuple{Vector{Complex{T}}, RealDomainData{T}, Array{ComplexF64,1}, Bool, Bool} where {T<:Real}
    # Objective: sum-rule mismatch plus curvature regularization for a
    # candidate Hardy-coefficient vector.
    function functional(x::Vector{ComplexF64})::Float64
        return calc_functional(p, q, gamma, delta, mat_real_omega, reals, abcd, H, x, hardy_matrix, lambda=lambda)
    end

    # Gradient of the objective via automatic differentiation.
    function jacobian(J::Vector{ComplexF64}, x::Vector{ComplexF64})
        J .= gradient(functional, x)[1]
    end

    res = optimize(functional, jacobian, ab_coeff, BFGS(),
                   Optim.Options(iterations = iter_tol,
                                 show_trace = verbose))

    if !(Optim.converged(res))
        println("Failed to optimize!")
    end

    causality = evaluation!(reals, abcd, H, Optim.minimizer(res), hardy_matrix, verbose=verbose)

    # Map the optimized Nevanlinna interpolant back through the moment data.
    val = zeros(Complex{T}, reals.N_real)
    hamburger_evaluation!(p, q, gamma, delta, val, reals, abcd, Optim.minimizer(res), hardy_matrix, verbose=verbose)

    return val, reals, Optim.minimizer(res), causality, (Optim.converged(res))
end
"""
    calc_functional(p, q, gamma, delta, mat_real_omega, reals, abcd, H,
                    ab_coeff, hardy_matrix; lambda=1e-5)

Objective for the Hamburger-problem Hardy optimization: the squared
deviation of the integrated spectral weight from the sum rule plus
`lambda` times the integrated squared curvature of the spectrum.
"""
function calc_functional(p::Vector{Complex{T}},
                         q::Vector{Complex{T}},
                         gamma::Vector{Complex{T}},
                         delta::Vector{Complex{T}},
                         mat_real_omega::Array{Complex{T},2},
                         reals::RealDomainData{T},
                         abcd::Array{Complex{T},3},
                         H::Int64,
                         ab_coeff::Vector{Complex{S}},
                         hardy_matrix::Array{Complex{T},2};
                         lambda::Float64 = 1e-5
                         )::Float64 where {S<:Real, T<:Real}
    param = hardy_matrix*ab_coeff

    # Contractive interpolant theta and the corresponding Nevanlinna function.
    theta = (abcd[1,1,:].*param .+ abcd[1,2,:]) ./ (abcd[2,1,:].*param .+ abcd[2,2,:])
    nev_val = im * (one(T) .+ theta) ./ (one(T) .- theta)

    # Map back through the polynomial data (P, Q, G, D) of the moment problem.
    P, Q, G, D = calc_PQGD(mat_real_omega, p, q, gamma, delta)
    val = (- G .- nev_val .* D) ./ (P .+ nev_val .* Q)

    # Spectral function on the real grid.
    A = Float64.(imag(val)./pi)

    tot_int = integrate(reals.freq, A)
    second_der = integrate_squared_second_deriv(reals.freq, A)

    # Sum-rule mismatch plus smoothness regularization.
    return abs(reals.sum - tot_int)^2 + lambda*second_der
end
"""
    hamburger_evaluation!(p, q, gamma, delta, val, reals, abcd, ab_coeff,
                          hardy_matrix; verbose=false)

Evaluate the optimized Hardy expansion on the real grid for the Hamburger
moment problem. When the interpolant is causal (|theta| <= 1 at every grid
point), writes the Nevanlinna function into `reals.val` and the final
Green's function into `val`, and returns `true`. Otherwise `val` and
`reals.val` are left untouched and `false` is returned.
"""
function hamburger_evaluation!(p::Vector{Complex{T}},
                               q::Vector{Complex{T}},
                               gamma::Vector{Complex{T}},
                               delta::Vector{Complex{T}},
                               val::Vector{Complex{T}},
                               reals::RealDomainData{T},
                               abcd::Array{Complex{T},3},
                               ab_coeff::Vector{Complex{S}},
                               hardy_matrix::Array{Complex{T},2};
                               verbose::Bool=false
                               )::Bool where {S<:Real, T<:Real}
    param = hardy_matrix * ab_coeff

    # Causality requires the Hardy expansion to stay inside the unit disk.
    max_theta = findmax(abs.(param))[1]
    if max_theta <= 1.0
        #if verbose
        #    println("max_theta=",max_theta)
        #    println("hardy optimization was success.")
        #end
        causality = true

        # Moebius transform of the Hardy expansion, then map the contractive
        # theta to the Nevanlinna function on the real grid.
        theta = (abcd[1,1,:].* param .+ abcd[1,2,:]) ./ (abcd[2,1,:].*param .+ abcd[2,2,:])
        reals.val .= im * (one(T) .+ theta) ./ (one(T) .- theta)

        # Map back through the polynomial data (P, Q, G, D) point by point.
        for i in 1:reals.N_real
            z::Complex{T} = reals.freq[i]
            P, Q, G, D = calc_PQGD(z, p, q, gamma, delta)
            val[i] = (- G - reals.val[i] * D) / (P + reals.val[i] * Q)
        end
    else
        println("max_theta=",max_theta)
        println("hardy optimization was failure.")
        causality = false
    end

    return causality
end
"""
    calc_functional(reals, abcd, H, ab_coeff, hardy_matrix; lambda=1e-5)

Objective for the Hardy optimization: the squared deviation of the
integrated spectral weight from the sum rule plus `lambda` times the
integrated squared curvature of the spectrum. (`H` is kept for interface
compatibility; the cutoff is already encoded in `hardy_matrix`.)
"""
function calc_functional(reals::RealDomainData{T},
                         abcd::Array{Complex{T},3},
                         H::Int64,
                         ab_coeff::Vector{Complex{S}},
                         hardy_matrix::Array{Complex{T},2};
                         lambda::Float64 = 1e-5
                         )::Float64 where {S<:Real, T<:Real}
    param = hardy_matrix*ab_coeff

    # Contractive interpolant theta and the reconstructed Green's function.
    theta = (abcd[1,1,:].*param .+ abcd[1,2,:]) ./ (abcd[2,1,:].*param .+ abcd[2,2,:])
    green = im * (one(T) .+ theta) ./ (one(T) .- theta)

    # Spectral function on the real grid.
    A = Float64.(imag(green)./pi)

    tot_int = integrate(reals.freq, A)
    second_der = integrate_squared_second_deriv(reals.freq, A)

    # Sum-rule mismatch plus smoothness regularization.
    return abs(reals.sum - tot_int)^2 + lambda*second_der
end
"""
    evaluation!(reals, abcd, H, ab_coeff, hardy_matrix; verbose=false)

Evaluate the Hardy expansion `ab_coeff` on the real grid. When the
solution is causal (as reported by `check_causality`), the reconstructed
Nevanlinna function is written into `reals.val`; otherwise `reals.val` is
left untouched. Returns the causality flag.
"""
function evaluation!(reals::RealDomainData{T},
                     abcd::Array{Complex{T},3},
                     H::Int64,
                     ab_coeff::Vector{Complex{S}},
                     hardy_matrix::Array{Complex{T},2};
                     verbose::Bool=false
                     )::Bool where {S<:Real, T<:Real}
    causality = check_causality(hardy_matrix, ab_coeff, verbose=verbose)

    if causality
        # Moebius transform of the Hardy expansion, then map the contractive
        # theta back to the Nevanlinna function on the real grid.
        param = hardy_matrix*ab_coeff
        theta = (abcd[1,1,:].*param .+ abcd[1,2,:]) ./ (abcd[2,1,:].*param .+ abcd[2,2,:])
        reals.val .= im * (one(T) .+ theta) ./ (one(T) .- theta)
    end

    return causality
end
"""
    Nevanlinna_Schur(reals, abcd, H, ab_coeff, hardy_matrix, iter_tol,
                     lambda[, verbose])

Optimize the Hardy coefficients for a fixed basis cutoff `H` by minimizing
the regularized functional with BFGS, then evaluate the result on the
real grid. Returns `(reals, coeff, causality, converged)`.
"""
function Nevanlinna_Schur(reals::RealDomainData{T},
                          abcd::Array{Complex{T},3},
                          H::Int64,
                          ab_coeff::Array{ComplexF64,1},
                          hardy_matrix::Array{Complex{T},2},
                          iter_tol::Int64,
                          lambda::Float64,
                          verbose::Bool=false
                          )::Tuple{RealDomainData{T}, Array{ComplexF64,1}, Bool, Bool} where {T<:Real}
    # Objective: sum-rule mismatch plus curvature regularization for a
    # candidate Hardy-coefficient vector.
    function functional(x::Vector{ComplexF64})::Float64
        return calc_functional(reals, abcd, H, x, hardy_matrix, lambda=lambda)
    end

    # Gradient of the objective via automatic differentiation.
    function jacobian(J::Vector{ComplexF64}, x::Vector{ComplexF64})
        J .= gradient(functional, x)[1]
    end

    res = optimize(functional, jacobian, ab_coeff, BFGS(),
                   Optim.Options(iterations = iter_tol,
                                 show_trace = verbose))

    if !(Optim.converged(res))
        println("Failed to optimize!")
    end

    causality = evaluation!(reals, abcd, H, Optim.minimizer(res), hardy_matrix, verbose=verbose)

    return reals, Optim.minimizer(res), causality, (Optim.converged(res))
end
"""
    calc_H_min(reals, abcd, lambda[, verbose])

Search upward from H=1 for the smallest Hardy cutoff at which
`Nevanlinna_Schur` converges while preserving causality. Returns
`(opt_reals, H, ab_coeff)`. Errors if no causal, converged solution is
found for any H up to 50.
"""
function calc_H_min(reals::RealDomainData,
                    abcd::Array{Complex{T},3},
                    lambda::Float64,
                    verbose::Bool=false
                    )::Tuple{RealDomainData{T}, Int64, Array{ComplexF64,1}} where {T<:Real}
    for iH in 1:50
        println("H=$(iH)")
        # Each trial cutoff starts from a fresh zero Hardy expansion.
        zero_ab_coeff = zeros(ComplexF64, 2*iH)
        hardy_matrix = calc_hardy_matrix(reals, iH)
        opt_reals, ab_coeff, causality, optim = Nevanlinna_Schur(reals, abcd, iH, zero_ab_coeff, hardy_matrix, 500, lambda, verbose)

        # Return at the first H for which the optimization converged and
        # the result is causal.
        if causality && optim
            return opt_reals, iH, ab_coeff
        end

        # Reset IJulia's printed-byte counter so long searches do not hit
        # the notebook output limit.
        if isdefined(Main, :IJulia)
            Main.IJulia.stdio_bytes[] = 0
        end
    end
    error("H_min does not exist")
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
# Regression test against the C++ reference implementation: the Julia
# solver must reproduce the reference phis, abcd matrices and spectrum.
@testset "core" begin
    T = BigFloat

    # Reference spectral model: a mixture of two Gaussians.
    gaussian(x, mu, sigma) = exp(-((x-mu)/sigma)^2)/(sqrt(π)*sigma)
    rho(omega) = 0.8*gaussian(omega, -1.0, 1.6) + 0.2*gaussian(omega, 3, 1)

    setprecision(256)

    # Read the Matsubara Green's function produced by the C++ reference code.
    hnw = 38
    test_gw   = Array{Complex{T}}(undef, hnw)
    test_smpl = Array{Complex{T}}(undef, hnw)
    f = open((@__DIR__) * "/c++/result/green.dat", "r")
    for i in 1:hnw
        list = readline(f)
        s = split(list, '\t')
        o  = parse(BigFloat, s[1])
        re = parse(BigFloat, s[2])
        ii = parse(BigFloat, s[3])
        test_smpl[i] = im*o
        test_gw[i]   = re + ii*im
    end
    close(f)

    # Solver parameters.
    N_real    = 6000
    omega_max = 10.0
    eta       = 0.001
    H_max     = 50
    lambda    = 1e-5
    iter_tol  = 1000

    N_imag    = Nevanlinna.calc_opt_N_imag(hnw, test_smpl, test_gw)
    imaginary = Nevanlinna.ImagDomainData(test_smpl, test_gw, N_imag)
    raw_reals = Nevanlinna.RealDomainData(N_real, omega_max, eta, 1.0, T=T, mesh=:test)

    phis = Nevanlinna.calc_phis(imaginary)
    abcd = Nevanlinna.calc_abcd(imaginary, raw_reals, phis)

    # Evaluate with the minimal Hardy basis and zero coefficients.
    H_min::Int64 = 1
    ab_coeff = zeros(ComplexF64, 2*H_min)
    hardy_matrix = Nevanlinna.calc_hardy_matrix(raw_reals, H_min)

    sol = NevanlinnaSolver(imaginary, raw_reals, phis, abcd, H_max, H_min, H_min, ab_coeff, hardy_matrix, iter_tol, lambda, 1, false)

    Nevanlinna.evaluation!(sol)
    spec = imag.(sol.reals.val)/pi

    # Compare the Schur phis against the C++ reference.
    cpp_phis = Array{Complex{T}}(undef, N_imag)
    f = open((@__DIR__) * "/c++/result/phis.dat", "r")
    for i in 1:N_imag
        list = readline(f)
        s = split(list, '\t')
        real_phi = parse(BigFloat, s[1])
        imag_phi = parse(BigFloat, s[2])
        cpp_phis[i] = real_phi + imag_phi*im
    end
    close(f)
    @test cpp_phis ≈ phis

    # Compare the continued-fraction abcd matrices against the C++ reference.
    cpp_abcd = Array{Complex{T}}(undef, 2, 2, N_real)
    f = open((@__DIR__) * "/c++/result/abcd.dat", "r")
    for i in 1:N_real
        list = readline(f)
        s = split(list, '\t')
        real_11 = parse(BigFloat, s[1])
        imag_11 = parse(BigFloat, s[2])
        real_12 = parse(BigFloat, s[3])
        imag_12 = parse(BigFloat, s[4])
        real_21 = parse(BigFloat, s[5])
        imag_21 = parse(BigFloat, s[6])
        real_22 = parse(BigFloat, s[7])
        imag_22 = parse(BigFloat, s[8])
        cpp_abcd[1,1,i] = real_11 + imag_11*im
        cpp_abcd[1,2,i] = real_12 + imag_12*im
        cpp_abcd[2,1,i] = real_21 + imag_21*im
        cpp_abcd[2,2,i] = real_22 + imag_22*im
    end
    close(f)
    @test cpp_abcd ≈ abcd

    # Compare the resulting spectral function against the C++ reference.
    cpp_spec = Array{Float64}(undef, N_real)
    f = open((@__DIR__) * "/c++/result/out_spec.dat", "r")
    for i in 1:N_real
        list = readline(f)
        s = split(list, '\t')
        spectral = parse(Float64, s[2])
        cpp_spec[i] = spectral
    end
    close(f)
    @test (cpp_spec) ≈ Float64.(spec)
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
# End-to-end test of the Hamburger moment problem: the reconstructed
# spectrum must reproduce the prescribed moments.
@testset "moment" begin
    T = BigFloat
    setprecision(2048)

    # Define the reference spectral function: two symmetric Gaussians.
    gaussian(x, mu, sigma) = exp(-0.5*((x-mu)/sigma)^2)/(sqrt(2*π)*sigma)
    rho(omega) = 0.5*gaussian(omega, 2.0, 1.0) + 0.5*gaussian(omega, -2.0, 1.0)

    # Generate Matsubara data for `rho` via SparseIR.
    function generate_input_data(rho::Function, beta::Float64)
        lambda = 1e+4
        wmax = lambda/beta
        basis = SparseIR.FiniteTempBasisSet(beta, wmax, 1e-15)
        rhol = [overlap(basis.basis_f.v[l], rho) for l in 1:length(basis.basis_f)]
        gl = - basis.basis_f.s .* rhol
        gw = evaluate(basis.smpl_wn_f, gl)

        hnw = length(basis.smpl_wn_f.sampling_points)÷2

        # To exclude the effect of the environment, we limit the data to the first 31 points.
        input_smpl = Array{Complex{T}}(undef, 31)
        input_gw   = Array{Complex{T}}(undef, 31)
        for i in 1:31
            input_smpl[i] = SparseIR.valueim(basis.smpl_wn_f.sampling_points[hnw+i], beta)
            input_gw[i]   = gw[hnw+i]
        end

        return input_smpl, input_gw
    end

    beta = 100. # inverse temperature
    input_smpl, input_gw = generate_input_data(rho, beta)

    N_real    = 1000  # dimension of the output grid
    omega_max = 10.0  # energy cutoff on the real axis
    eta       = 0.001 # broadening parameter
    sum_rule  = 1.0   # total spectral weight
    H_max     = 50    # cutoff of the Hardy basis
    lambda    = 1e-4  # regularization parameter
    iter_tol  = 1000  # upper bound on optimizer iterations

    moments = Complex{T}.([1, 0, 5, 0, 43])

    sol = Nevanlinna.HamburgerNevanlinnaSolver(moments, input_smpl, input_gw, N_real, omega_max, eta, sum_rule, H_max, iter_tol, lambda, optimization=false)

    # Recompute the first five moments from the reconstructed spectrum.
    calc_moment = Vector{Float64}(undef, 5)
    for i in 1:5
        xk = real.(sol.nev_st.reals.freq).^(i-1)
        y = Float64.(imag.(sol.val))/pi .* xk
        calc_moment[i] = Float64(Nevanlinna.integrate(real.(sol.nev_st.reals.freq), y))
    end

    test_moment = ([0.9999328252706802, 2.1117052430510528e-10, 5.005359475447759, -8.294576805137473e-9, 43.293142029878446])
    @test isapprox(calc_moment, test_moment; atol = 1e-4)
    println(calc_moment)
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
using Nevanlinna
using SparseIR
using Test

# Pull in each test group; every file registers its own @testset.
for testfile in ("util.jl", "core.jl", "moment.jl")
    include(testfile)
end
| Nevanlinna | https://github.com/SpM-lab/Nevanlinna.jl.git |
|
[
"MIT"
@testset "util.second_deriv" begin
    # For f(x) = x^2 the exact second derivative is 2 everywhere, so the
    # finite-difference estimate should be ~2 at every interior node.
    xmax = 3
    N = 1000
    x = collect(LinRange(0, sqrt(xmax), N)) .^ 2 # non-uniform mesh on [0, xmax]
    res = Nevanlinna.second_deriv(x, x.^2)
    @test all(isapprox.(res, 2.0, rtol=0, atol=1e-5))
end
@testset "util.integrate_squared_second_deriv" begin
    # For f(x) = c*x^3 with |c| = 1 we have f''(x) = 6c*x, so
    # ∫_0^xmax |f''(x)|^2 dx = 12*xmax^3.
    xmax = 3
    N = 10000
    x = collect(LinRange(0, sqrt(xmax), N)) .^ 2 # non-uniform mesh on [0, xmax]
    coeff = im # complex amplitude exercises the abs() in the integrand
    res = Nevanlinna.integrate_squared_second_deriv(x, coeff .* x.^3)
    @test isapprox(res, 12*xmax^3, atol=0, rtol=1e-3)
end
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.