licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3328 |
# Classification tests with low-precision (Int32) labels and mixed-type features:
# checks that build_stump/build_tree/build_forest/adaboost preserve the label
# eltype (Int32, later Int8) in predictions, and that the nfoldCV_* entry
# points accept Int32-typed hyperparameters.
@testset "low_precision.jl" begin
# Fixed seed so the accuracy thresholds below are deterministic.
Random.seed!(16)
n,m = 10^3, 5;
# Any-typed feature matrix: column 1 holds Int32, the rest Float64.
features = Array{Any}(undef, n, m);
features[:,:] = rand(n, m);
features[:,1] = round.(Int32, features[:,1]); # convert a column to 32bit integers
weights = rand(-1:1,m);
labels = round.(Int32, features * weights);
model = build_stump(labels, features)
preds = apply_tree(model, features)
# Predictions must keep the label eltype (Int32), not widen to Int.
@test preds isa Vector{Int32}
@test depth(model) == 1
# Tree with explicitly Int32-typed hyperparameters.
n_subfeatures = Int32(0)
max_depth = Int32(-1)
min_samples_leaf = Int32(1)
min_samples_split = Int32(2)
min_purity_increase = 0.0
model = build_tree(
labels, features,
n_subfeatures, max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
preds = apply_tree(model, features)
@test preds isa Vector{Int32}
@test MLJ.accuracy(labels, preds) > 0.9
# Forest with Int32-typed hyperparameters.
n_subfeatures = Int32(0)
ntrees = Int32(10)
partial_sampling = 0.7
max_depth = Int32(-1)
model = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth)
preds = apply_forest(model, features)
@test preds isa Vector{Int32}
@test MLJ.accuracy(labels, preds) > 0.9
# Adaboosted stumps: weaker learner, hence a lower accuracy bar.
n_iterations = Int32(25)
model, coeffs = build_adaboost_stumps(labels, features, n_iterations);
preds = apply_adaboost_stumps(model, coeffs, features);
@test preds isa Vector{Int32}
@test MLJ.accuracy(labels, preds) > 0.6
println("\n##### nfoldCV Classification Tree #####")
n_folds = Int32(3)
pruning_purity = 1.0
max_depth = Int32(-1)
min_samples_leaf = Int32(1)
min_samples_split = Int32(2)
min_purity_increase = 0.0
accuracy = nfoldCV_tree(
labels, features,
n_folds,
pruning_purity,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
@test mean(accuracy) > 0.7
println("\n##### nfoldCV Classification Forest #####")
ntrees = Int32(10)
n_subfeatures = Int32(2)
n_folds = Int32(3)
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
accuracy = nfoldCV_forest(
labels, features,
n_folds,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
@test mean(accuracy) > 0.7
println("\n##### nfoldCV Adaboosted Stumps #####")
n_iterations = Int32(25)
accuracy = nfoldCV_stumps(labels, features, n_folds, n_iterations)
@test mean(accuracy) > 0.6
# Test Int8 labels, and Float16 features: prediction eltype must follow labels.
features = Float16.(features)
labels = Int8.(labels)
model = build_stump(labels, features)
preds = apply_tree(model, features)
@test preds isa Vector{Int8}
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test preds isa Vector{Int8}
model = build_forest(labels, features)
preds = apply_forest(model, features)
@test preds isa Vector{Int8}
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test preds isa Vector{Int8}
model, coeffs = build_adaboost_stumps(labels, features, n_iterations);
preds = apply_adaboost_stumps(model, coeffs, features);
@test preds isa Vector{Int8}
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 4612 |
# Classification tests on random data: exercises stump/tree/forest/adaboost
# builders, depth limits, n_subfeatures/partial_sampling behavior, RNG
# reproducibility, and the nfoldCV_* entry points.
@testset "random.jl" begin
Random.seed!(16)
n,m = 10^3, 5;
features = rand(n,m);
weights = rand(-1:1,m);
labels = round.(Int, features * weights);
# A stump is a single split, so its depth is exactly 1.
model = build_stump(labels, round.(Int, features))
preds = apply_tree(model, round.(Int, features))
@test depth(model) == 1
# max_depth is honored exactly when it binds.
max_depth = 3
model = build_tree(labels, features, 0, max_depth)
@test depth(model) == max_depth
print_model(model, 3)
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test MLJ.accuracy(labels, preds) > 0.9
@test preds isa Vector{Int}
# test RNG param of trees
n_subfeatures = 2
t1 = build_tree(labels, features, n_subfeatures; rng=10)
t2 = build_tree(labels, features, n_subfeatures; rng=10)
t3 = build_tree(labels, features, n_subfeatures; rng=5)
# Same integer seed -> identical tree; different seed -> different tree.
@test (length(t1) == length(t2)) && (depth(t1) == depth(t2))
@test (length(t1) != length(t3)) || (depth(t1) != depth(t3))
# A shared mutable RNG advances between calls, so two builds must differ.
mt = Random.MersenneTwister(1)
t1 = build_tree(labels, features, n_subfeatures; rng=mt)
t3 = build_tree(labels, features, n_subfeatures; rng=mt)
@test (length(t1) != length(t3)) || (depth(t1) != depth(t3))
model = build_forest(labels, features)
preds = apply_forest(model, features)
@test MLJ.accuracy(labels, preds) > 0.9
@test preds isa Vector{Int}
n_subfeatures = 3
ntrees = 9
partial_sampling = 0.7
max_depth = -1
min_samples_leaf = 5
min_samples_split = 2
min_purity_increase = 0.0
model = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
preds = apply_forest(model, features)
@test MLJ.accuracy(labels, preds) > 0.9
@test length(model) == ntrees
# test n_subfeatures
n_subfeatures = 0
m_partial = build_forest(labels, features) # default sqrt(nfeatures)
m_full = build_forest(labels, features, n_subfeatures)
# Trees grown on all features split more effectively, hence are smaller here.
@test all( length.(trees(m_full)) .< length.(trees(m_partial)) )
# test partial_sampling parameter, train on single sample
partial_sampling = 1 / n
n_subfeatures = 0
ntrees = 1 # single tree test
max_depth = -1
min_samples_leaf = 1
min_samples_split = 2
min_purity_increase = 0.0
partial = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
# A single training sample cannot be split: the tree degenerates to a leaf.
@test typeof(trees(partial)[1]) <: Leaf
# test RNG parameter for forests
n_subfeatures = 2
ntrees = 5
m1 = build_forest(labels, features,
n_subfeatures,
ntrees;
rng=10)
m2 = build_forest(labels, features,
n_subfeatures,
ntrees;
rng=10)
m3 = build_forest(labels, features,
n_subfeatures,
ntrees;
rng=5)
@test length.(trees(m1)) == length.(trees(m2))
@test depth.(trees(m1)) == depth.(trees(m2))
@test length.(trees(m1)) != length.(trees(m3))
n_iterations = 25
model, coeffs = build_adaboost_stumps(labels, features, n_iterations);
preds = apply_adaboost_stumps(model, coeffs, features);
@test MLJ.accuracy(labels, preds) > 0.6
@test preds isa Vector{Int}
@test length(model) == n_iterations
println("\n##### nfoldCV Classification Tree #####")
nfolds = 3
pruning_purity = 1.0
max_depth = 5
# Same rng seed must reproduce the same CV accuracies; a different seed must not.
accuracy = nfoldCV_tree(labels, features, nfolds, pruning_purity, max_depth; rng=10, verbose=false)
accuracy2 = nfoldCV_tree(labels, features, nfolds, pruning_purity, max_depth; rng=10)
accuracy3 = nfoldCV_tree(labels, features, nfolds, pruning_purity, max_depth; rng=5)
@test mean(accuracy) > 0.7
@test accuracy == accuracy2
@test accuracy != accuracy3
println("\n##### nfoldCV Classification Forest #####")
nfolds = 3
n_subfeatures = 2
ntrees = 10
accuracy = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees; rng=10, verbose=false)
accuracy2 = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees; rng=10)
accuracy3 = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees; rng=5)
@test mean(accuracy) > 0.7
@test accuracy == accuracy2
@test accuracy != accuracy3
println("\n##### nfoldCV Adaboosted Stumps #####")
n_iterations = 25
n_folds = 3
accuracy = nfoldCV_stumps(labels, features, n_folds, n_iterations; rng=10, verbose=false)
accuracy2 = nfoldCV_stumps(labels, features, n_folds, n_iterations; rng=10)
accuracy3 = nfoldCV_stumps(labels, features, n_folds, n_iterations; rng=5)
@test mean(accuracy) > 0.6
@test accuracy == accuracy2
@test accuracy != accuracy3
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 394 |
# Load the 8x8 digits dataset bundled with the test suite.
#
# Reads `test/data/digits.csv` (first line is a header), where each row is
# `label, pixel_1, ..., pixel_64`. Returns `(X, y)` with `X` an
# n_instances x n_pixels `Matrix{Float32}` of pixel values and `y` a
# `Vector{Int}` of 1-based class labels (raw labels are shifted by +1).
function load_digits()
    data_path = joinpath(dirname(pathof(ModalDecisionTrees)), "..", "test/data/")
    # `open(...) do` guarantees the file handle is closed even on error
    # (the original opened the file and never closed it).
    rows = open(joinpath(data_path, "digits.csv")) do f
        readlines(f)[2:end] # drop the header line
    end
    parsed = [[parse(Float32, i) for i in split(row, ",")] for row in rows]
    data = hcat(parsed...)                 # one column per instance
    y = Int.(data[1, :]) .+ 1              # first row: labels, made 1-based
    X = Matrix(transpose(data[2:end, :]))  # instances as rows, materialized
    return X, y
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 9929 |
using SoleModels
using SoleModels: printmodel
using SoleLogics
using ModalDecisionTrees
using ModalDecisionTrees: translate
using ModalDecisionTrees.experimentals: parse_tree
tree_str1 = """
{1} ⟨G⟩ (min[V4] >= 0.04200671690893693) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 37/74 (conf = 0.5000)
✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 32/37 (conf = 0.8649)
✘ {1} ⟨G⟩ (min[V22] >= 470729.9023515756) YES_WITH_COUGH : 32/37 (conf = 0.8649)
✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 5/6 (conf = 0.8333)
✘ YES_WITH_COUGH : 31/31 (conf = 1.0000)
"""
tree_str2 = """
{1} ⟨G⟩ (max[V28] <= 7.245112655929639) YES : 78/141 (conf = 0.5532)
✔ {1} (min[V20] >= 4222.6591159789605) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 55/88 (conf = 0.6250)
│✔ {1} ⟨L̅⟩ (max[V11] <= 0.0038141608453366675) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 34/66 (conf = 0.5152)
││✔ {1} ⟨A̅⟩ (max[V29] <= 178.31522392540964) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/22 (conf = 0.8636)
│││✔ YES : 3/3 (conf = 1.0000)
│││✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/19 (conf = 1.0000)
││✘ {1} ⟨B⟩ (min[V26] >= 217902.31767535824) YES : 29/44 (conf = 0.6591)
││ ✔ {1} max[V6] <= 0.011319891844101688 NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/14 (conf = 0.8571)
││ │✔ YES : 2/2 (conf = 1.0000)
││ │✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/12 (conf = 1.0000)
││ ✘ {1} ⟨L⟩ (min[V6] >= 1.2154505217391558) YES : 27/30 (conf = 0.9000)
││ ✔ YES : 24/24 (conf = 1.0000)
││ ✘ {1} ⟨A̅⟩ (max[V16] <= 81.4665167044706) YES : 3/6 (conf = 0.5000)
││ ✔ YES : 3/3 (conf = 1.0000)
││ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 3/3 (conf = 1.0000)
│✘ {1} ⟨A̅⟩ (min[V24] >= 10.975911723366615) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/22 (conf = 0.9545)
│ ✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/21 (conf = 1.0000)
│ ✘ YES : 1/1 (conf = 1.0000)
✘ {1} ⟨G⟩ (min[V8] >= 494.33421895459713) YES : 45/53 (conf = 0.8491)
✔ {1} ⟨L̅⟩ (min[V27] >= 87446.39318797569) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 7/13 (conf = 0.5385)
│✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 6/6 (conf = 1.0000)
│✘ {1} max[V2] <= 42.36525041432014 YES : 6/7 (conf = 0.8571)
│ ✔ YES : 6/6 (conf = 1.0000)
│ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
✘ {1} ⟨G⟩ (min[V13] >= 31.231588457748384) YES : 39/40 (conf = 0.9750)
✔ YES : 39/39 (conf = 1.0000)
✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
"""
tree_str3 = """
{1} ⟨=⟩ (max[V28] <= 7.245112655929639) YES : 78/141 (conf = 0.5532)
✔ {1} ⟨=⟩ (min[V20] >= 4222.6591159789605) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 55/88 (conf = 0.6250)
│✔ {1} ⟨=⟩ (max[V11] <= 0.0038141608453366675) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 34/66 (conf = 0.5152)
││✔ {1} ⟨=⟩ (max[V29] <= 178.31522392540964) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/22 (conf = 0.8636)
│││✔ YES : 3/3 (conf = 1.0000)
│││✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/19 (conf = 1.0000)
││✘ {1} ⟨=⟩ (min[V26] >= 217902.31767535824) YES : 29/44 (conf = 0.6591)
││ ✔ {1} max[V6] <= 0.011319891844101688 NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/14 (conf = 0.8571)
││ │✔ YES : 2/2 (conf = 1.0000)
││ │✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/12 (conf = 1.0000)
││ ✘ {1} ⟨=⟩ (min[V6] >= 1.2154505217391558) YES : 27/30 (conf = 0.9000)
││ ✔ YES : 24/24 (conf = 1.0000)
││ ✘ {1} ⟨=⟩ (max[V16] <= 81.4665167044706) YES : 3/6 (conf = 0.5000)
││ ✔ YES : 3/3 (conf = 1.0000)
││ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 3/3 (conf = 1.0000)
│✘ {1} ⟨=⟩ (min[V24] >= 10.975911723366615) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/22 (conf = 0.9545)
│ ✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/21 (conf = 1.0000)
│ ✘ YES : 1/1 (conf = 1.0000)
✘ {1} ⟨=⟩ (min[V8] >= 494.33421895459713) YES : 45/53 (conf = 0.8491)
✔ {1} ⟨=⟩ (min[V27] >= 87446.39318797569) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 7/13 (conf = 0.5385)
│✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 6/6 (conf = 1.0000)
│✘ {1} max[V2] <= 42.36525041432014 YES : 6/7 (conf = 0.8571)
│ ✔ YES : 6/6 (conf = 1.0000)
│ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
✘ {1} ⟨=⟩ (min[V13] >= 31.231588457748384) YES : 39/40 (conf = 0.9750)
✔ YES : 39/39 (conf = 1.0000)
✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
"""
# Parse the textual tree dumps back into modal decision trees (one worldtype
# per frame; `start_without_world` as the frame's initial condition).
tree1 = parse_tree(tree_str1; worldtypes = [SoleData.Interval], initconditions = [ModalDecisionTrees.start_without_world])
tree2 = parse_tree(tree_str2; worldtypes = [SoleData.Interval], initconditions = [ModalDecisionTrees.start_without_world])
tree3 = parse_tree(tree_str3; worldtypes = [SoleData.OneWorld], initconditions = [ModalDecisionTrees.start_without_world])
# Translate to pure (SoleModels-style) trees.
pure_tree1 = translate(tree1)
pure_tree2 = translate(tree2)
pure_tree3 = translate(tree3)
# Smoke test: print every rule antecedent, with and without shortforms.
listrules(pure_tree1; use_shortforms=true) .|> antecedent .|> syntaxstring .|> println;
listrules(pure_tree1; use_shortforms=false) .|> antecedent .|> syntaxstring .|> println;
listrules(pure_tree2; use_shortforms=true) .|> antecedent .|> syntaxstring .|> println;
listrules(pure_tree2; use_shortforms=false) .|> antecedent .|> syntaxstring .|> println;
listrules(pure_tree3; use_shortforms=true) .|> antecedent .|> syntaxstring .|> println;
listrules(pure_tree3; use_shortforms=false) .|> antecedent .|> syntaxstring .|> println;
tree_str10 = """
{1} ⟨G⟩ (max[V28] <= 7.25) YES : 78/141 (conf = 0.5532)
✔ {1} (min[V20] >= 4222.66) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 55/88 (conf = 0.6250)
│✔ {1} ⟨L̅⟩ (max[V11] <= 0.00) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 34/66 (conf = 0.5152)
││✔ {1} ⟨A̅⟩ (max[V29] <= 178.32) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/22 (conf = 0.8636)
│││✔ YES : 3/3 (conf = 1.0000)
│││✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/19 (conf = 1.0000)
││✘ {1} ⟨B⟩ (min[V26] >= 217902.32) YES : 29/44 (conf = 0.6591)
││ ✔ {1} max[V6] <= 0.01 NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/14 (conf = 0.8571)
││ │✔ YES : 2/2 (conf = 1.0000)
││ │✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/12 (conf = 1.0000)
││ ✘ {1} ⟨L⟩ (min[V6] >= 1.22) YES : 27/30 (conf = 0.9000)
││ ✔ YES : 24/24 (conf = 1.0000)
││ ✘ {1} ⟨A̅⟩ (max[V16] <= 81.47) YES : 3/6 (conf = 0.5000)
││ ✔ YES : 3/3 (conf = 1.0000)
││ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 3/3 (conf = 1.0000)
│✘ {1} ⟨A̅⟩ (min[V24] >= 10.98) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/22 (conf = 0.9545)
│ ✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/21 (conf = 1.0000)
│ ✘ YES : 1/1 (conf = 1.0000)
✘ {1} ⟨G⟩ (min[V8] >= 494.33) YES : 45/53 (conf = 0.8491)
✔ {1} ⟨L̅⟩ (min[V27] >= 87446.39) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 7/13 (conf = 0.5385)
│✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 6/6 (conf = 1.0000)
│✘ {1} max[V2] <= 42.37 YES : 6/7 (conf = 0.8571)
│ ✔ YES : 6/6 (conf = 1.0000)
│ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
✘ {1} ⟨G⟩ (min[V13] >= 31.23) YES : 39/40 (conf = 0.9750)
✔ YES : 39/39 (conf = 1.0000)
✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
"""
# Parsing the rounded-threshold dump (tree_str10) is known to fail; tracked as broken.
@test_broken tree10 = parse_tree(tree_str10; worldtypes = [SoleData.Interval], initconditions = [ModalDecisionTrees.start_without_world])
tree_str_multi = """
{1} ⟨G⟩ (V1 <= 1) YES : 78/141 (conf = 0.5532)
✔ {2} (V2 >= 1) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 55/88 (conf = 0.6250)
│✔ {2} ⟨L̅⟩ (V3 <= 1) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 34/66 (conf = 0.5152)
││✔ {1} ⟨A̅⟩ (V4 <= 1) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/22 (conf = 0.8636)
│││✔ YES : 3/3 (conf = 1.0000)
│││✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 19/19 (conf = 1.0000)
││✘ {1} ⟨B⟩ (V5 >= 1) YES : 29/44 (conf = 0.6591)
││ ✔ {3} V6 <= 1 NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/14 (conf = 0.8571)
││ │✔ YES : 2/2 (conf = 1.0000)
││ │✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 12/12 (conf = 1.0000)
││ ✘ {2} ⟨L⟩ (V7 >= 1) YES : 27/30 (conf = 0.9000)
││ ✔ YES : 24/24 (conf = 1.0000)
││ ✘ {1} ⟨A̅⟩ (V8 <= 1) YES : 3/6 (conf = 0.5000)
││ ✔ YES : 3/3 (conf = 1.0000)
││ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 3/3 (conf = 1.0000)
│✘ {1} ⟨A̅⟩ (V9 >= 1) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/22 (conf = 0.9545)
│ ✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 21/21 (conf = 1.0000)
│ ✘ YES : 1/1 (conf = 1.0000)
✘ {1} ⟨G⟩ (V10 >= 1) YES : 45/53 (conf = 0.8491)
✔ {2} ⟨L̅⟩ (V11 >= 1) NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 7/13 (conf = 0.5385)
│✔ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 6/6 (conf = 1.0000)
│✘ {2} V12 <= 1 YES : 6/7 (conf = 0.8571)
│ ✔ YES : 6/6 (conf = 1.0000)
│ ✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
✘ {1} ⟨G⟩ (V13 >= 1) YES : 39/40 (conf = 0.9750)
✔ YES : 39/39 (conf = 1.0000)
✘ NO_CLEAN_HISTORY_AND_LOW_PROBABILITY : 1/1 (conf = 1.0000)
"""
# Multi-frame tree: three Interval frames, each with its own initial condition.
tree_multi = parse_tree(tree_str_multi; worldtypes = [SoleData.Interval, SoleData.Interval, SoleData.Interval], initconditions = [ModalDecisionTrees.start_without_world, ModalDecisionTrees.start_without_world, ModalDecisionTrees.start_without_world])
pure_tree_multi = translate(tree_multi)
printmodel(pure_tree_multi, show_shortforms = true)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2199 | using Test
using AbstractTrees
using SoleLogics
using SoleData
using SoleModels
using ModalDecisionTrees
using ModalDecisionTrees: DTLeaf, DTInternal
# A regression leaf and a classification leaf for building toy trees.
reg_leaf, cls_leaf = DTLeaf([1.0,2.0]), DTLeaf([1,2])
# Three existential scalar decisions over different relations/variables.
decision1 = ScalarExistentialFormula(globalrel, VariableMin(1), >=, 10)
decision2 = ScalarExistentialFormula(IA_A, VariableMin(2), <, 0)
decision3 = ScalarExistentialFormula(IA_L, VariableMin(3), <=, 0)
# Nest internal nodes: each new branch wraps the previous one as a subtree.
branch = DTInternal(2, decision1, cls_leaf, cls_leaf)
branch = DTInternal(2, decision2, cls_leaf, branch)
branch = DTInternal(2, decision3, branch, cls_leaf)
@test_nowarn AbstractTrees.print_tree(branch)
# Translate the restricted tree, one init condition per frame.
branchpure = ModalDecisionTrees.translate(branch, [ModalDecisionTrees.StartWithoutWorld(), ModalDecisionTrees.StartWithoutWorld()])
@test_nowarn AbstractTrees.print_tree(branchpure)
# Also translate the "complete" form of the same tree.
complete = ModalDecisionTrees.restricted2complete(branch)
completepure = ModalDecisionTrees.translate(complete, [ModalDecisionTrees.StartWithoutWorld(), ModalDecisionTrees.StartWithoutWorld()])
# Smoke-print and list rules for both variants.
branchpure |> printmodel
completepure |> printmodel
printmodel(branchpure; show_shortforms = true)
printmodel(completepure; show_shortforms = true)
branchpure |> listrules
completepure |> listrules
# Same translations, now requesting optimized shortforms.
branchpure = ModalDecisionTrees.translate(branch, [ModalDecisionTrees.StartWithoutWorld(), ModalDecisionTrees.StartWithoutWorld()]; optimize_shortforms = true)
completepure = ModalDecisionTrees.translate(complete, [ModalDecisionTrees.StartWithoutWorld(), ModalDecisionTrees.StartWithoutWorld()]; optimize_shortforms = true)
printmodel(branchpure; show_shortforms = true)
printmodel(completepure; show_shortforms = true)
printmodel(branchpure)
printmodel(completepure)
branchpure |> listrules
completepure |> listrules
using D3Trees
text = ["one\n(second line)", "2", "III", "four"]
style = ["", "fill:red", "r:14px", "opacity:0.7"]
link_style = ["", "stroke:blue", "", "stroke-width:10px"]
tooltip = ["pops", "up", "on", "hover"]
# NOTE(review): `children` below resolves to the `AbstractTrees.children`
# function and no local `tree` is defined in this file — both look like
# leftovers from the D3Trees README example; confirm what they should bind to.
@test_nowarn t = D3Tree(children,
text=text,
style=style,
tooltip=tooltip,
link_style=link_style,
title="My Tree",
init_expand=10,
)
@test_nowarn t = D3Tree(tree)
# inchrome(t)
# inbrowser(t, "firefox")
# inchrome(t)
# inbrowser(t, "firefox")
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1367 | using Test
using SoleData
using SoleLogics
using SoleData: ScalarExistentialFormula
using ModalDecisionTrees
using ModalDecisionTrees: DTLeaf, DTInternal
using ModalDecisionTrees: isbackloop, isforthloop
using ModalDecisionTrees: DoubleEdgedDecision, _back!, _forth!
using ModalDecisionTrees: back, forth
using AbstractTrees
# Build scalar decisions and wrap them in DoubleEdgedDecisions (DEDs), whose
# back/forth references must be wired up before translation can succeed.
decision1 = ScalarExistentialFormula(globalrel, VariableMin(1), >=, 10)
decision2 = ScalarExistentialFormula(IA_A, VariableMin(2), <, 0)
decision3 = ScalarExistentialFormula(IA_L, VariableMin(3), <=, 0)
ded1 = DoubleEdgedDecision(decision1)
ded2 = DoubleEdgedDecision(decision2)
ded3 = DoubleEdgedDecision(decision3)
reg_leaf, cls_leaf = DTLeaf([1.0,2.0]), DTLeaf([1,2])
branch = DTInternal(2, ded1, cls_leaf, cls_leaf)
# A freshly wrapped DED has no back/forth loop yet.
@test !isbackloop(branch)
@test !isforthloop(branch)
# Point the DED's back and forth edges at the node itself.
_back!(ded1, Ref(branch))
_forth!(ded1, Ref(branch))
@test isbackloop(branch)
@test isforthloop(branch)
branch = DTInternal(1, ded2, cls_leaf, branch)
_back!(ded2, Ref(branch))
_forth!(ded2, Ref(branch))
branch = DTInternal(2, ded3, branch, cls_leaf)
_back!(ded3, Ref(branch))
_forth!(ded3, Ref(branch))
@test isbackloop(branch)
@test isforthloop(branch)
AbstractTrees.print_tree(branch)
t = branch
initconditions = [ModalDecisionTrees.StartWithoutWorld(), ModalDecisionTrees.StartWithoutWorld()]
# Translation should succeed once all loops are in place.
pure_tree = ModalDecisionTrees.translate(t, initconditions)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1897 |
using SoleLogics
using SoleModels
using AbstractTrees
using ModalDecisionTrees
using ModalDecisionTrees: DTLeaf, DTInternal
using SoleData: ExistentialTopFormula
using ModalDecisionTrees: AbstractNode, DoubleEdgedDecision, DTNode, Label
using ModalDecisionTrees: back!, forth!
# ⟨A⟩⊤ / ⟨L⟩⊤: existential "top" formulas used as structural decisions below.
diamondtop = ExistentialTopFormula(IA_A)
diamondtop2 = ExistentialTopFormula(IA_L)

# Hand-construction helpers for modal decision trees.

# Leaf case: wrap a prediction into a `DTLeaf`.
MDT(prediction) = DTLeaf(prediction)

# Internal node splitting on a propositional atom (modality index fixed to 1).
function MDT(p::Atom, left::AbstractNode, right::AbstractNode)
    return DTInternal(1, DoubleEdgedDecision(p), left, right)
end

# Internal node splitting on an existential-top formula; same shape as above.
function MDT(φ::ExistentialTopFormula, left::AbstractNode, right::AbstractNode)
    return DTInternal(1, DoubleEdgedDecision(φ), left, right)
end
ν3 = MDT(
Atom("p₂"),
MDT("L₂"),
MDT("L₁"),
)
ν1 = MDT(
diamondtop,
MDT(
Atom(" ̅p₁"),
MDT("L₁"),
ν3,
),
MDT("L₁"),
)
# back!((ν3), ModalDecisionTrees.right(ν1))
back!(ν3, ν1)
t = MDT(
diamondtop2,
ν1,
MDT("L₂"),
)
printmodel(t)
# treemap(x->(printmodel(x), children(x)), t);
# paths = treemap(x->((x isa DTInternal ? ModalDecisionTrees.decision(x) : ModalDecisionTrees.prediction(x)), children(x)), t)
# collect(Leaves(t))
# collect(PostOrderDFS(t))
# treemap(_t->(_t,children(_t)), t)
using ModalDecisionTrees: translate
# Left subtree of `t` (the ν1 branch built above).
t2 = ModalDecisionTrees.left(t)
using Test
initconditions = [ModalDecisionTrees.StartWithoutWorld()]
# Translation must not warn on leaves, subtrees, or the full tree.
@test_nowarn translate.(Leaves(t), initconditions)
@test_nowarn translate(ν3, initconditions)
@test_nowarn translate(ν1, initconditions)
@test_nowarn translate(t, initconditions)
@test_nowarn translate(t2, initconditions)
# Smoke-print the translated subtree and the raw trees.
printmodel(translate(t2, initconditions))
printmodel(t)
translate(t2, initconditions)
translate(t, initconditions)
printmodel(t2)
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1211 | @testset "ames.jl" begin
# MLJ integration test on the Ames housing data (regression via modal models).
X, y = MLJ.@load_ames
# Only consider non-categorical variables
mask = BitVector((!).((<:).(eltype.(values(X)), CategoricalValue)))
# X = filter(((i,c),)->(i in mask), collect(enumerate(X))[mask])
X = NamedTuple(zip(keys(X)[mask], values(X)[mask]))
X = DataFrame(X)
X = Float64.(X)
# Subsample 100 instances with a fixed RNG for speed and determinism.
p = randperm(Random.MersenneTwister(1), 100)
X, y = X[p, :], y[p]
n_instances = size(X, 1)
n_train = Int(floor(n_instances*.8))
# 80/20 train/test split, again with a fixed RNG.
p = randperm(Random.MersenneTwister(1), n_instances)
train_idxs = p[1:n_train]
test_idxs = p[n_train+1:end]
X_train, y_train = X[train_idxs,:], y[train_idxs]
X_test, y_test = X[test_idxs,:], y[test_idxs]
model = ModalDecisionTree(min_purity_increase = 0.001)
mach = machine(model, X_train, y_train) |> fit!
yhat = MLJ.predict_mean(mach, X_test)
# Correlation with the truth as a crude goodness-of-fit bar.
@test StatsBase.cor(yhat, y_test) > 0.6
model = ModalRandomForest(ntrees = 15)
mach = machine(model, X_train, y_train) |> fit!
yhat = MLJ.predict_mean(mach, X_test)
@test StatsBase.cor(yhat, y_test) > 0.7
# using Plots
# p = sortperm(y_test)
# scatter(y_test[p], label = "y")
# scatter!(yhat[p], label = "ŷ")
# k = 20
# plot!([mean(yhat[p][i:i+k]) for i in 1:length(yhat[p])-k], label = "ŷ, moving average")
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2640 | using ModalDecisionTrees
using MLJ
using DataFrames
using Random
using StatsBase
# Pull in `load_digits` from the test data helpers.
include("$(dirname(dirname(pathof(ModalDecisionTrees))))/test/data/load.jl")
X, y = load_digits()
# Treat labels as continuous targets (regression on digit identity).
y = float.(y)
p = randperm(Random.MersenneTwister(1), 100)
X, y = X[p, :], y[p]
# Reshape each 64-pixel row into an 8x8x1 image; stack along dim 4 (instances),
# then expose each channel slice as a NamedTuple column for MLJ.
Xcube = cat(map(r->reshape(r, (8,8,1)), eachrow(X))...; dims=4)
Xnt = NamedTuple(zip(Symbol.(1:length(eachslice(Xcube; dims=3))), eachslice.(eachslice(Xcube; dims=3); dims=3)))
n_instances = size(X, 1)
n_train = Int(floor(n_instances*.8))
# 80/20 train/test split with a fixed RNG.
p = randperm(Random.MersenneTwister(1), n_instances)
train_idxs = p[1:n_train]
test_idxs = p[n_train+1:end]
X_train, y_train = X[train_idxs,:], y[train_idxs]
X_test, y_test = X[test_idxs,:], y[test_idxs]
# Same image reshaping, separately for train and test subsets.
X_traincube = cat(map(r->reshape(r, (8,8,1)), eachrow(X_train))...; dims=4)
X_trainnt = NamedTuple(zip(Symbol.(1:length(eachslice(X_traincube; dims=3))), eachslice.(eachslice(X_traincube; dims=3); dims=3)))
X_testcube = cat(map(r->reshape(r, (8,8,1)), eachrow(X_test))...; dims=4)
X_testnt = NamedTuple(zip(Symbol.(1:length(eachslice(X_testcube; dims=3))), eachslice.(eachslice(X_testcube; dims=3); dims=3)))
model = ModalDecisionTree(min_purity_increase = 0.001)
mach = machine(model, X_trainnt, y_train) |> fit!
println(mach)
c = StatsBase.cor(MLJ.predict_mean(mach, X_testnt), y_test)
# A single tree on these images currently underperforms; tracked as broken.
@test_broken c > 0.45
# model = ModalRandomForest()
# mach = machine(model, X_trainnt, y_train) |> fit!
# @test StatsBase.cor(MLJ.predict_mean(mach, X_testnt), y_test) > 0.5
# Forest with a single subfeature per split, fit on the training rows only.
mach = machine(ModalRandomForest(;
n_subfeatures = 1,
ntrees = 10,
sampling_fraction = 0.7,
max_depth = -1,
min_samples_leaf = 1,
min_samples_split = 2,
min_purity_increase = 0.0,
print_progress = true,
rng = Random.MersenneTwister(1)
), Xnt, y) |> m->fit!(m, rows = train_idxs)
println(StatsBase.cor(MLJ.predict_mean(mach, X_testnt), y_test))
@test StatsBase.cor(MLJ.predict_mean(mach, X_testnt), y_test) > 0.30
# Forest with a fractional n_subfeatures (60% of features per split).
mach = machine(ModalRandomForest(;
n_subfeatures = 0.6,
ntrees = 10,
sampling_fraction = 0.7,
max_depth = -1,
min_samples_leaf = 1,
min_samples_split = 2,
min_purity_increase = 0.0,
rng = Random.MersenneTwister(1)
), Xnt, y) |> m->fit!(m, rows = train_idxs)
println(StatsBase.cor(MLJ.predict_mean(mach, X_testnt), y_test))
@test StatsBase.cor(MLJ.predict_mean(mach, X_testnt), y_test) > 0.5
# using Plots
# p = sortperm(y_test)
# scatter(y_test[p], label = "y")
# scatter!(yhat[p], label = "ŷ")
# k = 20
# plot!([mean(yhat[p][i:i+k]) for i in 1:length(yhat[p])-k], label = "ŷ, moving average")
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 473 | using MLJBase
using StatsBase
rng = Random.MersenneTwister(1)
# Synthetic regression problem: 100 instances, 2 features, fixed RNG.
X, y = make_regression(100, 2; rng = rng)
# model = ModalDecisionTree(min_purity_increase = 0.001)
model = ModalDecisionTree()
mach = machine(model, X, y)
# First half of the rows trains, second half tests.
train_idxs = 1:div(length(y), 2)
test_idxs = div(length(y), 2)+1:100
MLJBase.fit!(mach, rows=train_idxs)
ypreds = MLJBase.predict_mean(mach, rows=test_idxs)
println(StatsBase.cor(ypreds, y[test_idxs]))
@test StatsBase.cor(ypreds, y[test_idxs]) > 0.45
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 896 | # Regression Test - Appliances Energy Prediction Data Set
# https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
@testset "energy.jl" begin
# NOTE(review): this downloads the dataset on every run, and `readcsv` was
# removed from Base in Julia 1.0 (use `DelimitedFiles.readdlm(path, ',')`);
# confirm which Julia version this suite targets.
download("https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv", "energy.csv");
energy = readcsv("energy.csv");
# Drop the header row; features start at column 3, target is column 2.
features = energy[2:end, 3:end];
labels = float.(energy[2:end, 2]);
# over-fitting
n_subfeatures = 0
max_depth = -1
min_samples_leaf = 1
model = build_tree(
labels, features,
n_subfeatures,
max_depth,
min_samples_leaf)
preds = apply_tree(model, features);
# An unconstrained tree should (over)fit the training set almost perfectly.
@test R2(labels, preds) > 0.99
println("\n##### nfoldCV Regression Tree #####")
r2 = nfoldCV_tree(labels, features, 3);
@test mean(r2) > 0.05
println("\n##### nfoldCV Regression Forest #####")
r2 = nfoldCV_forest(labels, features, 2, 10, 3);
@test mean(r2) > 0.35
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2805 | @testset "low_precision.jl" begin
# Regression tests with Int32 hyperparameters and low-precision numeric types.
Random.seed!(5)
n, m = 10^3, 5 ;
# Any-typed feature matrix: column 1 holds Int32, the rest Float64.
features = Array{Any}(undef, n, m);
features[:,:] = randn(n, m);
features[:,1] = round.(Int32, features[:,1]); # convert a column to 32bit integers
weights = rand(-2:2,m);
labels = float.(features * weights); # cast to Array{Float64,1}
min_samples_leaf = Int32(1)
n_subfeatures = Int32(0)
max_depth = Int32(-1)
min_samples_split = Int32(2)
min_purity_increase = 0.5
model = build_tree(
labels, round.(Int32, features),
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
preds = apply_tree(model, round.(Int32, features))
# Rounded features lose information, so the fit must stay imperfect.
@test R2(labels, preds) < 0.95
@test typeof(preds) <: Vector{Float64}
# Forest with Int32-typed hyperparameters.
n_subfeatures = Int32(3)
ntrees = Int32(10)
partial_sampling = 0.7
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
model = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
preds = apply_forest(model, features)
@test R2(labels, preds) > 0.9
@test typeof(preds) <: Vector{Float64}
println("\n##### nfoldCV Regression Tree #####")
n_folds = Int32(3)
pruning_purity = 1.0
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
r2 = nfoldCV_tree(
labels, features,
n_folds,
pruning_purity,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
@test mean(r2) > 0.6
println("\n##### nfoldCV Regression Forest #####")
ntrees = Int32(10)
n_subfeatures = Int32(2)
n_folds = Int32(3)
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
r2 = nfoldCV_forest(
labels, features,
n_folds,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
@test mean(r2) > 0.8
# Test Float16 labels, and Float16 features: prediction eltype follows labels.
features = Float16.(features)
labels = Float16.(labels)
model = build_stump(labels, features)
preds = apply_tree(model, features)
@test preds isa Vector{Float16}
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test preds isa Vector{Float16}
model = build_forest(labels, features)
preds = apply_forest(model, features)
@test preds isa Vector{Float16}
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test preds isa Vector{Float16}
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 5573 | @testset "random.jl" begin
# Regression tests on synthetic data: labels are a noisy-free linear combination
# of random features, so trees/forests should recover them almost exactly.
# NOTE: statement order matters throughout — many calls consume shared RNG state.
Random.seed!(5)
n, m = 10^3, 5 ;
features = Array{Any}(undef, n, m);
features[:,:] = randn(n, m);
features[:,1] = round.(Integer, features[:,1]); # convert a column of integers
weights = rand(-2:2,m);
labels = float.(features * weights); # cast to Array{Float64,1}
# A stump (single split) must have depth exactly 1.
model = build_stump(labels, features)
@test depth(model) == 1
# over-fitting
min_samples_leaf = 1
max_depth = -1
n_subfeatures = 0
model = build_tree(
labels, features,
n_subfeatures,
max_depth,
min_samples_leaf)
preds = apply_tree(model, features);
@test R2(labels, preds) > 0.99 # R2: coeff of determination
@test typeof(preds) <: Vector{Float64}
### @test length(model) == n # can / should this be enforced ???
# under-fitting
min_samples_leaf = 100
model = build_tree(
labels, round.(Int, features),
n_subfeatures,
max_depth,
min_samples_leaf)
preds = apply_tree(model, round.(Int, features));
@test R2(labels, preds) < 0.8
# max_depth must be respected exactly.
min_samples_leaf = 5
max_depth = 3
n_subfeatures = 0
model = build_tree(
labels, features,
n_subfeatures,
max_depth,
min_samples_leaf)
@test depth(model) == max_depth
# A very large min_samples_split should force under-fitting.
min_samples_leaf = 1
n_subfeatures = 0
max_depth = -1
min_samples_split = 300
model = build_tree(
labels, features,
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split)
preds = apply_tree(model, features);
@test R2(labels, preds) < 0.8
# A large min_purity_increase should also limit tree growth.
n_subfeatures = 0
max_depth = -1
min_samples_leaf = 1
min_samples_split = 2
min_purity_increase = 0.5
model = build_tree(
labels, features,
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
preds = apply_tree(model, features);
@test R2(labels, preds) < 0.95
# test RNG param of trees
n_subfeatures = 2
t1 = build_tree(labels, features, n_subfeatures; rng=10)
t2 = build_tree(labels, features, n_subfeatures; rng=10)
t3 = build_tree(labels, features, n_subfeatures; rng=5)
@test (length(t1) == length(t2)) && (depth(t1) == depth(t2))
@test (length(t1) != length(t3)) || (depth(t1) != depth(t3))
# A mutable RNG object advances between builds, so two builds sharing it differ.
mt = Random.MersenneTwister(1)
t1 = build_tree(labels, features, n_subfeatures; rng=mt)
t3 = build_tree(labels, features, n_subfeatures; rng=mt)
@test (length(t1) != length(t3)) || (depth(t1) != depth(t3))
# Default-parameter forest should fit the linear target well.
model = build_forest(labels, features)
preds = apply_forest(model, features)
@test R2(labels, preds) > 0.9
@test typeof(preds) <: Vector{Float64}
n_subfeatures = 3
ntrees = 9
partial_sampling = 0.7
max_depth = -1
min_samples_leaf = 5
min_samples_split = 2
min_purity_increase = 0.0
model = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
preds = apply_forest(model, features)
@test R2(labels, preds) > 0.9
@test length(model) == ntrees
# test n_subfeatures
ntrees = 10
partial_sampling = 1.0
max_depth = -1
min_samples_leaf = 10
n_subfeatures = 1
m_partial = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf)
n_subfeatures = 0
m_full = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf)
# Full-feature trees split more effectively, so they end up shallower on average.
@test mean(depth.(trees(m_full))) < mean(depth.(trees(m_partial)))
# test partial_sampling parameter, train on single sample
partial_sampling = 1 / n
n_subfeatures = 0
ntrees = 1 # single tree test
max_depth = -1
min_samples_leaf = 1
min_samples_split = 2
min_purity_increase = 0.0
partial = build_forest(
labels, features,
n_subfeatures,
ntrees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase)
# Training on a single sample can only produce a leaf.
@test typeof(trees(partial)[1]) <: Leaf
# test RNG parameter
n_subfeatures = 2
ntrees = 5
m1 = build_forest(labels, features,
n_subfeatures,
ntrees;
rng=10)
m2 = build_forest(labels, features,
n_subfeatures,
ntrees;
rng=10)
m3 = build_forest(labels, features,
n_subfeatures,
ntrees;
rng=5)
@test length.(trees(m1)) == length.(trees(m2))
@test depth.(trees(m1)) == depth.(trees(m2))
@test length.(trees(m1)) != length.(trees(m3))
# NOTE(review): banner says "Classification Tree" but this is the regression
# suite cross-validating a regression tree — banner text looks copy-pasted.
println("\n##### nfoldCV Classification Tree #####")
nfolds = 3
pruning_purity = 1.0
max_depth = 4
r2_1 = nfoldCV_tree(labels, features, nfolds, pruning_purity, max_depth; rng=10, verbose=false)
r2_2 = nfoldCV_tree(labels, features, nfolds, pruning_purity, max_depth; rng=10)
r2_3 = nfoldCV_tree(labels, features, nfolds, pruning_purity, max_depth; rng=5)
@test mean(r2_1) > 0.5
# Same rng seed must reproduce the exact same fold scores; a different seed must not.
@test r2_1 == r2_2
@test r2_1 != r2_3
println("\n##### nfoldCV Regression Forest #####")
nfolds = 3
n_subfeatures = 2
ntrees = 10
r2_1 = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees; rng=10, verbose=false)
r2_2 = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees; rng=10)
r2_3 = nfoldCV_forest(labels, features, nfolds, n_subfeatures, ntrees; rng=5)
@test mean(r2_1) > 0.8
@test r2_1 == r2_2
@test r2_1 != r2_3
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1787 | using DecisionTree
# Benchmark driver script: loads the suite definitions from benchmark/ and runs
# build/apply benchmarks for trees, forests, and adaboost on classification and
# regression tasks, printing the timing details of each group.
using BenchmarkTools
using Random
include("benchmark/classification_suite.jl")
include("benchmark/regression_suite.jl")
include("benchmark/utils.jl")
Random.seed!(1)
# Classification Benchmarks
classification_tree = benchmark_classification(build_tree, apply_tree)
println("\n\n############### CLASSIFICATION: BUILD TREE ###############")
print_details(classification_tree["BUILD"])
println("\n\n############### CLASSIFICATION: APPLY TREE ###############")
print_details(classification_tree["APPLY"])
classification_forest = benchmark_classification(build_forest, apply_forest)
println("\n\n############### CLASSIFICATION: BUILD FOREST ###############")
print_details(classification_forest["BUILD"])
println("\n\n############### CLASSIFICATION: APPLY FOREST ###############")
print_details(classification_forest["APPLY"])
# build_adaboost/apply_adaboost are wrappers defined in benchmark/utils.jl.
classification_adaboost = benchmark_classification(build_adaboost, apply_adaboost)
println("\n\n############### CLASSIFICATION: BUILD ADABOOST ###############")
print_details(classification_adaboost["BUILD"])
println("\n\n############### CLASSIFICATION: APPLY ADABOOST ###############")
print_details(classification_adaboost["APPLY"])
# Regression Benchmarks
regression_tree = benchmark_regression(build_tree, apply_tree)
println("\n\n############### REGRESSION: BUILD TREE ###############")
print_details(regression_tree["BUILD"])
println("\n\n############### REGRESSION: APPLY TREE ###############")
print_details(regression_tree["APPLY"])
regression_forest = benchmark_regression(build_forest, apply_forest)
println("\n\n############### REGRESSION: BUILD FOREST ###############")
print_details(regression_forest["BUILD"])
println("\n\n############### REGRESSION: APPLY FOREST ###############")
print_details(regression_forest["APPLY"])
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1626 | using AbstractTrees: AbstractTrees
# Test-suite dependencies; metrics are pulled explicitly from DecisionTree so
# they can be called unqualified in the included test files.
using DecisionTree
using DelimitedFiles
using Random
using ScikitLearnBase
using StableRNGs
using Statistics
using Test
using LinearAlgebra
using DecisionTree: accuracy, R2, majority_vote, mean_squared_error
using DecisionTree: confusion_matrix, ConfusionMatrix
println("Julia version: ", VERSION)
# Cosine similarity of two vectors: their inner product scaled by both norms.
# Returns a scalar in [-1, 1] for real-valued inputs.
function similarity(a, b)
    overlap = reshape(a, 1, :) * b
    return first(overlap / norm(a) / norm(b))
end
# Return a freshly seeded stable RNG so every call site starts from the same
# deterministic stream (seed 123).
function srng()
    return StableRNGs.StableRNG(123)
end
# Include every test file in `list` in order, announcing each one before it
# runs and printing a separator line afterwards.
function run_tests(list)
    for testfile in list
        println("TEST: $testfile \n")
        include(testfile)
        println(repeat("=", 50))
    end
end
# Test files grouped by category; paths are relative to this runtests.jl file
# and are include()d in order by run_tests.
classification = [
"classification/random.jl",
"classification/low_precision.jl",
"classification/heterogeneous.jl",
"classification/digits.jl",
"classification/iris.jl",
"classification/adult.jl",
"classification/scikitlearn.jl",
"classification/adding_trees.jl",
]
regression = [
"regression/random.jl",
"regression/low_precision.jl",
"regression/digits.jl",
"regression/scikitlearn.jl",
]
miscellaneous = [
"miscellaneous/convert.jl",
"miscellaneous/abstract_trees_test.jl",
"miscellaneous/feature_importance_test.jl",
"miscellaneous/ensemble_methods.jl",
# "miscellaneous/parallel.jl"
]
# (suite name, file list) pairs consumed by the top-level testset below.
test_suites = [
("Classification", classification),
("Regression", regression),
("Miscellaneous", miscellaneous),
]
# Run every registered suite; each (name, file-list) pair gets its own nested
# testset so failures are reported per category.
# Idiom fix: iterate the pairs directly with destructuring instead of indexing
# `1:length(test_suites)` and unpacking tuple elements by position; the
# redundant `let` wrapper is dropped (the testset already scopes its body).
@testset "Test Suites" begin
    for (name, list) in test_suites
        @testset "$name" begin
            run_tests(list)
        end
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3803 | function benchmark_classification(build::Function, apply::Function)
# Body of benchmark_classification(build, apply): builds a nested
# BenchmarkGroup keyed by phase ("BUILD"/"APPLY"), dataset ("DIGITS"/"ADULT"),
# and a padded label describing the label/feature element types, then tunes and
# runs the whole suite. Each section first trains/applies once eagerly so the
# APPLY benchmark can close over a fitted `model`.
println("\nRunning benchmarks ...")
########## benchmarks suite ##########
suite = BenchmarkGroup()
suite["BUILD"] = BenchmarkGroup()
suite["BUILD"]["DIGITS"] = BenchmarkGroup()
suite["BUILD"]["ADULT"] = BenchmarkGroup()
suite["APPLY"] = BenchmarkGroup()
suite["APPLY"]["DIGITS"] = BenchmarkGroup()
suite["APPLY"]["ADULT"] = BenchmarkGroup()
# using DIGITS dataset
X, Y = load_data("digits")
m, n = size(X)
# Copy into Any-typed containers to benchmark the un-specialized code path.
X_Any = Array{Any}(undef, m, n)
Y_Any = Array{Any}(undef, m)
X_Any[:, :] = X
Y_Any[:] = Y
# Bare `x::T` statements here are runtime type assertions, not declarations.
X_Any::Matrix{Any}
Y_Any::Vector{Any}
model = build(Y_Any, X_Any)
preds = apply(model, X_Any)
suite["BUILD"]["DIGITS"][pad("Y::Any X::Any")] = @benchmarkable $build($Y_Any, $X_Any)
suite["APPLY"]["DIGITS"][pad("Y::Any X::Any")] = @benchmarkable $apply($model, $X_Any)
X_Any::Matrix{Any}
Y = Int64.(Y)::Vector{Int64}
model = build(Y, X_Any)
preds = apply(model, X_Any)
suite["BUILD"]["DIGITS"][pad("Y::Int64 X::Any")] = @benchmarkable $build($Y, $X_Any)
suite["APPLY"]["DIGITS"][pad("Y::Int64 X::Any")] = @benchmarkable $apply($model, $X_Any)
X = Int64.(X)::Matrix{Int64}
Y_Any::Vector{Any}
model = build(Y_Any, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::Any X::Int64")] = @benchmarkable $build($Y_Any, $X)
suite["APPLY"]["DIGITS"][pad("Y::Any X::Int64")] = @benchmarkable $apply($model, $X)
# Narrow numeric types.
Y = Int8.(Y)
X = Float16.(X)
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::Int8 X::Float16")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["DIGITS"][pad("Y::Int8 X::Float16")] = @benchmarkable $apply($model, $X)
# Standard machine types.
Y = Int64.(Y)
X = Float64.(X)
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::Int64 X::Float64")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["DIGITS"][pad("Y::Int64 X::Float64")] = @benchmarkable $apply($model, $X)
# String labels with numeric features.
Y = string.(Y)::Vector{String}
X = Float64.(X)
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::String X::Float64")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["DIGITS"][pad("Y::String X::Float64")] = @benchmarkable $apply(
$model, $X
)
# using ADULT dataset
X_Any, Y_Any = load_data("adult")
# NOTE(review): `round(Int, size(...))` is a no-op on an Int — presumably a
# leftover from subsampling a fraction of the rows.
n = round(Int, size(X_Any, 1))
Y_Any = Y_Any[1:n]::Vector{Any}
X_Any = X_Any[1:n, :]::Matrix{Any}
model = build(Y_Any, X_Any)
preds = apply(model, X_Any)
suite["BUILD"]["ADULT"][pad("Y::Any X::Any")] = @benchmarkable $build($Y_Any, $X_Any)
suite["APPLY"]["ADULT"][pad("Y::Any X::Any")] = @benchmarkable $apply($model, $X_Any)
Y = String.(Y_Any)::Vector{String}
X_Any::Matrix{Any}
model = build(Y, X_Any)
preds = apply(model, X_Any)
suite["BUILD"]["ADULT"][pad("Y::String X::Any")] = @benchmarkable $build($Y, $X_Any)
suite["APPLY"]["ADULT"][pad("Y::String X::Any")] = @benchmarkable $apply($model, $X_Any)
Y_Any::Vector{Any}
X = string.(X_Any)::Matrix{String}
model = build(Y_Any, X)
preds = apply(model, X)
suite["BUILD"]["ADULT"][pad("Y::Any X::String")] = @benchmarkable $build($Y_Any, $X)
suite["APPLY"]["ADULT"][pad("Y::Any X::String")] = @benchmarkable $apply($model, $X)
Y = String.(Y)::Vector{String}
X = String.(X)::Matrix{String}
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["ADULT"][pad("Y::String X::String")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["ADULT"][pad("Y::String X::String")] = @benchmarkable $apply($model, $X)
########## run suite ##########
tune!(suite)
results = run(suite; verbose=true)
return results
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1852 | function benchmark_regression(build::Function, apply::Function)
# Body of benchmark_regression(build, apply): same structure as the
# classification suite but only over the DIGITS dataset, with Float labels and
# several feature element types. Each section trains/applies once eagerly so
# the APPLY benchmark can close over a fitted `model`.
println("\nRunning benchmarks ...")
########## benchmarks suite ##########
suite = BenchmarkGroup()
suite["BUILD"] = BenchmarkGroup()
suite["BUILD"]["DIGITS"] = BenchmarkGroup()
suite["APPLY"] = BenchmarkGroup()
suite["APPLY"]["DIGITS"] = BenchmarkGroup()
# using DIGITS dataset
X, Y = load_data("digits")
m, n = size(X)
# Any-typed features exercise the un-specialized code path.
X_Any = Array{Any}(undef, m, n)
X_Any[:, :] = X
X_Any::Matrix{Any}
Y = Float64.(Y)::Vector{Float64}
model = build(Y, X_Any)
preds = apply(model, X_Any)
suite["BUILD"]["DIGITS"][pad("Y::Float64 X::Any")] = @benchmarkable $build($Y, $X_Any)
suite["APPLY"]["DIGITS"][pad("Y::Float64 X::Any")] = @benchmarkable $apply(
$model, $X_Any
)
X = Int64.(X)::Matrix{Int64}
Y = Float64.(Y)::Vector{Float64}
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::Float64 X::Int64")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["DIGITS"][pad("Y::Float64 X::Int64")] = @benchmarkable $apply($model, $X)
# Narrow numeric types.
X = Int8.(X)::Matrix{Int8}
Y = Float16.(Y)::Vector{Float16}
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::Float16 X::Int8")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["DIGITS"][pad("Y::Float16 X::Int8")] = @benchmarkable $apply($model, $X)
# Standard machine types.
X = Float64.(X)::Matrix{Float64}
Y = Float64.(Y)::Vector{Float64}
model = build(Y, X)
preds = apply(model, X)
suite["BUILD"]["DIGITS"][pad("Y::Float64 X::Float64")] = @benchmarkable $build($Y, $X)
suite["APPLY"]["DIGITS"][pad("Y::Float64 X::Float64")] = @benchmarkable $apply(
$model, $X
)
########## run suite ##########
tune!(suite)
results = run(suite; verbose=true)
return results
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 705 | function build_adaboost(labels, features)
# Build-side benchmark adapter: trains adaboost stumps with a fixed iteration
# count and returns only the ensemble. The fitted coefficients are discarded so
# the return shape matches build_tree/build_forest; the apply side substitutes
# unit weights for them.
n_iterations = 10
model, coeffs = build_adaboost_stumps(labels, features, n_iterations)
return model
end
# Apply-side benchmark adapter: evaluates an adaboost stump ensemble with every
# stump weighted equally (unit coefficients), since build_adaboost above
# discards the fitted coefficients.
function apply_adaboost(model, features)
    unit_coeffs = ones(length(model))
    return apply_adaboost_stumps(model, unit_coeffs, features)
end
"""
    pad(s::String, l::Int=21)

Right-pad `s` with spaces to at least `l` characters; return `s` unchanged when
it is already that long. Used to align benchmark-group labels.
"""
function pad(s::String, l::Int=21)
    deficit = max(0, l - length(s))
    return string(s, " "^deficit)
end
# Pretty-print each top-level entry of a benchmark results group: a banner line
# naming the entry followed by the entry's own display output.
function print_details(results)
    for entry in keys(results)
        banner = "================ " * entry * " ================"
        println("\n" * banner)
        display(results[entry])
    end
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1209 | # determine order of a numerical list, eg, [0.3, 0.1, 0.6, -0.1] -> [3, 2, 4, 1]
# Rank each entry of a numeric list, e.g. [0.3, 0.1, 0.6, -0.1] -> [3, 2, 4, 1]:
# sort (index, value) pairs by value, then invert the resulting permutation.
function rank(v)
    indexed = collect(enumerate(v))
    sort!(indexed; by=last)
    return invperm(first.(indexed))
end
# Shared fixture: iris dataset with concrete element types, plus the label set
# needed for probability-matrix column ordering.
features, labels = load_data("iris")
features = float.(features)
labels = string.(labels)
classes = unique(labels)
# Verifies that growing a forest in two steps (build n trees, then add Δn more
# to the existing ensemble) matches building n+Δn trees in one go, given the
# same stable RNG seed.
@testset "adding models in an ensemble" begin
n = 40 # n_trees in first step
Δn = 30 # n_trees to be added
one_step_model = build_forest(labels, features, 2, n + Δn; rng=srng())
model1 = build_forest(labels, features, 2, n; rng=srng())
two_step_model = build_forest(model1, labels, features, 2, Δn; rng=srng())
@test length(two_step_model) == n + Δn
# test the models agree on the initial portion of the ensemble:
@test apply_forest_proba(one_step_model[1:n], features, classes) ≈
apply_forest_proba(two_step_model[1:n], features, classes)
# smoke test - predictions are from the classes seen:
@test issubset(unique(apply_forest(two_step_model, features)), classes)
# smoke test - one-step and two-step models predict the same feature rankings:
@test rank(impurity_importance(one_step_model)) ==
rank(impurity_importance(two_step_model))
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2496 | # Classification Test - Adult Data Set
# https://archive.ics.uci.edu/ml/datasets/adult
# End-to-end classification checks on the UCI Adult dataset: single tree,
# random forest (serial and multi-threaded apply), adaboost stumps, plus the
# three feature-importance measures and n-fold cross-validation of each model.
@testset "adult.jl" begin
features, labels = load_data("adult")
model = build_tree(labels, features; rng=StableRNG(1))
preds = apply_tree(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.99
# Re-run everything with all-String labels/features.
features = string.(features)
labels = string.(labels)
n_subfeatures = 3
n_trees = 5
model = build_forest(labels, features, n_subfeatures, n_trees; rng=StableRNG(1))
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
f1 = impurity_importance(model)
p1 =
permutation_importance(
model,
labels,
features,
(model, y, X) -> accuracy(y, apply_forest(model, X));
rng=StableRNG(1),
).mean
preds_MT = apply_forest(model, features; use_multithreading=true)
cm_MT = confusion_matrix(labels, preds_MT)
@test cm_MT.accuracy > 0.9
n_iterations = 15
model, coeffs = build_adaboost_stumps(labels, features, n_iterations; rng=StableRNG(1))
preds = apply_adaboost_stumps(model, coeffs, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.8
f2 = impurity_importance(model, coeffs)
# NOTE(review): the scorer below applies the adaboost ensemble via
# apply_forest (majority vote, ignoring `coeffs`) rather than
# apply_adaboost_stumps — confirm this is intentional.
p2 =
permutation_importance(
model,
labels,
features,
(model, y, X) -> accuracy(y, apply_forest(model, X));
rng=StableRNG(1),
).mean
# NOTE(review): similarity(p2, p2) compares p2 with itself and is always 1.0,
# making this test vacuous — presumably similarity(p1, p2) was intended.
@test similarity(p2, p2) > 0.8
@test similarity(f1, f2) < 0.8
println("\n##### 3 foldCV Classification Tree #####")
pruning_purity = 0.9
nfolds = 3
accuracy1 = nfoldCV_tree(
labels, features, nfolds, pruning_purity; rng=StableRNG(1), verbose=false
)
@test mean(accuracy1) > 0.8
println("\n##### 3 foldCV Classification Forest #####")
n_subfeatures = 2
n_trees = 10
n_folds = 3
partial_sampling = 0.5
accuracy1 = nfoldCV_forest(
labels,
features,
n_folds,
n_subfeatures,
n_trees,
partial_sampling;
rng=StableRNG(1),
verbose=false,
)
@test mean(accuracy1) > 0.8
println("\n##### nfoldCV Classification Adaboosted Stumps #####")
n_iterations = 15
n_folds = 3
accuracy1 = nfoldCV_stumps(
labels, features, n_folds, n_iterations; rng=StableRNG(1), verbose=false
)
@test mean(accuracy1) > 0.8
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2869 | @testset "digits.jl" begin
# Body of the "digits.jl" testset: pins exact tree sizes for each pruning
# hyper-parameter on the digits dataset, checks that a scaled loss function
# yields the same tree and importances, then sanity-checks forest and adaboost
# accuracy. Statement order matters: hyper-parameter variables are reassigned
# between builds.
X, Y = load_data("digits")
t = DecisionTree.build_tree(Y, X)
@test length(t) == 148
@test sum(apply_tree(t, X) .== Y) == length(Y)
n_subfeatures = 0
max_depth = 6
min_samples_leaf = 5
t = DecisionTree.build_tree(Y, X, n_subfeatures, max_depth)
@test length(t) == 57
t = DecisionTree.build_tree(Y, X, n_subfeatures, max_depth, min_samples_leaf)
@test length(t) == 50
min_samples_leaf = 3
min_samples_split = 5
min_purity_increase = 0.05
t = DecisionTree.build_tree(
Y, X, n_subfeatures, max_depth, min_samples_leaf, min_samples_split
)
@test length(t) == 55
t = DecisionTree.build_tree(
Y,
X,
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
@test length(t) == 54
# Baseline importances for the entropy-loss tree.
i1 = impurity_importance(t; normalize=true)
s1 = split_importance(t)
p1 =
permutation_importance(
t, Y, X, (model, y, X) -> accuracy(y, apply_tree(model, X)); rng=StableRNG(1)
).mean
# test that all purity decisions are based on passed-in purity function;
# if so, this should be same as previous test
entropy1000(ns, n) = DecisionTree.util.entropy(ns, n) * 1000
min_samples_leaf = 3
min_samples_split = 5
# min_purity_increase must be scaled by the same factor as the loss.
min_purity_increase = 0.05 * 1000
t = DecisionTree.build_tree(
Y,
X,
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
loss=entropy1000,
rng=StableRNG(1),
)
@test length(t) == 54
i2 = impurity_importance(t; normalize=true)
s2 = split_importance(t)
p2 =
permutation_importance(
t, Y, X, (model, y, X) -> accuracy(y, apply_tree(model, X)); rng=StableRNG(1)
).mean
# Normalized impurity/split importances must match the unscaled-loss tree.
@test isapprox(i2, i1)
@test s1 == s2
@test similarity(p2, p1) > 0.9
n_subfeatures = 3
n_trees = 10
partial_sampling = 0.7
max_depth = -1
min_samples_leaf = 1
min_samples_split = 2
min_purity_increase = 0.0
model = DecisionTree.build_forest(
Y,
X,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
preds = apply_forest(model, X)
cm = confusion_matrix(Y, preds)
@test cm.accuracy > 0.95
# Multi-threaded apply must reach the same accuracy.
preds_MT = apply_forest(model, X; use_multithreading=true)
cm_MT = confusion_matrix(Y, preds_MT)
@test cm_MT.accuracy > 0.95
n_iterations = 100
model, coeffs = DecisionTree.build_adaboost_stumps(Y, X, n_iterations)
preds = apply_adaboost_stumps(model, coeffs, X)
cm = confusion_matrix(Y, preds)
@test cm.accuracy > 0.8
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1319 | ### Classification - Heterogeneously typed features (ints, floats, bools, strings)
# Classification on a feature matrix mixing element types (floats, strings,
# rounded ints, bools) in one Any-typed matrix; columns 1 and 5 stay floats.
@testset "heterogeneous.jl" begin
m, n = 10^2, 5
# 50 trues next to 50 falses; permuting with `inds` yields the label vector.
tf = [trues(Int(m / 2)) falses(Int(m / 2))]
inds = Random.randperm(StableRNG(1), m)
labels = string.(tf[inds])
features = Array{Any}(undef, m, n)
features[:, :] = randn(StableRNG(1), m, n)
features[:, 2] = string.(tf[Random.randperm(StableRNG(1), m)])
features[:, 3] = map(t -> round.(Int, t), features[:, 3])
# Column 4 is the boolean ground truth itself, so high accuracy is expected.
features[:, 4] = tf[inds]
model = build_tree(labels, features; rng=StableRNG(1))
preds = apply_tree(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
n_subfeatures = 2
n_trees = 3
model = build_forest(labels, features, n_subfeatures, n_trees; rng=StableRNG(1))
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
preds_MT = apply_forest(model, features; use_multithreading=true)
cm_MT = confusion_matrix(labels, preds_MT)
@test cm_MT.accuracy > 0.9
# NOTE(review): `n_subfeatures` is passed as build_adaboost_stumps' iteration
# count here — the variable name looks like a copy-paste leftover.
n_subfeatures = 7
model, coeffs = build_adaboost_stumps(labels, features, n_subfeatures; rng=StableRNG(1))
preds = apply_adaboost_stumps(model, coeffs, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 4136 | # Classification Test - Iris Data Set
# https://archive.ics.uci.edu/ml/datasets/iris
@testset "iris.jl" begin
features, labels = load_data("iris")
labels = String.(labels)
classes = sort(unique(labels))
n = length(labels)
# train a decision stump (depth=1)
model = build_stump(labels, features)
preds = apply_tree(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.6
@test depth(model) == 1
probs = apply_tree_proba(model, features, classes)
@test reshape(sum(probs; dims=2), n) ≈ ones(n)
# train full-tree classifier (over-fit)
model = build_tree(labels, features)
preds = apply_tree(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy == 1.0
@test length(model) == 9
@test depth(model) == 5
@test typeof(preds) == Vector{String}
print_tree(model)
probs = apply_tree_proba(model, features, classes)
@test reshape(sum(probs; dims=2), n) ≈ ones(n)
i1 = impurity_importance(model)
s1 = split_importance(model)
# prune tree to 8 leaves
pruning_purity = 0.9
pt = prune_tree(model, pruning_purity)
@test length(pt) == 8
preds = apply_tree(pt, features)
cm = confusion_matrix(labels, preds)
@test 0.99 < cm.accuracy < 1.0
i2 = impurity_importance(pt)
s2 = split_importance(pt)
@test isapprox(i2, i1 .+ [0, 0, 0, (47 * log(47 / 48) + log(1 / 48)) / 150])
@test s1 == s2 .+ [0, 0, 0, 1]
# prune tree to 3 leaves
pruning_purity = 0.6
pt = prune_tree(model, pruning_purity)
@test length(pt) == 3
preds = apply_tree(pt, features)
cm = confusion_matrix(labels, preds)
@test 0.95 < cm.accuracy < 1.0
probs = apply_tree_proba(model, features, classes)
@test reshape(sum(probs; dims=2), n) ≈ ones(n)
# prune tree to a stump, 2 leaves
pruning_purity = 0.5
pt = prune_tree(model, pruning_purity)
@test length(pt) == 2
preds = apply_tree(pt, features)
cm = confusion_matrix(labels, preds)
@test 0.66 < cm.accuracy < 1.0
# run n-fold cross validation for pruned tree
println("\n##### nfoldCV Classification Tree #####")
nfolds = 3
accuracy = nfoldCV_tree(labels, features, nfolds; rng=StableRNG(1))
@test mean(accuracy) > 0.8
# train random forest classifier
n_trees = 10
n_subfeatures = 2
partial_sampling = 0.5
model = build_forest(
labels, features, n_subfeatures, n_trees, partial_sampling; rng=StableRNG(2)
)
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.95
@test typeof(preds) == Vector{String}
probs = apply_forest_proba(model, features, classes)
@test reshape(sum(probs; dims=2), n) ≈ ones(n)
preds_MT = apply_forest(model, features; use_multithreading=true)
cm_MT = confusion_matrix(labels, preds_MT)
@test cm_MT.accuracy > 0.95
@test typeof(preds_MT) == Vector{String}
@test sum(preds .!= preds_MT) == 0
# run n-fold cross validation for forests
println("\n##### nfoldCV Classification Forest #####")
n_subfeatures = 2
n_trees = 10
n_folds = 3
partial_sampling = 0.5
accuracy = nfoldCV_forest(
labels, features, nfolds, n_subfeatures, n_trees, partial_sampling; rng=StableRNG(1)
)
@test mean(accuracy) > 0.9
# train adaptive-boosted decision stumps
n_iterations = 15
model, coeffs = build_adaboost_stumps(labels, features, n_iterations; rng=StableRNG(1))
preds = apply_adaboost_stumps(model, coeffs, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
@test typeof(preds) == Vector{String}
probs = apply_adaboost_stumps_proba(model, coeffs, features, classes)
@test reshape(sum(probs; dims=2), n) ≈ ones(n)
# run n-fold cross validation for boosted stumps, using 7 iterations and 3 folds
println("\n##### nfoldCV Classification Adaboosted Stumps #####")
n_iterations = 15
nfolds = 3
accuracy = nfoldCV_stumps(labels, features, nfolds, n_iterations; rng=StableRNG(1))
@test mean(accuracy) > 0.85
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 4156 |
# Classification with narrow integer/float types: verifies that prediction
# element types follow the label element type (Int32, then Int8) through all
# model kinds and their cross-validation wrappers. Hyper-parameters are passed
# as Int32 to check the API accepts non-Int64 widths.
@testset "low_precision.jl" begin
Random.seed!(16)
n, m = 10^3, 5
features = Array{Any}(undef, n, m)
features[:, :] = rand(StableRNG(1), n, m)
features[:, 1] = round.(Int32, features[:, 1]) # convert a column of 32bit integers
weights = rand(StableRNG(1), -1:1, m)
labels = round.(Int32, features * weights)
model = build_stump(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Int32}
@test depth(model) == 1
n_subfeatures = Int32(0)
max_depth = Int32(-1)
min_samples_leaf = Int32(1)
min_samples_split = Int32(2)
min_purity_increase = 0.0
model = build_tree(
labels,
features,
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
preds = apply_tree(model, features)
cm = confusion_matrix(labels, preds)
@test typeof(preds) == Vector{Int32}
@test cm.accuracy > 0.9
n_subfeatures = Int32(0)
n_trees = Int32(10)
partial_sampling = 0.7
max_depth = Int32(-1)
model = build_forest(
labels,
features,
n_subfeatures,
n_trees,
partial_sampling,
max_depth;
rng=StableRNG(1),
)
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test typeof(preds) == Vector{Int32}
@test cm.accuracy > 0.9
# Multi-threaded apply must preserve the element type too.
preds_MT = apply_forest(model, features; use_multithreading=true)
cm_MT = confusion_matrix(labels, preds_MT)
@test typeof(preds_MT) == Vector{Int32}
@test cm_MT.accuracy > 0.9
n_iterations = Int32(25)
model, coeffs = build_adaboost_stumps(labels, features, n_iterations; rng=StableRNG(1))
preds = apply_adaboost_stumps(model, coeffs, features)
cm = confusion_matrix(labels, preds)
@test typeof(preds) == Vector{Int32}
@test cm.accuracy > 0.6
println("\n##### nfoldCV Classification Tree #####")
n_folds = Int32(3)
pruning_purity = 1.0
max_depth = Int32(-1)
min_samples_leaf = Int32(1)
min_samples_split = Int32(2)
min_purity_increase = 0.0
accuracy1 = nfoldCV_tree(
labels,
features,
n_folds,
pruning_purity,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
@test mean(accuracy1) > 0.7
println("\n##### nfoldCV Classification Forest #####")
n_trees = Int32(10)
n_subfeatures = Int32(2)
n_folds = Int32(3)
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
accuracy1 = nfoldCV_forest(
labels,
features,
n_folds,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
@test mean(accuracy1) > 0.7
println("\n##### nfoldCV Adaboosted Stumps #####")
n_iterations = Int32(25)
accuracy1 = nfoldCV_stumps(labels, features, n_folds, n_iterations; rng=StableRNG(1))
@test mean(accuracy1) > 0.6
# Test Int8 labels, and Float16 features
features = Float16.(features)
labels = Int8.(labels)
model = build_stump(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Int8}
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Int8}
model = build_forest(labels, features)
preds = apply_forest(model, features)
@test typeof(preds) == Vector{Int8}
preds_MT = apply_forest(model, features; use_multithreading=true)
@test typeof(preds_MT) == Vector{Int8}
# Serial and multi-threaded apply must agree exactly.
@test sum(abs.(preds .- preds_MT)) == zero(Int8)
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Int8}
model, coeffs = build_adaboost_stumps(labels, features, n_iterations)
preds = apply_adaboost_stumps(model, coeffs, features)
@test typeof(preds) == Vector{Int8}
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 6012 |
@testset "random.jl" begin
Random.seed!(16)
n, m = 10^3, 5
features = rand(StableRNG(1), n, m)
weights = rand(StableRNG(1), -1:1, m)
labels = round.(Int, features * weights)
model = build_stump(labels, round.(Int, features))
preds = apply_tree(model, round.(Int, features))
@test depth(model) == 1
max_depth = 3
model = build_tree(labels, features, 0, max_depth)
@test depth(model) == max_depth
io = IOBuffer()
print_tree(io, model, 3)
text = String(take!(io))
println()
print(text)
println()
# Read the regex as: many not arrow left followed by an arrow left, a space, some numbers and
# a dot and a space and question mark.
rx = r"[^<]*< [0-9\.]* ?"
matches = eachmatch(rx, text)
@test !isempty(matches)
model = build_tree(labels, features; rng=StableRNG(1))
preds = apply_tree(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
@test typeof(preds) == Vector{Int}
# test RNG param of trees
n_subfeatures = 2
t1 = build_tree(labels, features, n_subfeatures; rng=10)
t2 = build_tree(labels, features, n_subfeatures; rng=10)
t3 = build_tree(labels, features, n_subfeatures; rng=5)
@test (length(t1) == length(t2)) && (depth(t1) == depth(t2))
@test (length(t1) != length(t3)) || (depth(t1) != depth(t3))
mt = Random.MersenneTwister(1)
t1 = build_tree(labels, features, n_subfeatures; rng=mt)
t3 = build_tree(labels, features, n_subfeatures; rng=mt)
# t1 and t3 were built above (outside this excerpt) with different RNG seeds,
# so the resulting trees should differ in size or depth.
@test (length(t1) != length(t3)) || (depth(t1) != depth(t3))
# Forest with default hyperparameters; multi-threaded prediction must return
# exactly the same labels as the single-threaded path.
model = build_forest(labels, features; rng=StableRNG(1))
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.9
@test typeof(preds) == Vector{Int}
preds_MT = apply_forest(model, features; use_multithreading=true)
cm_MT = confusion_matrix(labels, preds_MT)
@test cm_MT.accuracy > 0.9
@test typeof(preds_MT) == Vector{Int}
@test sum(abs.(preds .- preds_MT)) == zero(Int)
# Forest with all hyperparameters given explicitly (positional form).
n_subfeatures = 3
n_trees = 9
partial_sampling = 0.7
max_depth = -1
min_samples_leaf = 5
min_samples_split = 2
min_purity_increase = 0.0
model = build_forest(
labels,
features,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.6
@test length(model) == n_trees
preds_MT = apply_forest(model, features; use_multithreading=true)
cm_MT = confusion_matrix(labels, preds_MT)
# NOTE(review): this threshold (0.9) is stricter than the 0.6 used for the
# single-threaded predictions of the same model above, although both code
# paths should yield identical predictions — confirm which bound is intended.
@test cm_MT.accuracy > 0.9
# test n_subfeatures
# n_subfeatures == 0 means "consider all features at each split"; per the
# assertion below, such trees end up smaller than trees restricted to random
# feature subsets (the default sqrt(n_features)).
n_subfeatures = 0
m_partial = build_forest(labels, features; rng=StableRNG(1)) # default sqrt(n_features)
m_full = build_forest(labels, features, n_subfeatures; rng=StableRNG(1))
@test all(length.(m_full.trees) .< length.(m_partial.trees))
# test partial_sampling parameter, train on single sample
# Sampling a single row must collapse the single tree to a bare Leaf.
partial_sampling = 1 / n
n_subfeatures = 0
n_trees = 1 # single tree test
max_depth = -1
min_samples_leaf = 1
min_samples_split = 2
min_purity_increase = 0.0
partial = build_forest(
labels,
features,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase,
)
@test typeof(partial.trees[1]) <: Leaf
# test RNG parameter for forests
# Equal integer seeds must reproduce the same forest; a different seed must not.
n_subfeatures = 2
n_trees = 5
m1 = build_forest(labels, features, n_subfeatures, n_trees; rng=10)
m2 = build_forest(labels, features, n_subfeatures, n_trees; rng=10)
m3 = build_forest(labels, features, n_subfeatures, n_trees; rng=5)
@test length.(m1.trees) == length.(m2.trees)
@test depth.(m1.trees) == depth.(m2.trees)
@test length.(m1.trees) != length.(m3.trees)
# AdaBoost stumps: one stump (plus one coefficient) per boosting iteration.
n_iterations = 25
model, coeffs = build_adaboost_stumps(labels, features, n_iterations; rng=StableRNG(1))
preds = apply_adaboost_stumps(model, coeffs, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.6
@test typeof(preds) == Vector{Int}
@test length(model) == n_iterations
"""
RNGs can look like they produce stable results, but do in fact differ when you run it many times.
In some RNGs the problem already shows up when doing two runs and comparing those.
This loop tests multiple RNGs to have a higher chance of spotting a problem.
See https://github.com/JuliaAI/DecisionTree.jl/pull/174 for more information.
"""
function test_rng(f::Function, args, expected_accuracy)
println("Testing $f")
accuracy = f(args...; rng=StableRNG(10), verbose=false)
accuracy2 = f(args...; rng=StableRNG(5), verbose=false)
@test accuracy != accuracy2
for i in 10:14
accuracy = f(args...; rng=StableRNG(i), verbose=false)
accuracy2 = f(args...; rng=StableRNG(i), verbose=false)
@test mean(accuracy) > expected_accuracy
@test accuracy == accuracy2
end
end
# Each nfoldCV_* driver is run through test_rng (defined in this file) to
# verify both the accuracy bound and RNG reproducibility.
println("\n##### nfoldCV Classification Tree #####")
nfolds = 3
pruning_purity = 1.0
max_depth = 5
args = [labels, features, nfolds, pruning_purity, max_depth]
test_rng(nfoldCV_tree, args, 0.7)
println("\n##### nfoldCV Classification Forest #####")
nfolds = 3
n_subfeatures = 2
n_trees = 10
args = [labels, features, nfolds, n_subfeatures, n_trees]
test_rng(nfoldCV_forest, args, 0.7)
# This is a smoke test to verify that the multi-threaded code doesn't crash.
nfoldCV_forest(args...; rng=MersenneTwister(1))
println("\n##### nfoldCV Adaboosted Stumps #####")
n_iterations = 25
n_folds = 3
args = [labels, features, n_folds, n_iterations]
test_rng(nfoldCV_stumps, args, 0.6)
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3034 | @testset "scikitlearn.jl" begin
Random.seed!(2)
n, m = 10^3, 5
features = rand(StableRNG(1), n, m)
weights = rand(StableRNG(1), -1:1, m)
# Integer class labels derived from a noiseless linear model.
labels = round.(Int, features * weights)
# I wish we could use ScikitLearn.jl's cross-validation, but that'd require
# installing it on Travis
model = fit!(
DecisionTreeClassifier(; rng=StableRNG(1), pruning_purity_threshold=0.9),
features,
labels,
)
@test mean(predict(model, features) .== labels) > 0.8
# The ScikitLearn-style wrapper must report the same feature importances as
# the underlying raw tree/ensemble it wraps.
@test impurity_importance(model) == impurity_importance(model.root)
@test split_importance(model) == split_importance(model.root)
@test isapprox(
permutation_importance(model, features, labels; rng=StableRNG(1)).mean,
permutation_importance(
model.root,
labels,
features,
(model, y, X) -> accuracy(y, apply_tree(model, X));
rng=StableRNG(1),
).mean,
)
model = fit!(RandomForestClassifier(; rng=StableRNG(1)), features, labels)
@test mean(predict(model, features) .== labels) > 0.8
@test impurity_importance(model) == impurity_importance(model.ensemble)
@test split_importance(model) == split_importance(model.ensemble)
@test isapprox(
permutation_importance(model, features, labels; rng=StableRNG(1)).mean,
permutation_importance(
model.ensemble,
labels,
features,
(model, y, X) -> accuracy(y, apply_forest(model, X));
rng=StableRNG(1),
).mean,
)
model = fit!(AdaBoostStumpClassifier(; rng=StableRNG(1)), features, labels)
# Adaboost isn't so hot on this task, disabled for now
mean(predict(model, features) .== labels)
@test impurity_importance(model) == impurity_importance(model.ensemble, model.coeffs)
@test split_importance(model) == split_importance(model.ensemble, model.coeffs)
@test isapprox(
permutation_importance(model, features, labels; rng=StableRNG(1)).mean,
permutation_importance(
(model.ensemble, model.coeffs),
labels,
features,
(model, y, X) -> accuracy(y, apply_adaboost_stumps(model, X));
rng=StableRNG(1),
).mean,
)
N = 3000
X = randn(StableRNG(1), N, 10)
# TODO: we should probably support fit!(::DecisionTreeClassifier, ::BitArray)
y = convert(Vector{Bool}, randn(N) .< 0)
max_depth = 5
# With this much data the tree should grow to the full allowed depth.
model = fit!(DecisionTreeClassifier(; rng=StableRNG(1), max_depth), X, y)
@test depth(model) == max_depth
## Test that the RNG arguments work as expected
Random.seed!(2)
X = randn(StableRNG(1), 100, 10)
y = rand(StableRNG(1), Bool, 100)
@test predict_proba(fit!(RandomForestClassifier(; rng=10), X, y), X) ==
predict_proba(fit!(RandomForestClassifier(; rng=10), X, y), X)
@test predict_proba(fit!(RandomForestClassifier(; rng=10), X, y), X) !=
predict_proba(fit!(RandomForestClassifier(; rng=12), X, y), X)
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 4308 | # Test `AbstractTrees`-interface
@testset "abstract_trees_test.jl" begin
# CAVEAT: These tests rely heavily on the texts generated in `printnode`.
# After changes in `printnode` the following `*pattern`s might be adapted.
### Some content-checking helpers
# if no feature names or class labels are given, the following keywords must be present
featid_pattern = "Feature: " # feature ids are prepended by this text
classid_pattern = "Class: " # `Leaf.majority` is prepended by this text
# if feature names and class labels are given, they can be identified within the tree using these patterns
fname_pattern(fname) = fname * " <" # feature names are followed by " <"
clabel_pattern(clabel) = "─ " * clabel * " (" # class labels are embedded in "─ " and " ("
# occur all elements of `pool` in the form defined by `fname_/clabel_pattern` in `str_tree`?
# (true iff every element of `pool`, rendered through `pattern`, occurs in `str_tree`)
function check_occurence(str_tree, pool, pattern)
count(map(elem -> occursin(pattern(elem), str_tree), pool)) == length(pool)
end
@info("Test base functionality")
# Tiny hand-built tree: n1 splits on feature 1, n2 on feature 2, with three leaves.
l1 = Leaf(1, [1, 1, 2])
l2 = Leaf(2, [1, 2, 2])
l3 = Leaf(3, [3, 3, 1])
n2 = Node(2, 0.5, l2, l3)
n1 = Node(1, 0.7, l1, n2)
feature_names = ["firstFt", "secondFt"]
class_labels = ["a", "b", "c"]
# Wrap the same tree with different subsets of display metadata.
infotree1 = wrap(n1, (featurenames=feature_names, classlabels=class_labels))
infotree2 = wrap(n1, (featurenames=feature_names,))
infotree3 = wrap(n1, (classlabels=class_labels,))
infotree4 = wrap(n1, (x=feature_names, y=class_labels))
infotree5 = wrap(n1)
@info(" -- Tree with feature names and class labels")
AbstractTrees.print_tree(infotree1)
rep1 = AbstractTrees.repr_tree(infotree1)
@test check_occurence(rep1, feature_names, fname_pattern)
@test check_occurence(rep1, class_labels, clabel_pattern)
@info(" -- Tree with feature names")
AbstractTrees.print_tree(infotree2)
rep2 = AbstractTrees.repr_tree(infotree2)
@test check_occurence(rep2, feature_names, fname_pattern)
@test occursin(classid_pattern, rep2)
@info(" -- Tree with class labels")
AbstractTrees.print_tree(infotree3)
rep3 = AbstractTrees.repr_tree(infotree3)
@test occursin(featid_pattern, rep3)
@test check_occurence(rep3, class_labels, clabel_pattern)
@info(" -- Tree with ids only (nonsense parameters)")
# Unknown keys (x/y) must be ignored, falling back to plain id rendering.
AbstractTrees.print_tree(infotree4)
rep4 = AbstractTrees.repr_tree(infotree4)
@test occursin(featid_pattern, rep4)
@test occursin(classid_pattern, rep4)
@info(" -- Tree with ids only")
AbstractTrees.print_tree(infotree5)
rep5 = AbstractTrees.repr_tree(infotree5)
@test occursin(featid_pattern, rep5)
@test occursin(classid_pattern, rep5)
@info("Test `children` with 'adult' decision tree")
@info(" -- Preparing test data")
features, labels = load_data("adult")
feature_names_adult = [
"age",
"workclass",
"fnlwgt",
"education",
"education-num",
"marital-status",
"occupation",
"relationship",
"race",
"sex",
"capital-gain",
"capital-loss",
"hours-per-week",
"native-country",
]
model = build_tree(labels, features)
wrapped_tree = wrap(model, (featurenames=feature_names_adult,))
@info(" -- Test `children`")
# Every inner node must hand the same `info` named tuple down to both children.
function traverse_tree(node::InfoNode)
l, r = AbstractTrees.children(node)
@test l.info == node.info
@test r.info == node.info
traverse_tree(l)
traverse_tree(r)
end
traverse_tree(leaf::InfoLeaf) = nothing
traverse_tree(wrapped_tree)
end
# Misuse test: wrapping a tree whose leaves already carry concrete class
# labels with an additional `classlabels` mapping is inconsistent and must
# be rejected when the wrapped tree is rendered.
@testset "abstract_trees - test misuse" begin
    @info("Test misuse of `classlabel` information")
    @info("Create test data - a decision tree based on the iris data set")
    features, labels = load_data("iris")
    features = float.(features)
    labels = string.(labels)
    model = DecisionTreeClassifier()
    fit!(model, features, labels)
    # Fixed typo in the log message ("exisitng" -> "existing").
    @info("Try to replace the existing class labels")
    class_labels = unique(labels)
    dtree = model.root.node
    # `wrap` itself succeeds; the inconsistency is only detected while
    # printing the wrapped tree.
    wt = DecisionTree.wrap(dtree, (classlabels=class_labels,))
    @test_throws AssertionError AbstractTrees.print_tree(wt)
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1140 | # Test conversion of Leaf to Node
# Conversion tests: a `Leaf` must be storable in `Node`- and `Root`-typed
# containers (exercising the implicit conversions) and must still evaluate
# correctly via `apply_tree` afterwards.
@testset "convert.jl" begin
    # Int-valued leaf.
    let leaf = Leaf(1, [1]),
        nodes = Node{Int,Int}[],
        roots = Root{Int,Int}[]

        push!(nodes, leaf)
        push!(roots, leaf)
        push!(roots, nodes[1])
        @test apply_tree(nodes[1], [0]) == 1
        @test apply_tree(roots[1], [0]) == 1
        @test apply_tree(roots[2], [0]) == 1
    end
    # Float64-valued leaf.
    let leaf = Leaf(1.0, [0.0, 1.0]),
        nodes = Node{Int,Float64}[],
        roots = Root{Int,Float64}[]

        push!(nodes, leaf)
        push!(roots, leaf)
        push!(roots, nodes[1])
        @test apply_tree(nodes[1], [0]) == 1.0
        @test apply_tree(roots[1], [0]) == 1.0
        @test apply_tree(roots[2], [0]) == 1.0
    end
    # String-valued leaf.
    let leaf = Leaf("A", ["B", "A"]),
        nodes = Node{Int,String}[],
        roots = Root{Int,String}[]

        push!(nodes, leaf)
        push!(roots, leaf)
        push!(roots, nodes[1])
        @test apply_tree(nodes[1], [0]) == "A"
        @test apply_tree(roots[1], [0]) == "A"
        @test apply_tree(roots[2], [0]) == "A"
    end
end
# Smoke test: printing a fitted regression tree must not raise; the textual
# output itself is discarded.
@testset "convert to text" begin
    nrows, ncols = 10^3, 5
    X = rand(StableRNG(1), nrows, ncols)
    w = rand(StableRNG(1), -1:1, ncols)
    targets = X * w
    tree = fit!(DecisionTreeRegressor(; rng=StableRNG(1)), X, targets)
    print_tree(devnull, tree)
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1448 | @testset "methods for `Ensemble` type" begin
# NOTE: the forests below use the (un-seeded) global RNG, so the statement
# order fixes the random stream; the assertions only test relations between
# ensembles built in this block.
features, labels = load_data("iris")
# combining identical ensembles:
ensemble1 = build_forest(labels, features, 2, 7)
@test DecisionTree.has_impurity_importance(ensemble1)
@test ensemble1[1:2].trees == ensemble1.trees[1:2]
@test length(ensemble1) == 7
@test DecisionTree.n_features(ensemble1) == 4
ensemble = vcat(ensemble1, ensemble1)
# duplicating an ensemble must leave the (averaged) importances unchanged
@test ensemble.featim ≈ ensemble1.featim
# combining heterogeneous ensembles:
ensemble2 = build_forest(labels, features, 2, 10)
ensemble = vcat(ensemble1, ensemble2)
n1 = length(ensemble1.trees)
n2 = length(ensemble2.trees)
n = n1 + n2
# combined importances are the tree-count-weighted average of the parts
@test n * ensemble.featim ≈ n1 * ensemble1.featim + n2 * ensemble2.featim
# including an ensemble without impurity importance should drop impurity importance from
# the combination:
ensemble3 = build_forest(labels, features, 2, 4; impurity_importance=false)
@test !DecisionTree.has_impurity_importance(ensemble3)
@test vcat(ensemble1, ensemble3).featim == Float64[]
@test vcat(ensemble3, ensemble1).featim == Float64[]
@test vcat(ensemble3, ensemble3).featim == Float64[]
# changing the number of features:
ensemble4 = build_forest(labels, features[:, 1:3], 2, 4)
@test_logs vcat(ensemble3, ensemble4) # ensemble 3 doesn't support importances
@test_throws DecisionTree.ERR_ENSEMBLE_VCAT vcat(ensemble1, ensemble4)
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 8585 | @testset "feature_importance_test.jl" begin
# Small hand-crafted dataset (20 samples, 3 features) for which the exact
# impurity importances can be computed by hand below.
X = [
-3 2 2
-2 -2 -3
-2 0 2
-1 -3 1
-1 -1 0
2 -3 1
1 -2 0
3 -2 3
1 -1 2
2 -1 -2
2 0 1
5 0 0
1 0 2
3 1 -1
1 4 -3
5 5 -2
6 3 -1
4 2 0
4 3 2
5 2 4
]
y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1]
# classifier
model = build_tree(y, X)
# Hand-computed weighted entropies at the root and the two internal splits.
entropy1 = -(10 * log(10 / 20) + 10 * log(10 / 20)) / 20
entropy2 = -(5 * log(5 / 15) + 10 * log(10 / 15)) / 20
entropy3 = -(5 * log(5 / 7) + 2 * log(2 / 7)) / 20
@test isapprox(
impurity_importance(model), [entropy1 - entropy2, entropy2 - entropy3, entropy3]
)
@test split_importance(model) == [1, 1, 1]
# prune_tree
# Pruning removes the deepest split first; its importance contribution drops to 0.
pt = prune_tree(model, 0.7)
@test isapprox(impurity_importance(pt), [entropy1 - entropy2, entropy2 - entropy3, 0])
@test split_importance(pt) == [1, 1, 0]
pt = prune_tree(model, 0.6)
@test isapprox(impurity_importance(pt), [entropy1 - entropy2, 0, 0])
@test split_importance(pt) == [1, 0, 0]
# regressor
model = build_tree(float.(y), X, 0, -1, 2)
# Hand-computed weighted variances (MSE impurities) of the same splits.
mse1 = ((1^2 * 10 + 0^2 * 10) / 20 - ((1 * 10 + 0 * 10) / 20)^2)
mse2 = ((1^2 * 10 + 0^2 * 5) / 15 - ((1 * 10 + 0 * 5) / 15)^2) * 15 / 20
mse3 = ((1^2 * 2 + 0^2 * 5) / 7 - ((1 * 2 + 0 * 5) / 7)^2) * 7 / 20
@test isapprox(impurity_importance(model), [mse1 - mse2, mse2 - mse3, mse3])
@test split_importance(model) == [1, 1, 1]
# prune_tree
pt = prune_tree(model, 0.7)
@test isapprox(impurity_importance(pt), [mse1 - mse2, mse2 - mse3, 0])
@test split_importance(pt) == [1, 1, 0]
pt = prune_tree(model, 0.6)
@test isapprox(impurity_importance(pt), [mse1 - mse2, 0, 0])
@test split_importance(pt) == [1, 0, 0]
# Increase samples for testing permutation_importance and ensemble models
X2 = repeat(X; inner=(50, 1)) .+ rand(StableRNG(1), 1000, 3)
y2 = repeat(y; inner=50)
# classifier
model = build_tree(y2, X2; rng=StableRNG(1))
p1 = permutation_importance(
model,
y2,
X2,
(model, y, X) -> accuracy(y, apply_tree(model, X)),
10;
rng=StableRNG(1),
)
@test similarity(
impurity_importance(model), [entropy1 - entropy2, entropy2 - entropy3, entropy3]
) > 0.9
@test similarity(split_importance(model), [1, 1, 1]) > 0.9
@test argmax(p1.mean) == 1
@test argmin(p1.mean) == 3
# Two forests built from different seeds should agree on the importance
# ranking and produce similar importance vectors.
model = build_forest(y2, X2, -1, 100; rng=StableRNG(1))
i1 = impurity_importance(model)
s1 = split_importance(model)
p1 = permutation_importance(
model,
y2,
X2,
(model, y, X) -> accuracy(y, apply_forest(model, X)),
10;
rng=StableRNG(1),
)
model = build_forest(y2, X2, -1, 100; rng=StableRNG(100))
i2 = impurity_importance(model)
s2 = split_importance(model)
p2 = permutation_importance(
model,
y2,
X2,
(model, y, X) -> accuracy(y, apply_forest(model, X)),
10;
rng=StableRNG(100),
)
@test argmin(p1.mean) == argmin(p2.mean)
@test (-(sort(p1.mean; rev=true)[1:2]...) - -(sort(p2.mean; rev=true)[1:2]...)) < 0.2
@test similarity(i1, i2) > 0.9
@test similarity(s1, s2) > 0.9
# Same cross-seed stability checks for AdaBoost stumps.
model, coeffs = build_adaboost_stumps(y2, X2, 20; rng=StableRNG(1))
s1 = split_importance(model)
p1 = permutation_importance(
(model, coeffs),
y2,
X2,
(model, y, X) -> accuracy(y, apply_adaboost_stumps(model, X)),
10;
rng=StableRNG(1),
)
model, coeffs = build_adaboost_stumps(y2, X2, 20; rng=StableRNG(100))
s2 = split_importance(model)
p2 = permutation_importance(
(model, coeffs),
y2,
X2,
(model, y, X) -> accuracy(y, apply_adaboost_stumps(model, X)),
10;
rng=StableRNG(100),
)
@test argmin(p1.mean) == argmin(p2.mean)
@test (-(sort(p1.mean; rev=true)[1:2]...) - -(sort(p2.mean; rev=true)[1:2]...)) < 0.1
@test similarity(s1, s2) > 0.9
# regressor
y2 = y2 .+ rand(StableRNG(1), 1000) ./ 100
model = build_tree(y2, X2, 0, 3, 5, 2, 0.01; rng=StableRNG(1))
p1 = permutation_importance(
model, y2, X2, (model, y, X) -> R2(y, apply_tree(model, X)), 10; rng=StableRNG(1)
)
@test similarity(impurity_importance(model), [mse1 - mse2, mse2 - mse3, mse3]) > 0.9
@test similarity(split_importance(model), [1, 1, 1]) > 0.9
@test argmax(p1.mean) == 1
@test argmin(p1.mean) == 3
model = build_forest(y2, X2, 0, 100, 0.7, 3, 5, 2, 0.01; rng=StableRNG(1))
i1 = impurity_importance(model)
s1 = split_importance(model)
p1 = permutation_importance(
model, y2, X2, (model, y, X) -> R2(y, apply_forest(model, X)), 10; rng=StableRNG(1)
)
model = build_forest(y2, X2, 0, 100, 0.7, 3, 5, 2, 0.01; rng=StableRNG(100))
i2 = impurity_importance(model)
s2 = split_importance(model)
p2 = permutation_importance(
model,
y2,
X2,
(model, y, X) -> R2(y, apply_forest(model, X)),
10;
rng=StableRNG(100),
)
@test argmax(p1.mean) == argmax(p2.mean)
@test argmin(p1.mean) == argmin(p2.mean)
@test similarity(i1, i2) > 0.9
@test similarity(s1, s2) > 0.9
# Common datasets
# The expected importance vectors below were produced by the equivalent
# scikit-learn models quoted in the comments.
X, y = load_data("digits")
# classifier
model = build_tree(y, X, 0, 3, 1, 2; rng=StableRNG(1))
# sklearn equivalent:
# model = DecisionTreeClassifier(max_depth = 3, criterion = 'entropy', random_state = 1)
# model.fit(X, y)
@test isapprox(
filter(x -> >(x, 0), impurity_importance(model; normalize=true)),
[0.11896482, 0.15168659, 0.17920925, 0.29679316, 0.11104555, 0.14230064],
atol=0.0000005,
)
# regressor
model = build_tree(float.(y), X, 0, 3, 1, 2; rng=StableRNG(1))
# sklearn equivalent:
# model = DecisionTreeRegressor(max_depth = 3, random_state = 1)
# model.fit(X, y)
@test isapprox(
filter(x -> >(x, 0), impurity_importance(model; normalize=true)),
[0.1983883, 0.02315617, 0.09821539, 0.06591425, 0.19884457, 0.14939765, 0.26608367],
atol=0.0000005,
)
X, y = load_data("iris")
y = String.(y)
# classifier
model = build_forest(y, X, -1, 100, 0.7, 2; rng=StableRNG(100))
# sklearn:
# model = RandomForestClassifier(n_estimators = 100, max_depth = 2, random_state = 100, criterion = 'entropy')
# model.fit(X, y)
f1 = impurity_importance(model)
@test sum(f1[1:2]) < 0.25 # About 0.1% will fail among different rng
@test abs(f1[4] - f1[3]) < 0.35 # About 1% will fail among different rng
model, coeffs = build_adaboost_stumps(y, X, 10; rng=StableRNG(1))
# sklearn:
# model = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth = 1, criterion = 'entropy'),
# algorithm = 'SAMME', n_estimators = 10, random_state = 1)
# model.fit(X, y)
f1 = impurity_importance(model, coeffs)
@test sum(f1[1:2]) < 0.1 # Very Stable
@test 0.35 < (f1[3] - f1[4]) < 0.45 # Very Stable
# regressor
y2 = repeat([1.0, 2.0, 3.0]; inner=50)
model = build_forest(y2, X, 0, 100, 0.7, 2; rng=StableRNG(100))
# sklearn:
# model = RandomForestRegressor(n_estimators = 100, max_depth = 2, random_state = 100)
# model.fit(X, y)
f1 = impurity_importance(model)
@test sum(f1[1:2]) < 0.1 # Very Stable
X = X[:, 1:3] # leave only one important feature
# classifier
model = build_forest(y, X, -1, 100, 0.7, 2; rng=StableRNG(100))
# sklearn:
# model = RandomForestClassifier(n_estimators = 100, max_depth = 2, random_state = 100, criterion = 'entropy')
# model.fit(X, y)
f1 = impurity_importance(model)
@test argmax(f1) == 3
model, coeffs = build_adaboost_stumps(y, X, 10; rng=StableRNG(1))
# sklearn:
# model = AdaBoostClassifier(base_estimator = DecisionTreeClassifier(max_depth = 1, criterion = 'entropy'),
# algorithm = 'SAMME', n_estimators = 10, random_state = 1)
# model.fit(X, y)
@test 0.85 < split_importance(model, coeffs)[3] < 0.95 # Very Stable
# regressor
model = build_forest(y2, X, 0, 100, 0.7, 2; rng=StableRNG(100))
# sklearn:
# model = RandomForestRegressor(n_estimators = 100, max_depth = 2, random_state = 100)
# model.fit(X, y)
f1 = impurity_importance(model)
@test sum(f1[1:2]) < 0.1 # Very Stable
end
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 772 | # Test parallelization of random forests
@testset "parallel.jl" begin
# Spawn one worker and make the package available on it so the forest
# builders can distribute work across processes.
Distributed.addprocs(1)
@test Distributed.nprocs() > 1
Distributed.@everywhere using DecisionTree
Random.seed!(16)
# Classification
n, m = 10^3, 5
features = rand(n, m)
weights = rand(-1:1, m)
labels = round.(Int, features * weights)
model = build_forest(labels, features, 2, 10)
preds = apply_forest(model, features)
cm = confusion_matrix(labels, preds)
@test cm.accuracy > 0.8
# Regression
n, m = 10^3, 5
features = randn(n, m)
weights = rand(-2:2, m)
labels = features * weights
model = build_forest(labels, features, 2, 10)
preds = apply_forest(model, features)
@test R2(labels, preds) > 0.8
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2364 | @testset "digits.jl" begin
X, Y = load_data("digits")
Y = float.(Y) # labels/targets to Float to enable regression
min_samples_leaf = 1
n_subfeatures = 0
max_depth = -1
t = DecisionTree.build_tree(Y, X, n_subfeatures, max_depth, min_samples_leaf)
@test length(t) in [190, 191]
min_samples_leaf = 5
t = DecisionTree.build_tree(Y, X, n_subfeatures, max_depth, min_samples_leaf)
@test length(t) == 126
min_samples_leaf = 5
n_subfeatures = 0
max_depth = 6
t = DecisionTree.build_tree(Y, X, n_subfeatures, max_depth, min_samples_leaf)
@test length(t) == 44
@test depth(t) == 6
min_samples_leaf = 1
n_subfeatures = 0
max_depth = -1
min_samples_split = 20
t = DecisionTree.build_tree(
Y, X, n_subfeatures, max_depth, min_samples_leaf, min_samples_split
)
@test length(t) == 122
min_samples_leaf = 1
n_subfeatures = 0
max_depth = -1
min_samples_split = 2
min_purity_increase = 0.25
t = DecisionTree.build_tree(
Y,
X,
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase,
)
@test length(t) == 103
n_subfeatures = 3
n_trees = 10
partial_sampling = 0.7
max_depth = -1
min_samples_leaf = 5
min_samples_split = 2
min_purity_increase = 0.0
model = build_forest(
Y,
X,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
preds = apply_forest(model, X)
@test R2(Y, preds) > 0.8
preds_MT = apply_forest(model, X; use_multithreading=true)
@test R2(Y, preds_MT) > 0.8
@test sum(abs.(preds .- preds_MT)) < 1e-8
println("\n##### 3 foldCV Regression Tree #####")
n_folds = 5
r2 = nfoldCV_tree(Y, X, n_folds; rng=StableRNG(1), verbose=false)
@test mean(r2) > 0.55
println("\n##### 3 foldCV Regression Forest #####")
n_subfeatures = 2
n_trees = 10
n_folds = 5
partial_sampling = 0.5
r2 = nfoldCV_forest(
Y,
X,
n_folds,
n_subfeatures,
n_trees,
partial_sampling;
rng=StableRNG(1),
verbose=false,
)
@test mean(r2) > 0.55
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 938 | # Regression Test - Appliances Energy Prediction Data Set
# https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
@testset "energy.jl" begin
# Downloads the UCI Appliances Energy dataset over the network; requires
# connectivity. `readcsv` is a helper defined elsewhere in the test suite.
energy = let
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv"
tmp_path = download(url)
readcsv(tmp_path)
end
# Row 1 is the CSV header; column 2 is the target, columns 3+ the features.
features = energy[2:end, 3:end]
labels = float.(energy[2:end, 2])
# over-fitting
# An unconstrained tree trained and evaluated on the same data should fit
# almost perfectly.
n_subfeatures = 0
max_depth = -1
min_samples_leaf = 1
model = build_tree(labels, features, n_subfeatures, max_depth, min_samples_leaf)
preds = apply_tree(model, features)
@test R2(labels, preds) > 0.99
println("\n##### nfoldCV Regression Tree #####")
r2 = nfoldCV_tree(labels, features, 3)
@test mean(r2) > 0.05
println("\n##### nfoldCV Regression Forest #####")
r2 = nfoldCV_forest(labels, features, 2, 10, 3)
@test mean(r2) > 0.35
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 3639 | @testset "low_precision.jl" begin
Random.seed!(5)
n, m = 10^3, 5
features = Array{Any}(undef, n, m)
features[:, :] = randn(StableRNG(1), n, m)
features[:, 1] = round.(Int32, features[:, 1])
weights = rand(StableRNG(1), -2:2, m)
labels = float.(features * weights)
min_samples_leaf = Int32(1)
n_subfeatures = Int32(0)
max_depth = Int32(-1)
min_samples_split = Int32(2)
min_purity_increase = 0.5
model = build_tree(
labels,
round.(Int32, features),
n_subfeatures,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
preds = apply_tree(model, round.(Int32, features))
@test R2(labels, preds) < 0.95
@test typeof(preds) <: Vector{Float64}
n_subfeatures = Int32(3)
n_trees = Int32(10)
partial_sampling = 0.7
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
model = build_forest(
labels,
features,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
preds = apply_forest(model, features)
@test R2(labels, preds) > 0.9
@test typeof(preds) <: Vector{Float64}
preds_MT = apply_forest(model, features; use_multithreading=true)
@test R2(labels, preds_MT) > 0.9
@test typeof(preds_MT) <: Vector{Float64}
@test sum(abs.(preds .- preds_MT)) < 1.0e-8
println("\n##### nfoldCV Regression Tree #####")
n_folds = Int32(3)
pruning_purity = 1.0
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
r2 = nfoldCV_tree(
labels,
features,
n_folds,
pruning_purity,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
@test mean(r2) > 0.6
println("\n##### nfoldCV Regression Forest #####")
n_trees = Int32(10)
n_subfeatures = Int32(2)
n_folds = Int32(3)
max_depth = Int32(-1)
min_samples_leaf = Int32(5)
min_samples_split = Int32(2)
min_purity_increase = 0.0
r2 = nfoldCV_forest(
labels,
features,
n_folds,
n_subfeatures,
n_trees,
partial_sampling,
max_depth,
min_samples_leaf,
min_samples_split,
min_purity_increase;
rng=StableRNG(1),
)
@test mean(r2) > 0.8
# Test Float16 labels, and Float16 features
features = Float16.(features)
labels = Float16.(labels)
model = build_stump(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Float16}
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Float16}
model = build_forest(labels, features)
preds = apply_forest(model, features)
@test typeof(preds) == Vector{Float16}
# Verify that the `preds` were calculated based on `labels` of the same type.
# If the code at some point converts the numbers to, say, `Float64`, then this test will fail.
@test !all(x->(x in labels), preds)
preds_MT = apply_forest(model, features; use_multithreading=true)
@test typeof(preds_MT) == Vector{Float16}
@test sum(abs.(preds .- preds_MT)) < 1.0e-8
model = build_tree(labels, features)
preds = apply_tree(model, features)
@test typeof(preds) == Vector{Float16}
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 2095 | @testset "scikitlearn.jl" begin
n, m = 10^3, 5
features = rand(StableRNG(1), n, m)
weights = rand(StableRNG(1), -1:1, m)
labels = features * weights
model = fit!(
DecisionTreeRegressor(; rng=StableRNG(1), min_samples_split=5), features, labels
)
@test R2(labels, predict(model, features)) > 0.8
@test impurity_importance(model) == impurity_importance(model.root)
@test isapprox(
permutation_importance(model, features, labels; rng=StableRNG(1)).mean,
permutation_importance(
model.root,
labels,
features,
(model, y, X) -> R2(y, apply_tree(model, X));
rng=StableRNG(1),
).mean,
)
let
regressor = RandomForestRegressor(;
rng=StableRNG(1), n_trees=10, min_samples_leaf=5, n_subfeatures=2
)
model = fit!(regressor, features, labels)
@test R2(labels, predict(model, features)) > 0.8
@test impurity_importance(model) == impurity_importance(model.ensemble)
@test split_importance(model) == split_importance(model.ensemble)
@test isapprox(
permutation_importance(model, features, labels; rng=StableRNG(1)).mean,
permutation_importance(
model.ensemble,
labels,
features,
(model, y, X) -> R2(y, apply_forest(model, X));
rng=StableRNG(1),
).mean,
)
end
Random.seed!(2)
N = 3000
X = randn(StableRNG(1), N, 10)
y = randn(StableRNG(1), N)
max_depth = 5
model = fit!(DecisionTreeRegressor(; rng=StableRNG(1), max_depth), X, y)
@test depth(model) == max_depth
## Test that the RNG arguments work as expected
X = randn(StableRNG(1), 100, 10)
y = randn(StableRNG(1), 100)
@test fit_predict!(RandomForestRegressor(; rng=10), X, y) ==
fit_predict!(RandomForestRegressor(; rng=10), X, y)
@test fit_predict!(RandomForestRegressor(; rng=10), X, y) !=
fit_predict!(RandomForestRegressor(; rng=22), X, y)
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | code | 1444 | @testset "scikitlearn.jl" begin
n, m = 10^3, 5
features = rand(StableRNG(1), n, m)
weights = rand(StableRNG(1), -1:1, m)
labels = features * weights
let
regressor = DecisionTreeRegressor(;
rng=StableRNG(1), min_samples_leaf=5, pruning_purity_threshold=0.1
)
model = fit!(regressor, features, labels)
@test R2(labels, predict(model, features)) > 0.8
end
model = fit!(
DecisionTreeRegressor(; rng=StableRNG(1), min_samples_split=5), features, labels
)
@test R2(labels, predict(model, features)) > 0.8
let
regressor = RandomForestRegressor(;
rng=StableRNG(1), n_trees=10, min_samples_leaf=5, n_subfeatures=2
)
model = fit!(regressor, features, labels)
@test R2(labels, predict(model, features)) > 0.8
end
N = 3000
X = randn(StableRNG(1), N, 10)
y = randn(StableRNG(1), N)
max_depth = 5
model = fit!(DecisionTreeRegressor(; rng=StableRNG(1), max_depth), X, y)
@test depth(model) == max_depth
## Test that the RNG arguments work as expected
X = randn(StableRNG(1), 100, 10)
y = randn(StableRNG(1), 100)
@test fit_predict!(RandomForestRegressor(; rng=10), X, y) ==
fit_predict!(RandomForestRegressor(; rng=10), X, y)
@test fit_predict!(RandomForestRegressor(; rng=10), X, y) !=
fit_predict!(RandomForestRegressor(; rng=22), X, y)
end # @testset
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | docs | 8091 | # Modal Decision Trees & Forests
[](https://aclai-lab.github.io/ModalDecisionTrees.jl)
[](https://aclai-lab.github.io/ModalDecisionTrees.jl/dev)
[](https://cirrus-ci.com/github/aclai-lab/ModalDecisionTrees.jl)
[](https://codecov.io/gh/aclai-lab/ModalDecisionTrees.jl)
[](https://mybinder.org/v2/gh/aclai-lab/ModalDecisionTrees.jl/HEAD?labpath=pluto-demo.jl)
<!-- [](https://github.com/invenia/BlueStyle) -->
### Interpretable models for native time-series & image classification!
This package provides algorithms for learning *decision trees* and *decision forests* with enhanced abilities.
Leveraging the expressive power of Modal Logic, these models can extract *temporal/spatial patterns*, and can natively handle *time series* and *images* (without any data preprocessing). Currently available via [MLJ.jl](https://github.com/alan-turing-institute/MLJ.jl) and [*Sole.jl*](https://github.com/aclai-lab/Sole.jl).
#### Features & differences with [DecisionTree.jl](https://github.com/JuliaAI/DecisionTree.jl):
The MLJ models provided (`ModalDecisionTree` and `ModalRandomForest`) can act as drop-in replacements for DecisionTree.jl's tree and forest models. The main difference is that the two models provided are [probabilistic](https://alan-turing-institute.github.io/MLJ.jl/dev/adding_models_for_general_use/#Overview) and can perform both classification (with y labels of type `String` or `CategoricalValue`), and regression (with numeric y labels).
<!-- Also feature_importance = :impurity is not supported -->
Additionally, these models:
- Are able to handle variables that are `AbstractVector{<:Real}` or `AbstractMatrix{<:Real}`;
- Support [multimodal](https://en.wikipedia.org/wiki/Multimodal_learning) learning (e.g., learning from *combinations* of scalars, time series and images);
- Are based on a unique algorithm that extends CART and C4.5;
<!-- - Fully optimized implementation (fancy data structures, multithreading, memoization, minification, Pareto-based pruning optimizations, etc); -->
<!-- - TODO -->
<!-- - Four pruning conditions: max_depth, min_samples_leaf, min_purity_increase, max_purity_at_leaf -->
<!-- TODO - Top-down pre-pruning & post-pruning -->
<!-- - Bagging (Random Forests) TODO: phrase this better -->
#### Current limitations (also see [TODOs](#todos)):
- Only supports numeric features;
- Does not support `missing` or `NaN` values.
#### JuliaCon 2022 8-minute talk
<!-- [](https://youtu.be/8F1vZsl8Zvg) -->
<div align="center">
<a target="_blank" href="https://youtu.be/8F1vZsl8Zvg">
<img src="https://img.youtube.com/vi/8F1vZsl8Zvg/0.jpg">
</a>
</div>
<!--
## Installation
Simply type the following commands in Julia's REPL:
```julia
using Pkg; Pkg.add("ModalDecisionTrees");
```
-->
## Installation & Usage
Simply type the following commands in Julia's REPL:
```julia
# Install package
using Pkg; Pkg.add("MLJ");
using Pkg; Pkg.add("ModalDecisionTrees");
# Import packages
using MLJ
using ModalDecisionTrees
using Random
# Load an example dataset (a temporal one)
X, y = ModalDecisionTrees.load_japanesevowels()
N = length(y)
# Instantiate an MLJ machine based on a Modal Decision Tree with ≥ 4 samples at leaf
mach = machine(ModalDecisionTree(min_samples_leaf=4), X, y)
# Split dataset
p = randperm(N)
train_idxs, test_idxs = p[1:round(Int, N*.8)], p[round(Int, N*.8)+1:end]
# Fit
fit!(mach, rows=train_idxs)
# Perform predictions, compute accuracy
yhat = predict_mode(mach, X[test_idxs,:])
accuracy = MLJ.accuracy(yhat, y[test_idxs])
# Print model
report(mach).printmodel(3)
# Access raw model
model = fitted_params(mach).model
```
<!--
# TODO
# Render raw model
Pkg.add("GraphRecipes"); Pkg.add("Plots")
using GraphRecipes
using Plots
#wrapped_model = ModalDecisionTrees.wrap(model.root, (variable_names_map = report(mach).var_grouping,))
# for _method in [:spectral, :sfdp, :circular, :shell, :stress, :spring, :tree, :buchheim, :arcdiagram, :chorddiagram]
wrapped_model = ModalDecisionTrees.wrap(model.root, (; threshold_display_method = x->round(x, digits=2)), use_feature_abbreviations = true)
for _method in [:tree, :buchheim]
for _nodeshape in [:rect] # , [:rect, :ellipse]
display(plot(
TreePlot(wrapped_model),
method = _method,
nodeshape = _nodeshape,
# nodesize = (3,10),
# root = :left,
curves = false,
fontsize = 10,
size=(860, 640),
title = "$(_method)"
))
end
end
-->
<!-- TODO (`Y isa Vector{<:{Integer,String}}`) -->
<!--
Detailed usage instructions are available for each model using the doc method. For example:
```julia
using MLJ
doc("DecisionTreeClassifier", pkg="ModalDecisionTrees")
```
Available models are: AdaBoostStumpClassifier, DecisionTreeClassifier, DecisionTreeRegressor, RandomForestClassifier, RandomForestRegressor.
-->
<!--
## Visualization
A DecisionTree model can be visualized using the print_tree-function of its native interface (for an example see above in section 'Classification Example'). -->
## TODOs
- [x] Enable loss functions different from Shannon's entropy (*untested*)
- [x] Enable regression (*untested*)
- [x] Proper test suite
- [ ] Visualizations of modal rules/patterns
<!-- - [x] AbstractTrees interface -->
## Want to know more?
[Della Monica, Dario, et al. "Decision trees with a modal flavor." International Conference of the Italian Association for Artificial Intelligence. Cham: Springer International Publishing, 2022.](https://link.springer.com/chapter/10.1007/978-3-031-27181-6_4)
Most of the works in *symbolic learning* are based either on Propositional Logics (PLs) or First-order Logics (FOLs); PLs are the simplest kind of logic and can only handle *tabular data*, while FOLs can express complex entity-relation concepts. Machine Learning with FOLs enables handling data with complex topologies, such as time series, images, or videos; however, these logics are computationally challenging. Instead, Modal Logics (e.g. [Interval Logic](https://en.wikipedia.org/wiki/Interval_temporal_logic)) represent a perfect trade-off in terms of computational tractability and expressive power, and naturally lend themselves for expressing some forms of *temporal/spatial reasoning*.
Recently, symbolic learning techniques such as Decision Trees, Random Forests and Rule-Based models have been extended to the use of Modal Logics of time and space. *Modal Decision Trees* and *Modal Random Forests* have been applied to classification tasks, showing statistical performances that are often comparable to those of functional methods (e.g., neural networks), while providing, at the same time, highly-interpretable classification models. Examples of these tasks are COVID-19 diagnosis from cough/breath audio [[1]](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4102488), [[2]](https://drops.dagstuhl.de/opus/volltexte/2021/14783/pdf/LIPIcs-TIME-2021-7.pdf), land cover classification from aerial images [[3]](https://arxiv.org/abs/2109.08325), EEG-related tasks [[4]](https://link.springer.com/chapter/10.1007/978-3-031-06242-1_53), and gas turbine trip prediction.
This technology also offers a natural extension for *multimodal* learning [[5]](http://ceur-ws.org/Vol-2987/paper7.pdf).
## Credits
*ModalDecisionTrees.jl* lives within the [*Sole.jl*](https://github.com/aclai-lab/Sole.jl) framework for *symbolic machine learning*.
The package is developed by the [ACLAI Lab](https://aclai.unife.it/en/) @ University of Ferrara.
Thanks to Ben Sadeghi ([@bensadeghi](https://github.com/bensadeghi/)), author of [DecisionTree.jl](https://github.com/JuliaAI/DecisionTree.jl),
which inspired the construction of this package.
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 0.5.0 | 200b2bd9dee3dfe2dcffa1fb51b6dd00d371ed2b | docs | 309 | ```@meta
CurrentModule = ModalDecisionTrees
```
# ModalDecisionTrees
Welcome to the documentation for [ModalDecisionTrees](https://github.com/aclai-lab/ModalDecisionTrees.jl).
```@index
```
```@autodocs
Modules = [ModalDecisionTrees, ModalDecisionTrees.MLJInterface, ModalDecisionTrees.experimentals]
```
| ModalDecisionTrees | https://github.com/aclai-lab/ModalDecisionTrees.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 466 | using Documenter
# Documentation build script for RealPolyhedralHomotopy.jl.
using RealPolyhedralHomotopy
push!(LOAD_PATH, "../src/")

# Generate the HTML documentation pages.
makedocs(;
    sitename = "RealPolyhedralHomotopy.jl",
    pages = ["RealPolyhedralHomotopy" => "index.md"],
)

# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(;
    repo = "github.com/klee669/RealPolyhedralHomotopy.jl.git",
    devbranch = "main",
)
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 550 | """
RealPolyhedralHomotopy
A package for finding real roots of systems of polynomial equations using polyhedral homotopy.
"""
module RealPolyhedralHomotopy
# internal dependencies used by the implementation files below
using HomotopyContinuation
using AbstractAlgebra
using PolynomialRoots
using MixedSubdivisions
using LinearAlgebra
# exported: re-export the dependency APIs so users of this package
# get them automatically (e.g. `@var`, `System`, `mixed_cells`)
using Reexport
@reexport using MixedSubdivisions, HomotopyContinuation, AbstractAlgebra, PolynomialRoots, LinearAlgebra
# the three public entry points: certify_patchwork, generate_binomials, rph_track
include("certify_patchwork.jl")
include("generate_binomials.jl")
include("rph_track.jl")
end # module
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 5905 | export certify_patchwork
"""
certify_patchwork(F::System; Number_Real_Solutions = false)
Certify if a given system is patchworked that all real solutions can be found using the real polyhedral homotopy.
It returns the value `1` if the system `F` is certified to be patchworked according to the certification inequality.
Otherwise, `0` is returned.
# Arguments
* `F` : The target system for the real polyhedral homotopy.
```julia
@var x y;
F = System([-1 - 24000*y + x^3, -9 + 50*x*y - 1*y^2]);
result = certify_patchwork(F)
```
```
1
```
* There is an option `Number_Real_Solutions` returning `(1,k)` where `k` is number of real solutions to the target system when the target system is patchedworked. The default value for the option is false.
```julia
result = certify_patchwork(F; Number_Real_Solutions = true)
```
```
(1,4)
```
"""
# Certify that `F` is "patchworked" (see the docstring above): for every mixed
# cell of the Log|C|-induced subdivision, check a certification inequality on
# the Cayley matrix; if all cells pass, every real solution of F is reachable
# by the real polyhedral homotopy. Returns 1 (certified) or 0; with
# `Number_Real_Solutions = true` it additionally counts the real solutions of
# the binomial start systems.
function certify_patchwork(F::System;Number_Real_Solutions::Bool = false)
    neqs = length(F)
    n = neqs
    varsF = variables(F);
    # Define matrices that are the monomial support and coefficients of F
    A = support_coefficients(F)[1];
    B = support_coefficients(F)[2];
    vB = reduce(vcat, B);  # all coefficients concatenated, aligned with Cayley columns
    # Use Log(|C|) to define lift: each exponent is lifted by
    # round(-10^6 * log|coefficient|), giving an integer lifting vector.
    l1 = round.(-1*(10^6)*log.(abs.(B[1])));
    l1 = convert.(Int,l1);
    lifts = [l1];
    for i in 2:neqs
        l = round.(-1*(10^6)*log.(abs.(B[i])))
        l = convert.(Int,l)
        append!(lifts, [l])
    end
    # Compute mixed cells of the subdivision induced by the lifting
    cells = mixed_cells(A, lifts);
    ncells = length(cells);
    # Construct the Cayley matrix: stack each support A[i] over the i-th
    # standard-basis indicator row, then concatenate horizontally.
    mats = [];
    for i in 1:length(A)
        sz = size(A[i])[2]
        m1 = A[i];
        m2 = zeros(i-1, sz);
        m3 = ones(1, sz);
        m4 = zeros(neqs - i, sz)
        M = [m1 ; m2 ; m3 ; m4]
        append!(mats, [M])
    end
    M = reduce(hcat, mats);
    ## Make inequality for each patchworked system: a cell fails if, for some
    ## monomial outside the cell, |<ν, log|c||> < log(#columns) * ||ν||₁,
    ## where ν spans the nullspace of the corresponding Cayley submatrix.
    failed_cells = [];
    success_cells = [];
    scales = [];
    for i in 1:ncells
        # Columns of the Cayley matrix picked by this mixed cell (two per equation).
        in_cols = [];
        mixedCells = indices(cells[i])
        for j in 1:n
            # offset of equation j's column block inside the Cayley matrix
            offset = sum(size(A[k])[2] for k in 1:j) - size(A[j])[2]
            col1 = mixedCells[j][1] + offset
            col2 = mixedCells[j][2] + offset
            append!(in_cols, col1)
            append!(in_cols, col2)
        end
        out_cols = [ j for j in 1:size(M)[2] if j ∉ in_cols ]
        fails = [];
        # Test the certification inequality against each out-of-cell column.
        for j in 1:length(out_cols)
            cols = vcat(in_cols, out_cols[j]);
            sort!(cols);
            M_cells = M[:, cols];
            null = nullspace(M_cells);
            vBmod = vB[cols];
            lhs = abs.(dot(null, log.(abs.(vBmod))))
            rhs = log(size(M)[2])*norm(null,1)
            if lhs < rhs
                append!(fails, 1)
                append!(scales, rhs/lhs)
            end
        end
        if length(fails) == 0
            append!(success_cells, i)
        else
            append!(failed_cells, i)
            #println(failed)
        end
    end
    ##### Constructing binomial systems begins #####
    # Use Log(|C|) to define lift (same lifting as above, recomputed)
    w1 = round.(-1*(10^6)*log.(abs.(B[1])));
    w1 = convert.(Int,w1);
    lifts = [w1];
    for i in 2:neqs
        w = round.(-1*(10^6)*log.(abs.(B[i])))
        w = convert.(Int,w)
        append!(lifts, [w])
    end
    # Compute mixed cells
    cells = mixed_cells(A, lifts);
    ncells = length(cells);
    # Define binomial systems from mixed cells: for each cell keep only the
    # two monomials (with their coefficients) selected per equation.
    binomial_systems = [];
    for i in 1:ncells
        system = [];
        mons = indices(cells[i])
        for j in 1:neqs
            mons2 = mons[j]
            bi1 = transpose(A[j])[mons2[1]:mons2[1],:]
            bi2 = transpose(A[j])[mons2[2]:mons2[2],:]
            term1 = B[j][mons2[1]]*prod(varsF.^(transpose(bi1)))
            term2 = B[j][mons2[2]]*prod(varsF.^(transpose(bi2)))
            p = term1 + term2;
            append!(system,p)
        end
        append!(binomial_systems, [system])
    end
    # Convert binomial systems from type Any to type Expression
    for i in 1:length(binomial_systems)
        binomial_systems[i] = convert(Array{Expression,1}, binomial_systems[i])
    end
    ##### Constructing binomial systems ends #####
    ##### Finding solutions for binomial systems begins #####
    # Find real solutions to binomial systems: write each system as x^D = Bz,
    # bring D to Hermite normal form (H = U*D), and back-substitute one
    # univariate equation at a time using PolynomialRoots.
    r_binomial_sols = [];
    for i in 1:ncells
        T = System(binomial_systems[i])
        D = zero_matrix(ZZ, n, n)
        Bz = zeros(n)
        T_mons = support_coefficients(T)[1]
        T_coeffs = support_coefficients(T)[2]
        for j in 1:n
            # exponent difference of the two monomials of equation j
            v = T_mons[j][:,1] - T_mons[j][:,2]
            for k in 1:length(v)
                D[k,j] = v[k]
            end
            # move the second term to the right-hand side: x^v = -c2/c1
            Bz[j] = -1*T_coeffs[j][2]/T_coeffs[j][1]
        end
        D = transpose(D)
        H,U = hnf_with_transform(D)
        # transform the right-hand sides by the unimodular matrix U
        Bz_new = zeros(n);
        for j in 1:n
            v = Array(U[j, :])
            v1 = transpose(Bz).^v
            Bz_new[j] = prod(v1)
        end
        # Create dictionary to store real solutions, keyed by variable index
        sols = Dict()
        # Initialize first entry of sols: solve the last (univariate) equation
        # x_n^H[n,n] = Bz_new[n]
        poly_root = zeros(H[n,n]+1)
        poly_root[end] = 1;
        poly_root[1] = -1*Bz_new[end]
        R = PolynomialRoots.roots(poly_root)
        # keep only roots with (numerically) zero imaginary part
        real_R = findall(x->norm(x)<10^(-10), imag.(R));
        test = [];
        for j in 1:length(real_R)
            append!(test, [[real(R[real_R[j]])]])
        end
        sols[n] = test
        # Back-substitution up the triangular system H
        for k in 1:n-1
            poly_root = im*zeros(H[n-k,n-k]+1)
            test =[];
            for j in 1:length(sols[n-k+1])
                poly_root[end] = prod(sols[n-k+1][j][end-l]^H[n-k,n-l] for l in 0:k-1)
                poly_root[1] = -1*Bz_new[n-k]
                R = PolynomialRoots.roots(poly_root)
                real_R = findall(x->norm(x)<10^(-10), imag.(R));
                for l in 1:length(real_R)
                    append!(test, [[real.(R[real_R[l]]),sols[n-k+1][j]]])
                end
            end
            for l in 1:length(test)
                test[l] = collect(Iterators.flatten(test[l]))
            end
            sols[n-k] = test
        end
        append!(r_binomial_sols, [sols[1]])
    end
    ##### Finding solutions for binomial systems ends #####
    # Report: 0 if any cell failed the inequality; otherwise 1, optionally
    # together with the number of real binomial-system solutions.
    if Number_Real_Solutions == true
        if length(failed_cells)>0
            return 0
        else
            return (1, length(collect(Iterators.flatten(r_binomial_sols))));
        end
    else
        if length(failed_cells)>0
            return 0
        else
            return 1
        end
    end
end
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 2566 | export generate_binomials, Binomial_system_data
"""
Binomial_system_data
An object contains 4 vectors consist of binomial systems and its normal vectors, liftings, and mixed cells.
"""
struct Binomial_system_data
    # One binomial start system (a `Vector{Expression}`) per mixed cell.
    binomial_system::Vector{Any}
    # Inner normal vector of each mixed cell, parallel to `binomial_system`.
    normal_vectors::Vector{Any}
    # Integer lifting vectors (rounded -10^6 * log|coefficient|) that induced
    # the mixed subdivision.
    lifts::Vector{Vector{Int64}}
    # The mixed cells themselves, as returned by `mixed_cells`.
    cells::Vector{MixedCell}
end
# Compact display: a `Binomial_system_data` prints as just its type name,
# hiding the (potentially large) stored systems and cells.
Base.show(io::IO, ::Binomial_system_data) = print(io, "Binomial_system_data")
"""
generate_binomials(F::System)
Return a wrapper object Binomial_system_data from an input polynomial system. Binomial systems obtained from the mixed cells induced by the ``Log|C|``-lifting.
The object Binomial_system_data contains the binomial system, normal vectors, lifting vectors, and cells from the mixed subdivision computed.
The object Binomial_system_data is used as an input for the rph_track function.
# Arguments
* `F` : The target system for the real polyhedral homotopy.
```julia
B = generate_binomials(F)
```
```
Binomial_system_data
```
```julia
B.binomial_system
```
```
2-element Vector{Any}:
Expression[-24000*y + x^3, 50*x*y - y^2]
Expression[-24000*y + x^3, -9 + 50*x*y]
```
"""
# Build the binomial start systems for the real polyhedral homotopy: lift the
# supports by round(-10^6 * log|coefficient|), compute the induced mixed cells,
# and for each cell keep the two selected monomials of every equation.
# Returns a `Binomial_system_data` wrapper (systems, cell normals, lifts, cells).
function generate_binomials(F::System)
    x = variables(F);
    neqs = length(F);
    # Define matrices that are the monomial support and coefficients of F
    A = support_coefficients(F)[1];
    B = support_coefficients(F)[2];
    # Use Log(|C|) to define lift: one integer lifting vector per equation
    w1 = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][1])));
    w1 = convert.(Int,w1);
    lifts = [w1];
    for i in 2:neqs
        w = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][i])))
        w = convert.(Int,w)
        append!(lifts, [w])
    end
    # Compute mixed cells of the induced subdivision
    cells = mixed_cells(A, lifts);
    ncells = length(cells);
    # Define binomial systems from mixed cells: per equation, the cell picks
    # two monomial indices; the binomial is their coefficient-weighted sum.
    binomial_systems = [];
    normal_vectors = [];
    for i in 1:ncells
        system = [];
        mons = indices(cells[i])
        for j in 1:neqs
            mons2 = mons[j]
            bi1 = transpose(A[j])[mons2[1]:mons2[1],:]
            bi2 = transpose(A[j])[mons2[2]:mons2[2],:]
            term1 = B[j][mons2[1]]*prod(x.^(transpose(bi1)))
            term2 = B[j][mons2[2]]*prod(x.^(transpose(bi2)))
            p = term1 + term2;
            append!(system,p)
        end
        append!(binomial_systems, [system])
        # record the cell's inner normal alongside its binomial system
        append!(normal_vectors, [normal(cells[i])])
    end
    # Convert binomial systems from type Any to type Expression
    for i in 1:length(binomial_systems)
        binomial_systems[i] = convert(Array{Expression,1}, binomial_systems[i])
    end
    return Binomial_system_data(binomial_systems, normal_vectors, lifts, cells)
end
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 5653 | export rph_track
"""
rph_track(B::Binomial_system_data, F::System; Certification = false)
Return the output of tracking the real solutions of a given list of binomial systems to the target system.
# Arguments
* `B` : The object `Binomial_system_data` obtained from `generate_binomials(F)`.
* `F` : The target system for the real polyhedral homotopy.
```julia
@var x y;
F = System([-1 - 24000*y + x^3, -9 + 50*x*y - 1*y^2]);
B = generate_binomials(F);
realSols = rph_track(B,F)
```
```
4-element Vector{Vector{Float64}}:
[-1095.4451129504978, -54772.25548320812]
[1095.4451137838312, 54772.255524874796]
[8.111114476617955, 0.02219298606763958]
[-8.103507635567631, -0.02221382112196499]
```
* The optional argument `Certification` certifies that all real solutions to a patchedworked system are found.
This is done by an a posteriori certification for numerical approximations obtained by the real polyhedral homotopy.
When the real polyhedral homotopy root-finding is certified, it returns a list of solutions to the target and `1`; otherwise, it returns `0`. The default value for the option is false.
```julia
realSols = rph_track(B,F; Certification = true)
```
```
([[-1095.4451129504978, -54772.25548320812], [1095.4451137838312, 54772.255524874796], [8.111114476617955, 0.02219298606763958], [-8.103507635567631, -0.022213821121964985]], 1)
```
"""
# Track the real solutions of the binomial start systems in `BData` to the
# target system `F` along the polyhedral homotopy (t goes from 1 to 0).
# With `Certification = true`, a posteriori certification via
# `HomotopyContinuation.certify` is run on both the start and end solutions;
# returns 0 if any certificate fails, otherwise `(solutions, 1)`.
function rph_track(BData::Binomial_system_data,F::System;Certification::Bool = false)
    neqs = length(F);
    n = neqs;
    varsF = variables(F);
    binomial_systems = BData.binomial_system;
    normal_vectors = BData.normal_vectors;
    l = BData.lifts;  # NOTE: `l` is also reused below as a loop index; the
                      # loop variables shadow this binding only inside their loops.
    ncells = length(binomial_systems);
    # Find real solutions to binomial systems: write each as x^D = B, reduce D
    # to Hermite normal form (H = U*D), and back-substitute with PolynomialRoots.
    r_binomial_sols = [];
    for i in 1:ncells
        T = System(binomial_systems[i])
        D = zero_matrix(ZZ, n, n)
        B = zeros(n)
        T_mons = support_coefficients(T)[1]
        T_coeffs = support_coefficients(T)[2]
        for j in 1:n
            # exponent difference of the two monomials of equation j
            v = T_mons[j][:,1] - T_mons[j][:,2]
            for k in 1:length(v)
                D[k,j] = v[k]
            end
            # right-hand side: x^v = -c2/c1
            B[j] = -1*T_coeffs[j][2]/T_coeffs[j][1]
        end
        D = transpose(D)
        H,U = hnf_with_transform(D)
        # transform right-hand sides by the unimodular matrix U
        B_new = zeros(n);
        for l in 1:n
            v = Array(U[l, :])
            v1 = transpose(B).^v
            B_new[l] = prod(v1)
        end
        # Create dictionary to store real solutions, keyed by variable index
        sols = Dict()
        # Initialize first entry of sols: solve the univariate x_n^H[n,n] = B_new[n]
        poly_root = zeros(H[n,n]+1)
        poly_root[end] = 1;
        poly_root[1] = -1*B_new[end]
        R = PolynomialRoots.roots(poly_root)
        # keep only roots with (numerically) zero imaginary part
        real_R = findall(x->norm(x)<10^(-10), imag.(R));
        test = [];
        for l in 1:length(real_R)
            append!(test, [[real(R[real_R[l]])]])
        end
        sols[n] = test
        # Back-substitute up the triangular system H
        for k in 1:n-1
            poly_root = im*zeros(H[n-k,n-k]+1)
            test =[];
            for j in 1:length(sols[n-k+1])
                poly_root[end] = prod(sols[n-k+1][j][end-l]^H[n-k,n-l] for l in 0:k-1)
                poly_root[1] = -1*B_new[n-k]
                R = PolynomialRoots.roots(poly_root)
                real_R = findall(x->norm(x)<10^(-10), imag.(R));
                for l in 1:length(real_R)
                    append!(test, [[real.(R[real_R[l]]),sols[n-k+1][j]]])
                end
            end
            for l in 1:length(test)
                test[l] = collect(Iterators.flatten(test[l]))
            end
            sols[n-k] = test
        end
        append!(r_binomial_sols, [sols[1]])
    end
    ### Define homotopies
    # Define our homotopy variable
    @var t
    # Define matrices that are the monomial support and coefficients of F
    A = support_coefficients(F)[1];
    B = support_coefficients(F)[2];
    # Create homotopy for each mixed cell
    homotopies = [];
    c = BData.cells;
    for k in 1:length(c)
        rn = normal_vectors[k]
        # Find exponents for homotopy variable t: <a, normal> + lift, shifted
        # so the minimum exponent per equation is 0 (the in-cell monomials).
        tvecs = [];
        A = support_coefficients(F)[1];
        B = support_coefficients(F)[2];
        # NOTE(review): `Anew = A` aliases A (no copy); `Anew[i] = ...` below
        # also replaces `A[i]`. Harmless here because A is re-read from
        # support_coefficients at the top of each outer iteration — confirm.
        Anew = A;
        for i in 1:n
            tlist = transpose(A[i])*rn + l[i]
            m = minimum(tlist)
            tlist = tlist - m*ones(length(tlist))
            tlist = round.(tlist);
            tlist = convert.(Int, tlist)
            append!(tvecs, [tlist])
            # append the t-exponent row to the support of equation i
            Anew[i] = [A[i] ; tlist']
        end
        varsFt = collect(Iterators.flatten([varsF, t]))
        eqs = [];
        for i in 1:n
            varmat = varsFt.^Anew[i]
            eq = sum(B[i][j]*prod(varmat[:, j]) for j in 1:size(varmat)[2])
            append!(eqs, [eq])
        end
        # send t -> 1-t since Homotopy goes from 1 to 0
        for i in 1:length(eqs)
            eqs[i] = subs(eqs[i], t=>(1-t))
        end
        append!(homotopies, [eqs])
    end
    # Change homotopies from type Any to type Expression
    for i in 1:length(homotopies)
        homotopies[i] = convert(Array{Expression,1}, homotopies[i])
    end
    # Create our homotopy systems
    homotopy_systems = [];
    for i in 1:ncells
        append!(homotopy_systems, [Homotopy(homotopies[i], varsF, t)])
    end
    # This gives the real solutions after only tracking the real solutions from the binomial systems
    real_sols = [];
    # Track real solutions from binomial systems to target system
    for i in 1:length(r_binomial_sols)
        H1 = InterpretedHomotopy(homotopy_systems[i])
        R2 = HomotopyContinuation.solve(H1, r_binomial_sols[i],show_progress=false)
        append!(real_sols, real_solutions(R2))
    end
    if Certification == true
        # Certify start solutions cell by cell; bail out with 0 on any failure.
        for i in 1:ncells
            cr = certify(binomial_systems[i],convert(Array{Vector{Float64}},r_binomial_sols[i]));
            if nreal_certified(cr) < length(r_binomial_sols[i])
                return 0
            end
        end
        # Certify each tracked end solution against the target system.
        for i in 1:length(real_sols)
            cr = certify(F,real_sols[i]);
            if nreal_certified(cr) < 1
                return 0
            end
        end
        return (convert(Array{Vector{Float64}},real_sols),1)
    else
        return convert(Array{Vector{Float64}},real_sols)
    end
end
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 419 | module RealPolyhedralHomotopy
# internal dependencies used by the implementation files below
using HomotopyContinuation
using AbstractAlgebra
using PolynomialRoots
using MixedSubdivisions
using LinearAlgebra
# exported: re-export the dependency APIs so users of this package
# get them automatically (e.g. `@var`, `System`, `mixed_cells`)
using Reexport
@reexport using MixedSubdivisions, HomotopyContinuation, AbstractAlgebra, PolynomialRoots, LinearAlgebra
include("certify_patchwork.jl")
include("generate_binomials.jl")
include("rph_track.jl")
end # module
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 5662 | export certify_patchwork
@doc """
certify_patchwork(polySystem::System; Number_Real_Solutions = false)
Certify if a given system is patchworked that all real solutions can be found using the real polyhedral homotopy.
It returns the value `1` if the system `F` is certified to be patchworked according to the certification inequality.
Otherwise, `0` is returned.
"""
# Variant implementation of `certify_patchwork` (untyped `polySystem` argument;
# this file also contains a version that takes `F::System`). Certifies the
# patchwork condition over all mixed cells of the Log|C|-induced subdivision
# and optionally counts the real solutions of the binomial start systems.
function certify_patchwork(polySystem;Number_Real_Solutions::Bool = false)
    F = polySystem
    neqs = length(F)
    n =neqs
    varsF = variables(F);
    # Define matrices that are the monomial support and coefficients of F
    A = support_coefficients(F)[1];
    B = support_coefficients(F)[2];
    vB = reduce(vcat, B);  # all coefficients concatenated, aligned with Cayley columns
    # Use Log(|C|) to define lift: round(-10^6 * log|coefficient|) per exponent
    l1 = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][1])));
    l1 = convert.(Int,l1);
    lifts = [l1];
    for i in 2:neqs
        l = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][i])))
        l = convert.(Int,l)
        append!(lifts, [l])
    end
    # Compute mixed cells
    cells = mixed_cells(A, lifts);
    ncells = length(cells);
    # Construct the Cayley matrix: each support stacked over its indicator row
    mats = [];
    for i in 1:length(A)
        sz = size(A[i])[2]
        m1 = A[i];
        m2 = zeros(i-1, sz);
        m3 = ones(1, sz);
        m4 = zeros(neqs - i, sz)
        M = [m1 ; m2 ; m3 ; m4]
        append!(mats, [M])
    end
    M = reduce(hcat, mats);
    ## Make inequality for each patchworked system: a cell fails when, for some
    ## out-of-cell column, |<ν, log|c||> < log(#columns)*||ν||₁ with ν the
    ## nullspace vector of the corresponding Cayley submatrix.
    failed_cells = [];
    success_cells = [];
    scales = [];
    for i in 1:ncells
        # Cayley columns selected by this mixed cell (two per equation)
        in_cols = [];
        mixedCells = indices(cells[i])
        for j in 1:n
            offset = sum(size(A[k])[2] for k in 1:j) - size(A[j])[2]
            col1 = mixedCells[j][1] + offset
            col2 = mixedCells[j][2] + offset
            append!(in_cols, col1)
            append!(in_cols, col2)
        end
        # complement: columns not selected by the cell
        out_cols = [];
        for j in 1:size(M)[2]
            t = findall(x->x==j, in_cols)
            if length(t) == 0
                append!(out_cols, j)
            end
        end
        fails = [];
        for j in 1:length(out_cols)
            cols = vcat(in_cols, out_cols[j]);
            sort!(cols);
            M_cells = M[1:end, cols];
            null = nullspace(M_cells);
            vBmod = vB[cols];
            lhs = abs.(dot(null, log.(abs.(vBmod))))
            rhs = log(size(M)[2])*norm(null,1)
            if lhs < rhs
                append!(fails, 1)
                append!(scales, rhs/lhs)
            end
        end
        if length(fails) == 0
            append!(success_cells, i)
        else
            append!(failed_cells, i)
            #println(failed)
        end
    end
    ##### Constructing binomial systems begins #####
    # Use Log(|C|) to define lift (same lifting as above, recomputed)
    w1 = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][1])));
    w1 = convert.(Int,w1);
    lifts = [w1];
    for i in 2:neqs
        w = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][i])))
        w = convert.(Int,w)
        append!(lifts, [w])
    end
    # Compute mixed cells
    cells = mixed_cells(A, lifts);
    ncells = length(cells);
    # Define binomial systems from mixed cells
    binomial_systems = [];
    for i in 1:ncells
        system = [];
        mons = indices(cells[i])
        for j in 1:neqs
            mons2 = mons[j]
            bi1 = transpose(A[j])[mons2[1]:mons2[1],1:end]
            bi2 = transpose(A[j])[mons2[2]:mons2[2],1:end]
            term1 = B[j][mons2[1]]*prod(varsF.^(transpose(bi1)))
            term2 = B[j][mons2[2]]*prod(varsF.^(transpose(bi2)))
            p = term1 + term2;
            append!(system,p)
        end
        append!(binomial_systems, [system])
    end
    # Convert binomial systems from type Any to type Expression
    for i in 1:length(binomial_systems)
        binomial_systems[i] = convert(Array{Expression,1}, binomial_systems[i])
    end
    ##### Constructing binomial systems ends #####
    ##### Finding solutions for binomial systems begins #####
    # Find real solutions to binomial systems: x^D = Bz, Hermite normal form,
    # then univariate back-substitution with PolynomialRoots.
    r_binomial_sols = [];
    for i in 1:ncells
        T = System(binomial_systems[i])
        D = zero_matrix(ZZ, n, n)
        Bz = zeros(n)
        T_mons = support_coefficients(T)[1]
        T_coeffs = support_coefficients(T)[2]
        for j in 1:n
            v = T_mons[j][1:end,1:1] - T_mons[j][1:end,2:2]
            for k in 1:length(v)
                D[k,j] = v[k]
            end
            Bz[j] = -1*T_coeffs[j][2]/T_coeffs[j][1]
        end
        D = transpose(D)
        H,U = hnf_with_transform(D)
        Bz_new = zeros(n);
        # NOTE(review): the inner `for i` loops below shadow the outer cell
        # index `i`; Julia scoping keeps the outer binding intact after each
        # inner loop, but renaming them would be clearer.
        for i in 1:n
            v = Array(U[i:i, 1:end])
            v1 = transpose(Bz).^v
            Bz_new[i] = prod(v1)
        end
        # Create dictionary to store real solutions, keyed by variable index
        sols = Dict()
        # Initialize first entry of sols: solve x_n^H[n,n] = Bz_new[n]
        poly_root = zeros(H[n,n]+1)
        poly_root[end] = 1;
        poly_root[1] = -1*Bz_new[end]
        R = PolynomialRoots.roots(poly_root)
        real_R = findall(x->norm(x)<10^(-10), imag.(R));
        test = [];
        for i in 1:length(real_R)
            append!(test, [[real(R[real_R[i]])]])
        end
        sols[n] = test
        # Back-substitute up the triangular system H
        for k in 1:n-1
            poly_root = im*zeros(H[n-k,n-k]+1)
            test =[];
            for j in 1:length(sols[n-k+1])
                poly_root[end] = prod(sols[n-k+1][j][end-l]^H[n-k,n-l] for l in 0:k-1)
                poly_root[1] = -1*Bz_new[n-k]
                R = PolynomialRoots.roots(poly_root)
                real_R = findall(x->norm(x)<10^(-10), imag.(R));
                for l in 1:length(real_R)
                    append!(test, [[real.(R[real_R[l]]),sols[n-k+1][j]]])
                end
            end
            for l in 1:length(test)
                test[l] = collect(Iterators.flatten(test[l]))
            end
            sols[n-k] = test
        end
        append!(r_binomial_sols, [sols[1]])
    end
    ##### Finding solutions for binomial systems ends #####
    # Report: 0 if any cell failed; otherwise 1, optionally with the count of
    # real binomial-system solutions.
    if Number_Real_Solutions == true
        if length(failed_cells)>0
            return 0
        else
            return (1, length(collect(Iterators.flatten(r_binomial_sols))));
        end
    else
        if length(failed_cells)>0
            return 0
        else
            return 1
        end
    end
end
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 1363 | export generate_binomials
"""
    generate_binomials(poly_system)

Variant implementation (untyped argument) that returns the raw list of
binomial start systems — one `Vector{Expression}` per mixed cell of the
Log|C|-induced subdivision — rather than a wrapper object.
"""
function generate_binomials(poly_system)
    F = poly_system;
    neqs = length(F);
    varsF = variables(F);
    # Define matrices that are the monomial support and coefficients of F
    A = support_coefficients(F)[1];
    B = support_coefficients(F)[2];
    # Use Log(|C|) to define lift: round(-10^6 * log|coefficient|) per exponent
    w1 = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][1])));
    w1 = convert.(Int,w1);
    lifts = [w1];
    for i in 2:neqs
        w = round.(-1*(10^6)*log.(abs.(support_coefficients(F)[2][i])))
        w = convert.(Int,w)
        append!(lifts, [w])
    end
    # Compute mixed cells
    cells = mixed_cells(A, lifts);
    ncells = length(cells);
    # Define binomial systems from mixed cells: per equation, keep the two
    # monomials selected by the cell with their coefficients.
    binomial_systems = [];
    for i in 1:ncells
        system = [];
        mons = indices(cells[i])
        for j in 1:neqs
            mons2 = mons[j]
            bi1 = transpose(A[j])[mons2[1]:mons2[1],1:end]
            bi2 = transpose(A[j])[mons2[2]:mons2[2],1:end]
            term1 = B[j][mons2[1]]*prod(varsF.^(transpose(bi1)))
            term2 = B[j][mons2[2]]*prod(varsF.^(transpose(bi2)))
            p = term1 + term2;
            append!(system,p)
        end
        append!(binomial_systems, [system])
    end
    # Convert binomial systems from type Any to type Expression
    for i in 1:length(binomial_systems)
        binomial_systems[i] = convert(Array{Expression,1}, binomial_systems[i])
    end
    return binomial_systems
end
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 4214 | export rph_track
"""
    rph_track(binomialSystem, targetSystem; Certification = false)

Variant implementation that takes the raw list of binomial systems (as
produced by the list-returning `generate_binomials`) instead of a wrapper
object. Solves each binomial system for its real roots and tracks them to
`targetSystem`; the homotopy attaches a *random* exponent `(1-t)^a` to every
monomial outside the binomial system, so the intermediate paths are not
deterministic across runs.
"""
function rph_track(binomialSystem, targetSystem;Certification::Bool = false)
    F = targetSystem;
    neqs = length(F);
    n = neqs;
    varsF = variables(F);
    F_eqs = expressions(F);
    binomial_systems = binomialSystem;
    ncells = length(binomial_systems);
    # Find real solutions to binomial systems: x^D = B, Hermite normal form,
    # then univariate back-substitution with PolynomialRoots.
    r_binomial_sols = [];
    for i in 1:ncells
        T = System(binomial_systems[i])
        D = zero_matrix(ZZ, n, n)
        B = zeros(n)
        T_mons = support_coefficients(T)[1]
        T_coeffs = support_coefficients(T)[2]
        for j in 1:n
            # exponent difference of the two monomials of equation j
            v = T_mons[j][1:end,1:1] - T_mons[j][1:end,2:2]
            for k in 1:length(v)
                D[k,j] = v[k]
            end
            # right-hand side: x^v = -c2/c1
            B[j] = -1*T_coeffs[j][2]/T_coeffs[j][1]
        end
        D = transpose(D)
        H,U = hnf_with_transform(D)
        B_new = zeros(n);
        # NOTE(review): this inner `for i` shadows the outer cell index `i`;
        # Julia scoping keeps the outer binding intact, but renaming would be clearer.
        for i in 1:n
            v = Array(U[i:i, 1:end])
            v1 = transpose(B).^v
            B_new[i] = prod(v1)
        end
        # Create dictionary to store real solutions, keyed by variable index
        sols = Dict()
        # Initialize first entry of sols: solve x_n^H[n,n] = B_new[n]
        poly_root = zeros(H[n,n]+1)
        poly_root[end] = 1;
        poly_root[1] = -1*B_new[end]
        R = PolynomialRoots.roots(poly_root)
        # keep only roots with (numerically) zero imaginary part
        real_R = findall(x->norm(x)<10^(-10), imag.(R));
        test = [];
        for i in 1:length(real_R)
            append!(test, [[real(R[real_R[i]])]])
        end
        sols[n] = test
        # Back-substitute up the triangular system H
        for k in 1:n-1
            poly_root = im*zeros(H[n-k,n-k]+1)
            test =[];
            for j in 1:length(sols[n-k+1])
                poly_root[end] = prod(sols[n-k+1][j][end-l]^H[n-k,n-l] for l in 0:k-1)
                poly_root[1] = -1*B_new[n-k]
                R = PolynomialRoots.roots(poly_root)
                real_R = findall(x->norm(x)<10^(-10), imag.(R));
                for l in 1:length(real_R)
                    append!(test, [[real.(R[real_R[l]]),sols[n-k+1][j]]])
                end
            end
            for l in 1:length(test)
                test[l] = collect(Iterators.flatten(test[l]))
            end
            sols[n-k] = test
        end
        append!(r_binomial_sols, [sols[1]])
    end
    # G is an array of the polynomial systems that are the set of monomials not in each binomial system
    G = [];
    for i in 1:ncells
        append!(G, [F_eqs - binomial_systems[i]])
    end
    # Define our homotopy variable
    @var t
    # Create systems consisting of monomials not in binomial system multiplied by (1-t)^a for a positive integer a
    homotopies = [];
    for k in 1:ncells
        J = System(G[k])
        G_sup1 = support_coefficients(J)[1];
        G_sup2 = support_coefficients(J)[2];
        h1 = [];
        for i in 1:neqs
            t1, t2 = size(transpose(G_sup1[i]))
            monomial_list = [];
            for j in 1:t1
                # random exponent in 1:10 for the vanishing factor (1-t)^a.
                # NOTE(review): abs(Int8(-128)) overflows back to -128, so with
                # probability 1/256 `a` becomes negative — consider rand(1:10).
                a = abs.(rand(Int8))%10 + 1
                mon_vec = transpose(G_sup1[i])[j:j,1:end]
                mon1 = G_sup2[i][j]*prod(varsF.^(transpose(mon_vec)))
                append!(monomial_list, mon1*((1-t)^a))
            end
            append!(h1, [sum(monomial_list)])
        end
        append!(homotopies, [h1])
    end
    # Change homotopies from type Any to type Expression
    for i in 1:length(homotopies)
        homotopies[i] = convert(Array{Expression,1}, homotopies[i])
    end
    # Create our homotopy systems by adding each binomial system to other systems that have monomials with (1-t)^a attached
    homotopy_systems = [];
    for i in 1:ncells
        combined_sys = homotopies[i] + binomial_systems[i]
        append!(homotopy_systems, [Homotopy(combined_sys, varsF, t)])
    end
    # This gives the real solutions after only tracking the real solutions from the binomial systems
    real_sols = [];
    # Track real solutions from binomial systems to target system
    for i in 1:length(r_binomial_sols)
        H1 = InterpretedHomotopy(homotopy_systems[i])
        R2 = HomotopyContinuation.solve(H1, r_binomial_sols[i],show_progress=false)
        append!(real_sols, real_solutions(R2))
    end
    if Certification == true
        # Certify start solutions cell by cell; bail out with 0 on any failure.
        for i in 1:ncells
            cr = certify(binomial_systems[i],convert(Array{Vector{Float64}},r_binomial_sols[i]));
            if nreal_certified(cr) < length(r_binomial_sols[i])
                return 0
            end
        end
        # Certify each tracked end solution against the target system.
        for i in 1:length(real_sols)
            cr = certify(F,real_sols[i]);
            if nreal_certified(cr) < 1
                return 0
            end
        end
        return (convert(Array{Vector{Float64}},real_sols),1)
    else
        return convert(Array{Vector{Float64}},real_sols)
    end
end
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | code | 396 | using Test, RealPolyhedralHomotopy
@var x y;
F = System([-1 - 24000*y + x^3, -9 + 50*x*y - 1*y^2]);
result = certify_patchwork(F);
@test result == 1
result = certify_patchwork(F; Number_Real_Solutions = true);
@test result[2] == 4
B = generate_binomials(F);
@test length(B.binomial_system) == 2
realSols = rph_track(B,F)
@test length(realSols) == 4
println("tests completed successfully")
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | docs | 1305 | # RealPolyhedralHomotopy.jl
`RealPolyhedralHomotopy.jl` is a package for finding real roots of systems of polynomial equations using polyhedral homotopy.
The package implements the algorithm for the real polyhedral homotopy established in [A Polyhedral Homotopy Algorithm For Real Zeros](https://arxiv.org/abs/1910.01957). The idea for the real polyhedral homotopy was motivated by the celebrated article [A Polyhedral Method for Solving Sparse Polynomial Systems](https://www.jstor.org/stable/2153370?seq=1), adapting the polyhedral homotopy method to real root finding.
## Documents
[](https://klee669.github.io/RealPolyhedralHomotopy.jl/stable)
[](https://klee669.github.io/RealPolyhedralHomotopy.jl/dev)
## Installation
The package can be installed via the Julia package manager. It is suggested to use Julia 1.6.1 or higher.
```julia
pkg> add RealPolyhedralHomotopy
```
## Getting Started
To find a preliminary example of how the functions in this package work see [this tutorial](https://github.com/klee669/RealPolyhedralHomotopy.jl/blob/main/docs/src/index.md), otherwise see the [documentation](https://klee669.github.io/RealPolyhedralHomotopy.jl/dev/#Installation) for more details.
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 1.2.1 | fe0b322c6a90a0a38725908427a1dd5982bc2e1b | docs | 2103 | # RealPolyhedralHomotopy.jl
[RealPolyhedralHomotopy.jl](https://github.com/klee669/RealPolyhedralHomotopy.jl)
is a package for finding real roots of systems of polynomial equations using polyhedral homotopy.
The package implements the algorithm for the real polyhedral homotopy established in [A Polyhedral Homotopy Algorithm For Real Zeros](https://arxiv.org/abs/1910.01957). The idea for the real polyhedral homotopy was motivated by the celebrated article [A Polyhedral Method for Solving Sparse Polynomial Systems](https://www.jstor.org/stable/2153370?seq=1), adapting the polyhedral homotopy method to real root finding.
The authors of this package are
* [Kisun Lee](https://klee669.github.io)
* [Julia Lindberg](https://sites.google.com/view/julialindberg)
* [Jose Israel Rodriguez](https://sites.google.com/wisc.edu/jose)
## Installation
The package can be installed via the Julia package manager. It is suggested to use Julia 1.6.1 or higher.
```julia
pkg> add RealPolyhedralHomotopy
```
## Introduction
We support system input through the [HomotopyContinuation](https://www.juliahomotopycontinuation.org) package.
```julia
using RealPolyhedralHomotopy
@var x y;
F = System([-1 - 24000*y + x^3, -9 + 50*x*y - 1*y^2]);
```
For finding real roots, the list of binomial systems corresponding to `F` is required as a start system.
```julia
B = generate_binomials(F);
```
```
Binomial_system_data
```
```julia
B.binomial_system
```
```
2-element Vector{Any}:
Expression[-24000*y + x^3, 50*x*y - y^2]
Expression[-24000*y + x^3, -9 + 50*x*y]
```
Using the function `rph_track`, real roots are found by tracking the real polyhedral homotopy.
```julia
@var x y;
F = System([-1 - 24000*y + x^3, -9 + 50*x*y - 1*y^2]);
B = generate_binomials(F);
realSols = rph_track(B,F)
```
```
4-element Vector{Vector{Float64}}:
[-1095.4451129504978, -54772.25548320812]
[1095.4451137838312, 54772.255524874796]
[8.111114476617955, 0.02219298606763958]
[-8.103507635567631, -0.02221382112196499]
```
## Functions for the real polyhedral homotopy
```@docs
certify_patchwork
generate_binomials
rph_track
```
| RealPolyhedralHomotopy | https://github.com/klee669/RealPolyhedralHomotopy.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | code | 1608 | using Documenter, Nonconvex
Nonconvex.@load MMA Ipopt NLopt Percival Bayesian Hyperopt Juniper Pavito MTS Semidefinite
# Site map for the Nonconvex.jl documentation. Each entry maps a sidebar
# title to a markdown source file under docs/src (entries without an explicit
# title use the page's own H1 heading).
docpages = [
    "Getting started" => "index.md",
    "Problem definition" => [
        "Overview" => "problem/problem.md",
        "`Model` definition" => "problem/model.md",
        "`DictModel` definition" => "problem/dict_model.md",
        "Querying models" => "problem/queries.md",
    ],
    "Gradients, Jacobians and Hessians" => [
        "Overview" => "gradients/gradients.md",
        "gradients/user_defined.md",
        "gradients/other_ad.md",
        "gradients/chainrules_fd.md",
        "gradients/sparse.md",
        "gradients/symbolic.md",
        "gradients/implicit.md",
        "gradients/history.md",
    ],
    "Algorithms" => [
        "Overview" => "algorithms/algorithms.md",
        "algorithms/mma.md",
        "algorithms/ipopt.md",
        "algorithms/nlopt.md",
        "algorithms/auglag.md",
        "algorithms/minlp.md",
        "algorithms/hyperopt.md",
        "algorithms/surrogate.md",
        "algorithms/mts.md",
        "algorithms/sdp.md",
        "algorithms/metaheuristics.md",
        "algorithms/nomad.md",
        "algorithms/tobs.md",
    ],
    "Optimization result" => "result.md",
]

# Build the HTML docs; `warnonly = true` keeps the build from failing on
# docstring/cross-reference warnings.
makedocs(sitename = "Nonconvex.jl", pages = docpages, warnonly = true)
# Publish the built documentation, but only when running inside CI
# (the CI environment variable is set to "true" by the CI runner).
if get(ENV, "CI", nothing) == "true"
    deploydocs(repo = "github.com/JuliaNonconvex/Nonconvex.jl.git", push_preview = true)
end
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | code | 2676 | module Nonconvex
using Reexport
@reexport using NonconvexCore
@reexport using NonconvexUtils
"""
    Nonconvex.@load algo

Install (if necessary) and load the wrapper package that implements the
algorithm named `algo`. The heavy lifting is delegated to `_load`, which maps
the algorithm name to an expression loading the corresponding package.
"""
macro load(algo)
    # Escape so the generated `using`/`Pkg` statements run in the caller's scope.
    return esc(_load(string(algo)))
end
"""
    Nonconvex.@load algo1 algo2 ...

Vararg form of `Nonconvex.@load`: expands to one `Nonconvex.@load` invocation
per requested algorithm.
"""
macro load(algos...)
    loads = [:(Nonconvex.@load $algo) for algo in algos]
    return Expr(:block, loads...)
end
"""
    _load(algo::AbstractString)

Map an algorithm name (or one of its accepted aliases) to an expression that
installs (if necessary) and loads the wrapper package implementing it, as
produced by [`install_and_load_module`](@ref).

Throws an `ArgumentError` for unrecognized algorithm names.
"""
function _load(algo)
    # Each branch lists the canonical name and the aliases users may pass.
    if algo in ("MMA", "GCMMA", "MMA87", "MMA02")
        return install_and_load_module(:NonconvexMMA)
    elseif algo in ("Ipopt", "IpoptAlg")
        return install_and_load_module(:NonconvexIpopt)
    elseif algo in ("NLopt", "NLoptAlg")
        return install_and_load_module(:NonconvexNLopt)
    elseif algo in ("Juniper", "JuniperIpopt", "JuniperIpoptAlg")
        return install_and_load_module(:NonconvexJuniper)
    elseif algo in ("Pavito", "PavitoIpoptCbc", "PavitoIpoptCbcAlg")
        return install_and_load_module(:NonconvexPavito)
    elseif algo in ("Percival", "AugLag", "PercivalAlg")
        return install_and_load_module(:NonconvexPercival)
    elseif algo == "AugLag2"
        return install_and_load_module(:NonconvexAugLagLab)
    elseif algo in ("Bayesian", "BayesOpt", "BayesOptAlg")
        return install_and_load_module(:NonconvexBayesian)
    elseif algo in ("SDP", "Semidefinite", "SDPBarrier", "SDPBarrierAlg")
        return install_and_load_module(:NonconvexSemidefinite)
    elseif algo in ("Search", "MTS", "LS1", "MTSAlg", "LS1Alg")
        return install_and_load_module(:NonconvexSearch)
    elseif algo in ("Hyperopt", "Deflated", "Multistart", "HyperoptAlg", "DeflatedAlg")
        return install_and_load_module(:NonconvexMultistart)
    elseif algo == "TOBS"
        return install_and_load_module(:NonconvexTOBS)
    elseif algo == "Metaheuristics"
        return install_and_load_module(:NonconvexMetaheuristics)
    elseif algo == "NOMAD"
        return install_and_load_module(:NonconvexNOMAD)
    else
        # Throw a proper Exception type rather than a raw String (throwing a
        # bare String is discouraged and harder for callers to handle).
        throw(
            ArgumentError(
                "Unsupported algorithm. Please check the documentation of Nonconvex.jl.",
            ),
        )
    end
end
"""
    install_and_load_module(mod::Symbol)

Return a quoted expression which, when evaluated in the caller's scope,
attempts `using \$mod` and, if the package is not yet available, installs it
via `Pkg.add` and retries the load. Installation failures are logged and
rethrown.
"""
function install_and_load_module(mod)
    quote
        using Pkg
        # Interpolate the module name as a Symbol literal into the quoted code.
        modname = $(Meta.quot(mod))
        try
            @info "Attempting to load the package $modname."
            using $mod
            @info "Loading successful."
        catch
            # The package is missing (or failed to load); try installing it.
            @info "Couldn't find the package $modname. Attempting to install it."
            try
                Pkg.add(string(modname))
            catch err
                @info "Package installation failed! Please report an issue."
                rethrow(err)
            end
            @info "$modname installed."
            @info "Attempting to load the package $modname."
            using $mod
            @info "Loading successful."
        end
    end
end
end
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | code | 1419 | using Test, Nonconvex, Pkg
# Each group below follows the same pattern:
#   1. Assert the wrapper package is not already installed — `using` an
#      unavailable package throws an ArgumentError.
#   2. `Nonconvex.@load <name>` installs and loads the wrapper package.
#   3. Construct the exported algorithm struct(s) to confirm the names are
#      now in scope.

# NonconvexIpopt: interior point method via Ipopt.jl.
@test_throws ArgumentError using NonconvexIpopt
Nonconvex.@load Ipopt
IpoptAlg()
# NonconvexNLopt: NLopt.jl wrapper (algorithm chosen by symbol).
@test_throws ArgumentError using NonconvexNLopt
Nonconvex.@load NLopt
NLoptAlg(:LD_MMA)
# NonconvexJuniper: branch and bound MINLP solver (also re-exports IpoptAlg).
@test_throws ArgumentError using NonconvexJuniper
Nonconvex.@load Juniper
JuniperIpoptAlg()
IpoptAlg()
# NonconvexMMA: pure-Julia method of moving asymptotes (1987 and 2002 variants).
@test_throws ArgumentError using NonconvexMMA
Nonconvex.@load MMA
MMA87()
MMA02()
# NonconvexPavito: outer-approximation MINLP solver (also re-exports IpoptAlg).
@test_throws ArgumentError using NonconvexPavito
Nonconvex.@load Pavito
PavitoIpoptCbcAlg()
IpoptAlg()
# NonconvexPercival: augmented Lagrangian (loaded via the AugLag alias).
@test_throws ArgumentError using NonconvexPercival
Nonconvex.@load AugLag
AugLag()
# NonconvexBayesian: surrogate-assisted optimization wrapping a sub-solver.
@test_throws ArgumentError using NonconvexBayesian
Nonconvex.@load Bayesian
BayesOptAlg(IpoptAlg())
# Experimental augmented Lagrangian package — currently disabled.
#@test_throws ArgumentError using NonconvexAugLagLab
#Nonconvex.@load AugLag2
#AugLag2()
# NonconvexSemidefinite: log-barrier handling of semidefinite constraints.
@test_throws ArgumentError using NonconvexSemidefinite
Nonconvex.@load Semidefinite
SDPBarrierAlg(IpoptAlg())
# NonconvexSearch: multiple trajectory and local search methods.
@test_throws ArgumentError using NonconvexSearch
Nonconvex.@load Search
MTSAlg()
LS1Alg()
# NonconvexMultistart: hyperparameter-style multi-start meta-algorithms.
@test_throws ArgumentError using NonconvexMultistart
Nonconvex.@load Multistart
HyperoptAlg(IpoptAlg())
# NonconvexTOBS: topology optimization of binary structures.
@test_throws ArgumentError using NonconvexTOBS
Nonconvex.@load TOBS
TOBSAlg()
# NonconvexMetaheuristics: gradient-free metaheuristics (ECA, DE, PSO, ...).
@test_throws ArgumentError using NonconvexMetaheuristics
Nonconvex.@load Metaheuristics
MetaheuristicsAlg(ECA)
# Remove NonconvexPercival before loading NOMAD to avoid a dependency clash;
# see the linked issue for background.
Pkg.rm("NonconvexPercival") # https://github.com/ds4dm/Tulip.jl/issues/125
# NonconvexNOMAD: MADS algorithm via NOMAD.jl.
@test_throws ArgumentError using NonconvexNOMAD
Nonconvex.@load NOMAD
NOMADAlg()
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 19955 | # Nonconvex
[](https://github.com/JuliaNonconvex/Nonconvex.jl/actions)
[](https://codecov.io/gh/JuliaNonconvex/Nonconvex.jl)
[](https://JuliaNonconvex.github.io/Nonconvex.jl/stable)
[](https://JuliaNonconvex.github.io/Nonconvex.jl/dev)
`Nonconvex.jl` is an umbrella package over implementations and wrappers of a number of nonconvex constrained optimization algorithms and packages making use of automatic differentiation. Zero, first and second order methods are available. Nonlinear equality and inequality constraints as well as integer and nonlinear semidefinite constraints are supported. A detailed description of all the algorithms and features available in `Nonconvex` can be found in the [documentation](https://JuliaNonconvex.github.io/Nonconvex.jl/stable).
## The `JuliaNonconvex` organization
The `JuliaNonconvex` organization hosts a number of packages which are available for use in `Nonconvex.jl`. The correct package is loaded using the `Nonconvex.@load` macro with the algorithm or package name. See the [documentation](https://JuliaNonconvex.github.io/Nonconvex.jl/stable) for more details. The following is a summary of all the packages in the `JuliaNonconvex` organization.
| Package | Description | Tests | Coverage |
| ------- | ----------- | ----- | -------- |
| [Nonconvex.jl](https://github.com/mohamed82008/Nonconvex.jl) | Umbrella package for nonconvex optimization | [](https://github.com/JuliaNonconvex/Nonconvex.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/Nonconvex.jl) |
| [NonconvexCore.jl](https://github.com/JuliaNonconvex/NonconvexCore.jl) | All the interface functions and structs | [](https://github.com/JuliaNonconvex/NonconvexCore.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexCore.jl) |
| [NonconvexMMA.jl](https://github.com/JuliaNonconvex/NonconvexMMA.jl) | Method of moving asymptotes implementation in pure Julia | [](https://github.com/JuliaNonconvex/NonconvexMMA.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexMMA.jl) |
| [NonconvexIpopt.jl](https://github.com/JuliaNonconvex/NonconvexIpopt.jl) | [Ipopt.jl](https://github.com/jump-dev/Ipopt.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexIpopt.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexIpopt.jl) |
| [NonconvexNLopt.jl](https://github.com/JuliaNonconvex/NonconvexNLopt.jl) | [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexNLopt.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexNLopt.jl) |
| [NonconvexPercival.jl](https://github.com/JuliaNonconvex/NonconvexPercival.jl) | [Percival.jl](https://github.com/JuliaSmoothOptimizers/Percival.jl) wrapper (an augmented Lagrangian algorithm implementation) | [](https://github.com/JuliaNonconvex/NonconvexPercival.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexPercival.jl) |
| [NonconvexJuniper.jl](https://github.com/JuliaNonconvex/NonconvexJuniper.jl) | [Juniper.jl](https://github.com/lanl-ansi/Juniper.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexJuniper.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexJuniper.jl) |
| [NonconvexPavito.jl](https://github.com/JuliaNonconvex/NonconvexPavito.jl) | [Pavito.jl](https://github.com/jump-dev/Pavito.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexPavito.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexPavito.jl) |
| [NonconvexSemidefinite.jl](https://github.com/JuliaNonconvex/NonconvexSemidefinite.jl) | Nonlinear semi-definite programming algorithm | [](https://github.com/JuliaNonconvex/NonconvexSemidefinite.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexSemidefinite.jl) |
| [NonconvexMultistart.jl](https://github.com/JuliaNonconvex/NonconvexMultistart.jl) | Multi-start optimization algorithms | [](https://github.com/JuliaNonconvex/NonconvexMultistart.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexMultistart.jl) |
| [NonconvexBayesian.jl](https://github.com/JuliaNonconvex/NonconvexBayesian.jl) | Constrained Bayesian optimization implementation | [](https://github.com/JuliaNonconvex/NonconvexBayesian.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexBayesian.jl) |
| [NonconvexSearch.jl](https://github.com/JuliaNonconvex/NonconvexSearch.jl) | Multi-trajectory and local search methods | [](https://github.com/JuliaNonconvex/NonconvexSearch.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexSearch.jl) |
| [NonconvexAugLagLab.jl](https://github.com/JuliaNonconvex/NonconvexAugLagLab.jl) | Experimental augmented Lagrangian package | [](https://github.com/JuliaNonconvex/NonconvexAugLagLab.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexAugLagLab.jl) |
| [NonconvexUtils.jl](https://github.com/JuliaNonconvex/NonconvexUtils.jl) | Some utility functions for automatic differentiation, history tracing, implicit functions and more. | [](https://github.com/JuliaNonconvex/NonconvexUtils.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexUtils.jl) |
| [NonconvexTOBS.jl](https://github.com/JuliaNonconvex/NonconvexTOBS.jl) | Binary optimization algorithm called "topology optimization of binary structures" ([TOBS](https://www.sciencedirect.com/science/article/abs/pii/S0168874X17305619?via%3Dihub)) which was originally developed in the context of optimal distribution of material in mechanical components. | [](https://github.com/JuliaNonconvex/NonconvexTOBS.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexTOBS.jl) |
| [NonconvexMetaheuristics.jl](https://github.com/JuliaNonconvex/NonconvexMetaheuristics.jl) | Metaheuristic gradient-free optimization algorithms as implemented in [`Metaheuristics.jl`](https://github.com/jmejia8/Metaheuristics.jl). | [](https://github.com/JuliaNonconvex/NonconvexMetaheuristics.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexMetaheuristics.jl) |
| [NonconvexNOMAD.jl](https://github.com/JuliaNonconvex/NonconvexNOMAD.jl) | [NOMAD algorithm](https://dl.acm.org/doi/10.1145/1916461.1916468) as wrapped in the [`NOMAD.jl`](https://github.com/bbopt/NOMAD.jl). | [](https://github.com/JuliaNonconvex/NonconvexNOMAD.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexNOMAD.jl) |
## Design philosophy
`Nonconvex.jl` is a Julia package that implements and wraps a number of constrained nonlinear and mixed integer nonlinear programming solvers. There are 3 focus points of `Nonconvex.jl` compared to similar packages such as `JuMP.jl` and `NLPModels.jl`:
1. Emphasis on a function-based API. Objectives and constraints are normal Julia functions.
2. The ability to nest algorithms to create more complicated algorithms.
3. The ability to automatically handle structs and different container types in the decision variables by automatically vectorizing and un-vectorizing them in an AD compatible way.
## Installing Nonconvex
To install `Nonconvex.jl`, open a Julia REPL and type `]` to enter the package mode. Then run:
```julia
add Nonconvex
```
Alternatively, copy and paste the following code to a Julia REPL:
```julia
using Pkg; Pkg.add("Nonconvex")
```
## Loading Nonconvex
To load and start using `Nonconvex.jl`, run:
```julia
using Nonconvex
```
## Quick example
```julia
using Nonconvex
Nonconvex.@load NLopt
f(x) = sqrt(x[2])
g(x, a, b) = (a*x[1] + b)^3 - x[2]
model = Model(f)
addvar!(model, [0.0, 0.0], [10.0, 10.0])
add_ineq_constraint!(model, x -> g(x, 2, 0))
add_ineq_constraint!(model, x -> g(x, -1, 1))
alg = NLoptAlg(:LD_MMA)
options = NLoptOptions()
r = optimize(model, alg, [1.0, 1.0], options = options)
r.minimum # objective value
r.minimizer # decision variables
```
## Algorithms
A summary of all the algorithms available in `Nonconvex` through different packages is shown in the table below. Scroll right to see more columns and see a description of the columns below the table.
| Algorithm name | Is meta-algorithm? | Algorithm package | Order | Finite bounds | Infinite bounds | Inequality constraints | Equality constraints | Semidefinite constraints | Integer variables |
| ------- | ----------- | ----- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |
| Method of moving asymptotes (MMA) | ❌ | `NonconvexMMA.jl` (pure Julia) or `NLopt.jl` | 1 | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| Primal dual interior point method | ❌ | `Ipopt.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| DIviding RECTangles algorithm (DIRECT) | ❌ | `NLopt.jl` | 0 | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Controlled random search (CRS) | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Multi-Level Single-Linkage (MLSL) | Limited | `NLopt.jl` | Depends on sub-solver | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| StoGo | ❌ | `NLopt.jl` | 1 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| AGS | ❌ | `NLopt.jl` | 0 | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Improved Stochastic Ranking Evolution Strategy (ISRES) | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| ESCH | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| COBYLA | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| BOBYQA | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| NEWUOA | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Principal AXIS (PRAXIS) | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Nelder Mead | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Subplex | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| CCSAQ | ❌ | `NLopt.jl` | 1 | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| SLSQP | ❌ | `NLopt.jl` | 1 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| TNewton | ❌ | `NLopt.jl` | 1 | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Shifted limited-memory variable-metric | ❌ | `NLopt.jl` | 1 | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Augmented Lagrangian in `NLopt` | Limited | `NLopt.jl` | Depends on sub-solver | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Augmented Lagrangian in `Percival` | ❌ | `Percival.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Multiple trajectory search | ❌ | `NonconvexSearch.jl` | 0 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Branch and bound for mixed integer nonlinear programming | ❌ | `Juniper.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ |
| Sequential polyhedral outer-approximations for mixed integer nonlinear programming | ❌ | `Pavito.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ |
| Evolutionary centers algorithm (ECA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Differential evolution (DE) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Particle swarm optimization (PSO) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Artificial bee colony (ABC) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Gravitational search algorithm (GSA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Simulated annealing (SA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Whale optimization algorithm (WOA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Machine-coded compact genetic algorithm (MCCGA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Genetic algorithm (GA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Nonlinear optimization with the MADS algorithm (NOMAD) | ❌ | `NOMAD.jl` | 0 | ✅ | ✅ | ✅ | Limited | ❌ | ✅ |
| Topology optimization of binary structures (TOBS) | ❌ | `NonconvexTOBS.jl` | 1 | Binary | ❌ | ✅ | ❌ | ❌ | Binary |
| Hyperband | ✅ | `Hyperopt.jl` | Depends on sub-solver | ✅ | ❌ | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Random search | ✅ | `Hyperopt.jl` | Depends on sub-solver | ✅ | ❌ | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Latin hypercube search | ✅ | `Hyperopt.jl` | Depends on sub-solver | ✅ | ❌ | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Surrogate assisted optimization | ✅ | `NonconvexBayesian.jl` | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Log barrier method for nonlinear semidefinite constraint handling | ✅ | `NonconvexSemidefinite.jl` | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | ✅ | Depends on sub-solver |
The following is an explanation of all the columns in the table:
- Algorithm name. This is the name of the algorithm and/or its acronym. Some algorithms have multiple variants implemented in their respective packages. When that's the case, the whole family of algorithms is mentioned only once.
- Is meta-algorithm? Some algorithms are meta-algorithms that call a sub-algorithm to do the optimization after transforming the problem. In this case, a lot of the properties of the meta-algorithm are inherited from the sub-algorithm. So if the sub-algorithm requires gradients or Hessians of functions in the model, the meta-algorithm will also require gradients and Hessians of functions in the model. Fields where the property of the meta-algorithm is inherited from the sub-solver are indicated using the "Depends on sub-solver" entry. Some algorithms in `NLopt` have a "Limited" meta-algorithm status because they can only be used to wrap algorithms from `NLopt`.
- Algorithm package. This is the Julia package that either implements the algorithm or calls it from another programming language. `Nonconvex` wraps all these packages using a consistent API while allowing each algorithm to be customized where possible and have its own set of options.
- Order. This is the order of the algorithm. Zero-order algorithms only require the evaluation of the objective and constraint functions, they don't require any gradients or Hessians of objective and constraint functions. First-order algorithms require both the value and gradients of objective and/or constraint functions. Second-order algorithms require the value, gradients and Hessians of objective and/or constraint functions.
- Finite bounds. This is true if the algorithm supports finite lower and upper bound constraints on the decision variables. One special case is the `TOBS` algorithm which only supports binary decision variables so an entry of "Binary" is used instead of true/false.
- Infinite bounds. This is true if the algorithm supports unbounded decision variables either from below, above or both.
- Inequality constraints. This is true if the algorithm supports nonlinear inequality constraints.
- Equality constraints. This is true if the algorithm supports nonlinear equality constraints. Algorithms that only support linear equality constraints are given an entry of "Limited".
- Semidefinite constraints. This is true if the algorithm supports nonlinear semidefinite constraints.
- Integer variables. This is true if the algorithm supports integer/discrete/binary decision variables, not just continuous. One special case is the `TOBS` algorithm which only supports binary decision variables so an entry of "Binary" is used instead of true/false.
## How to contribute?
**A beginner?** The easiest way to contribute is to read the documentation, test the package and report issues.
**An impulsive tester?** Improving the test coverage of any package is another great way to contribute to the `JuliaNonconvex` org. Check the coverage report of any of the packages above by clicking the coverage badge. Find the red lines in the report and figure out tests that would cover these lines of code.
**An algorithm head?** There are plenty of optimization algorithms that can be implemented and interfaced in `Nonconvex.jl`. You could be developing the next big nonconvex semidefinite programming algorithm right now! Or the next constraint handling method for evolutionary algorithms!
**A hacker?** Let's figure out how to wrap some optimization package in Julia in the unique, simple and nimble `Nonconvex.jl` style.
**A software designer?** Let's talk about design decisions and how to improve the modularity of the ecosystem.
You can always reach out by opening an issue.
## How to cite?
If you use Nonconvex.jl for your own research, please consider citing the following publication: Mohamed Tarek. Nonconvex.jl: A Comprehensive Julia Package for Non-Convex Optimization. 2023. doi: 10.13140/RG.2.2.36120.37121.
```
@article{MohamedTarekNonconvexjl,
doi = {10.13140/RG.2.2.36120.37121},
url = {https://rgdoi.net/10.13140/RG.2.2.36120.37121},
author = {Tarek, Mohamed},
language = {en},
title = {Nonconvex.jl: A Comprehensive Julia Package for Non-Convex Optimization},
year = {2023}
}
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 1576 | # `Nonconvex.jl` Documentation
`Nonconvex.jl` is a Julia package that implements and wraps a number of constrained nonlinear and mixed integer nonlinear programming solvers. There are 3 focus points of `Nonconvex.jl` compared to similar packages such as `JuMP.jl` and `NLPModels.jl`:
1. Emphasis on a function-based API. Objectives and constraints are normal Julia functions.
2. The ability to nest algorithms to create more complicated algorithms.
3. The ability to automatically handle structs and different container types in the decision variables by automatically vectorizing and un-vectorizing them in an AD compatible way.
## Installing Nonconvex
To install `Nonconvex.jl`, open a Julia REPL and type `]` to enter the package mode. Then run:
```julia
add Nonconvex
```
Alternatively, copy and paste the following code to a Julia REPL:
```julia
using Pkg; Pkg.add("Nonconvex")
```
## Loading Nonconvex
To load and start using `Nonconvex.jl`, run:
```julia
using Nonconvex
```
## Quick start
```julia
using Nonconvex
Nonconvex.@load NLopt
f(x) = sqrt(x[2])
g(x, a, b) = (a*x[1] + b)^3 - x[2]
model = Model(f)
addvar!(model, [0.0, 0.0], [10.0, 10.0])
add_ineq_constraint!(model, x -> g(x, 2, 0))
add_ineq_constraint!(model, x -> g(x, -1, 1))
alg = NLoptAlg(:LD_MMA)
options = NLoptOptions()
r = optimize(model, alg, [1.0, 1.0], options = options)
r.minimum # objective value
r.minimizer # decision variables
```
## Table of contents
```@contents
Pages = ["problem/problem.md", "algorithms/algorithms.md", "gradients/gradients.md", "result.md"]
Depth = 3
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 768 | # Optimization result
Each algorithm is free to return a different result type from the `optimize` function. However, all the result types have 2 fields:
- `result.minimum`: stores the minimum objective value reached in the optimization
- `result.minimizer`: stores the optimal decision variables reached during optimization
Some result types store additional information returned by the solver, e.g. the convergence status. Please explore the fields of the `result` output from `optimize` and/or check the documentation of the individual algorithms in the [algorithms section](algorithms/algorithms.md) of the documentation. If you have further questions, feel free to open issues in the [`Nonconvex.jl` repository](https://github.com/JuliaNonconvex/Nonconvex.jl).
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 15114 | # Algorithms
## Overview of algorithms
A summary of all the algorithms available in `Nonconvex` through different packages is shown in the table below. Scroll right to see more columns and see a description of the columns below the table.
| Algorithm name | Is meta-algorithm? | Algorithm package | Order | Finite bounds | Infinite bounds | Inequality constraints | Equality constraints | Semidefinite constraints | Integer variables |
| ------- | ----------- | ----- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |
| Method of moving asymptotes (MMA) | ❌ | `NonconvexMMA.jl` (pure Julia) or `NLopt.jl` | 1 | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| Primal dual interior point method | ❌ | `Ipopt.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| DIviding RECTangles algorithm (DIRECT) | ❌ | `NLopt.jl` | 0 | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Controlled random search (CRS) | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Multi-Level Single-Linkage (MLSL) | Limited | `NLopt.jl` | Depends on sub-solver | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| StoGo | ❌ | `NLopt.jl` | 1 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| AGS | ❌ | `NLopt.jl` | 0 | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ |
| Improved Stochastic Ranking Evolution Strategy (ISRES) | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| ESCH | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| COBYLA | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| BOBYQA | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| NEWUOA | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Principal AXIS (PRAXIS) | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Nelder Mead | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Subplex | ❌ | `NLopt.jl` | 0 | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
| CCSAQ | ❌ | `NLopt.jl` | 1 | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ |
| SLSQP | ❌ | `NLopt.jl` | 1 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| TNewton | ❌ | `NLopt.jl` | 1 | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Shifted limited-memory variable-metric | ❌ | `NLopt.jl` | 1 | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
| Augmented Lagrangian in `NLopt` | Limited | `NLopt.jl` | Depends on sub-solver | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Augmented Lagrangian in `Percival` | ❌ | `Percival.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Multiple trajectory search | ❌ | `NonconvexSearch.jl` | 0 | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Branch and bound for mixed integer nonlinear programming | ❌ | `Juniper.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ |
| Sequential polyhedral outer-approximations for mixed integer nonlinear programming | ❌ | `Pavito.jl` | 1 or 2 | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ |
| Evolutionary centers algorithm (ECA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Differential evolution (DE) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Particle swarm optimization (PSO) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Artificial bee colony (ABC) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Gravitational search algorithm (GSA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Simulated annealing (SA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Whale optimization algorithm (WOA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Machine-coded compact genetic algorithm (MCCGA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Genetic algorithm (GA) | ❌ | `Metaheuristics.jl` | 0 | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ |
| Nonlinear optimization with the MADS algorithm (NOMAD) | ❌ | `NOMAD.jl` | 0 | ✅ | ✅ | ✅ | Limited | ❌ | ✅ |
| Topology optimization of binary structures (TOBS) | ❌ | `NonconvexTOBS.jl` | 1 | Binary | ❌ | ✅ | ❌ | ❌ | Binary |
| Hyperband | ✅ | `Hyperopt.jl` | Depends on sub-solver | ✅ | ❌ | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Random search | ✅ | `Hyperopt.jl` | Depends on sub-solver | ✅ | ❌ | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Latin hypercube search | ✅ | `Hyperopt.jl` | Depends on sub-solver | ✅ | ❌ | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Surrogate assisted optimization | ✅ | `NonconvexBayesian.jl` | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver |
| Log barrier method for nonlinear semidefinite constraint handling | ✅ | `NonconvexSemidefinite.jl` | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | Depends on sub-solver | ✅ | Depends on sub-solver |
The following is an explanation of all the columns in the table:
- Algorithm name. This is the name of the algorithm and/or its acronym. Some algorithms have multiple variants implemented in their respective packages. When that's the case, the whole family of algorithms is mentioned only once.
- Is meta-algorithm? Some algorithms are meta-algorithms that call a sub-algorithm to do the optimization after transforming the problem. In this case, a lot of the properties of the meta-algorithm are inherited from the sub-algorithm. So if the sub-algorithm requires gradients or Hessians of functions in the model, the meta-algorithm will also require gradients and Hessians of functions in the model. Fields where the property of the meta-algorithm is inherited from the sub-solver are indicated using the "Depends on sub-solver" entry. Some algorithms in `NLopt` have a "Limited" meta-algorithm status because they can only be used to wrap algorithms from `NLopt`.
- Algorithm package. This is the Julia package that either implements the algorithm or calls it from another programming language. `Nonconvex` wraps all these packages using a consistent API while allowing each algorithm to be customized where possible and have its own set of options.
- Order. This is the order of the algorithm. Zero-order algorithms only require the evaluation of the objective and constraint functions, they don't require any gradients or Hessians of objective and constraint functions. First-order algorithms require both the value and gradients of objective and/or constraint functions. Second-order algorithms require the value, gradients and Hessians of objective and/or constraint functions.
- Finite bounds. This is true if the algorithm supports finite lower and upper bound constraints on the decision variables. One special case is the `TOBS` algorithm which only supports binary decision variables so an entry of "Binary" is used instead of true/false.
- Infinite bounds. This is true if the algorithm supports unbounded decision variables either from below, above or both.
- Inequality constraints. This is true if the algorithm supports nonlinear inequality constraints.
- Equality constraints. This is true if the algorithm supports nonlinear equality constraints. Algorithms that only support linear equality constraints are given an entry of "Limited".
- Semidefinite constraints. This is true if the algorithm supports nonlinear semidefinite constraints.
- Integer variables. This is true if the algorithm supports integer/discrete/binary decision variables, not just continuous. One special case is the `TOBS` algorithm which only supports binary decision variables so an entry of "Binary" is used instead of true/false.
## Wrapper packages
The `JuliaNonconvex` organization hosts a number of packages which wrap other optimization packages in Julia or implement their algorithms. The correct wrapper package is loaded using the `Nonconvex.@load` macro with the algorithm or package name. The following is a summary of all the wrapper packages in the `JuliaNonconvex` organization. To view the documentation of each package, click on the blue docs badge in the last column.
| Package | Description | Tests | Coverage | Docs |
| ------- | ----------- | ----- | -------- | -------- |
| [NonconvexMMA.jl](https://github.com/JuliaNonconvex/NonconvexMMA.jl) | Method of moving asymptotes implementation in pure Julia | [](https://github.com/JuliaNonconvex/NonconvexMMA.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexMMA.jl) | [](mma.md) |
| [NonconvexIpopt.jl](https://github.com/JuliaNonconvex/NonconvexIpopt.jl) | [Ipopt.jl](https://github.com/jump-dev/Ipopt.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexIpopt.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexIpopt.jl) | [](ipopt.md) |
| [NonconvexNLopt.jl](https://github.com/JuliaNonconvex/NonconvexNLopt.jl) | [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexNLopt.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexNLopt.jl) | [](nlopt.md) |
| [NonconvexPercival.jl](https://github.com/JuliaNonconvex/NonconvexPercival.jl) | [Percival.jl](https://github.com/JuliaSmoothOptimizers/Percival.jl) wrapper (an augmented Lagrangian algorithm implementation) | [](https://github.com/JuliaNonconvex/NonconvexPercival.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexPercival.jl) | [](auglag.md) |
| [NonconvexJuniper.jl](https://github.com/JuliaNonconvex/NonconvexJuniper.jl) | [Juniper.jl](https://github.com/lanl-ansi/Juniper.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexJuniper.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexJuniper.jl) | [](minlp.md) |
| [NonconvexPavito.jl](https://github.com/JuliaNonconvex/NonconvexPavito.jl) | [Pavito.jl](https://github.com/jump-dev/Pavito.jl) wrapper | [](https://github.com/JuliaNonconvex/NonconvexPavito.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexPavito.jl) | [](minlp.md) |
| [NonconvexSemidefinite.jl](https://github.com/JuliaNonconvex/NonconvexSemidefinite.jl) | Nonlinear semi-definite programming algorithm | [](https://github.com/JuliaNonconvex/NonconvexSemidefinite.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexSemidefinite.jl) | [](sdp.md) |
| [NonconvexMultistart.jl](https://github.com/JuliaNonconvex/NonconvexMultistart.jl) | Multi-start optimization algorithms | [](https://github.com/JuliaNonconvex/NonconvexMultistart.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexMultistart.jl) | [](hyperopt.md) |
| [NonconvexBayesian.jl](https://github.com/JuliaNonconvex/NonconvexBayesian.jl) | Constrained Bayesian optimization implementation | [](https://github.com/JuliaNonconvex/NonconvexBayesian.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexBayesian.jl) | [](surrogate.md) |
| [NonconvexSearch.jl](https://github.com/JuliaNonconvex/NonconvexSearch.jl) | Multi-trajectory and local search methods | [](https://github.com/JuliaNonconvex/NonconvexSearch.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexSearch.jl) | [](mts.md) |
| [NonconvexTOBS.jl](https://github.com/JuliaNonconvex/NonconvexTOBS.jl) | Binary optimization algorithm called "topology optimization of binary structures" ([TOBS](https://www.sciencedirect.com/science/article/abs/pii/S0168874X17305619?via%3Dihub)) which was originally developed in the context of optimal distribution of material in mechanical components. | [](https://github.com/JuliaNonconvex/NonconvexTOBS.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexTOBS.jl) | [](tobs.md) |
| [NonconvexMetaheuristics.jl](https://github.com/JuliaNonconvex/NonconvexMetaheuristics.jl) | Metaheuristic gradient-free optimization algorithms as implemented in [`Metaheuristics.jl`](https://github.com/jmejia8/Metaheuristics.jl). | [](https://github.com/JuliaNonconvex/NonconvexMetaheuristics.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexMetaheuristics.jl) | [](metaheuristics.md) |
| [NonconvexNOMAD.jl](https://github.com/JuliaNonconvex/NonconvexNOMAD.jl) | [NOMAD algorithm](https://dl.acm.org/doi/10.1145/1916461.1916468) as wrapped in the [`NOMAD.jl`](https://github.com/bbopt/NOMAD.jl). | [](https://github.com/JuliaNonconvex/NonconvexNOMAD.jl/actions) | [](https://codecov.io/gh/JuliaNonconvex/NonconvexNOMAD.jl) | [](nomad.md) |
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 2068 | # Augmented Lagrangian algorithm in pure Julia
## Description
[Percival.jl](https://github.com/JuliaSmoothOptimizers/Percival.jl) is a pure Julia implementation of the augmented Lagrangian algorithm. Both first and second order versions of the algorithm are available.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using Percival.
```julia
using Nonconvex
Nonconvex.@load Percival
alg = AugLag()
options = AugLagOptions()
result = optimize(model, alg, x0, options = options)
```
Percival is an optional dependency of Nonconvex so you need to import it in order to use it.
## Construct an instance
To construct an instance of the AugLag algorithm, use:
```julia
alg = AugLag()
```
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `AugLagOptions` struct when the algorithm is an `AugLag`. To specify options use keyword arguments in the constructor of `AugLagOptions`, e.g:
```julia
options = AugLagOptions(first_order = false, rtol = 1e-4)
```
The most important option is `first_order` which is `true` by default. When `first_order` is `true`, the first order augmented Lagrangian algorithm will be used. And when it is `false`, the second order augmented Lagrangian algorithm will be used. Other arguments include:
- `atol`: absolute tolerance in the subproblem optimizer
- `rtol`: relative tolerance in the subproblem optimizer
- `ctol`: absolute feasibility tolerance
- `max_iter`: maximum number of iterations
- `max_time`: maximum time in seconds
- `max_eval`: maximum number of function evaluations
When using the first order augmented Lagrangian and a block constraint (i.e. a constraint function that returns a vector), the use of reverse-mode AD will only require calling the adjoint operator of the block constraint function in order to compute the gradient of the augmented Lagrangian. This is particularly suitable for constraint functions whose Jacobians are expensive but the adjoint operator is relatively inexpensive.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 3686 | # Multi-start and hyper-parameter optimization in pure Julia
## Description
[Hyperopt.jl](https://github.com/baggepinnen/Hyperopt.jl) is a Julia library that implements a number of hyperparameter optimization algorithms which can be used to optimize the starting point of the optimization. `NonconvexHyperopt.jl` allows the use of the algorithms in `Hyperopt.jl` as meta-algorithms using the `HyperoptAlg` struct.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using Hyperopt.
```julia
using Nonconvex
Nonconvex.@load Hyperopt
alg = HyperoptAlg(IpoptAlg())
options = HyperoptOptions(sub_options = IpoptOptions(), sampler = GPSampler())
result = optimize(model, alg, x0, options = options)
```
Hyperopt is an optional dependency of Nonconvex so you need to import it in order to use it. `HyperoptAlg` can wrap any other algorithm in Nonconvex, e.g. `IpoptAlg()`. When the algorithm is a `HyperoptAlg`, the `options` keyword argument must be of type `HyperoptOptions`. For more on the options available see below.
## Construct an instance
To construct an instance of the Hyperopt + Ipopt algorithm, use:
```julia
alg = HyperoptAlg(IpoptAlg())
```
`HyperoptAlg` can wrap any other algorithm in Nonconvex, e.g. `NLoptAlg(:LD_MMA)` or `AugLag()`.
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `HyperoptOptions` struct when the algorithm is a `HyperoptAlg`. To specify options, use keyword arguments in the constructor of `HyperoptOptions`. The `sampler` keyword argument determines the sampling algorithm used to propose new starting points in the multi-start procedure. The `sub_options` keyword argument can be used to pass in the options for the sub-optimizer. There are 2 different ways to pass the sub-options depending on the sampler type.
The `sampler` argument can be of type:
1. `RandomSampler`
2. `LHSampler`
3. `CLHSampler`
4. `GPSampler`
5. `Hyperband`
When optimizing the starting point, the upper and lower bounds on the initial solution must be finite, or finite bounds must be passed in to the `options` constructor. All the options that can be passed to the `HyperoptOptions` constructor are listed below:
```@docs
NonconvexMultistart.HyperoptOptions
```
### Sampler choice
#### RandomSampler, LHSampler, CLHSampler and GPSampler
All the sampler constructors are functions defined in Nonconvex wrapping the Hyperopt alternatives to define defaults. For `GPSampler`, `Hyperopt.Min` is always used by default in Nonconvex so you should not pass this argument. All the other arguments that can be passed to the sampler constructor can be found in the [Hyperopt documentation](https://github.com/baggepinnen/Hyperopt.jl#details). Example:
```julia
options = HyperoptOptions(sub_options = IpoptOptions(), sampler = GPSampler())
```
#### Hyperband
The [Hyperband algorithm](https://github.com/baggepinnen/Hyperopt.jl#hyperband) in Hyperopt requires a different way to pass in the sub-options. The Hyperband algorithm tries to optimize the allocation of resources. The `sub_options` argument must be a function with input as the "resources" and output as the sub-solver options. The `Hyperband` constructor accepts 3 arguments:
1. The maximum resources `R`
2. `η` which roughly determines the proportion of trials discarded between each round of successive halving
3. `inner` which specifies an inner sampler of type `RandomSampler`, `LHSampler` or `CLHSampler`.
Example:
```julia
options = HyperoptOptions(
sub_options = max_iter -> IpoptOptions(max_iter = max_iter),
sampler = Hyperband(R=100, η=3, inner=RandomSampler()),
)
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 3650 | # Interior point method using `Ipopt.jl`
## Description
[Ipopt](https://coin-or.github.io/Ipopt) is a well known interior point optimizer developed and maintained by COIN-OR. The Julia wrapper of Ipopt is [Ipopt.jl](https://github.com/jump-dev/Ipopt.jl). `Ipopt.jl` is wrapped in `NonconvexIpopt.jl`. `NonconvexIpopt` allows the use of `Ipopt.jl` using the `IpoptAlg` algorithm struct. `IpoptAlg` can be used as a second order optimizer computing the Hessian of the Lagrangian in every iteration. Alternatively, an [l-BFGS approximation](https://en.wikipedia.org/wiki/Limited-memory_BFGS) of the Hessian can be used instead turning `IpoptAlg` into a first order optimizer that only requires the gradient of the Lagrangian.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using Ipopt.
```julia
using Nonconvex
Nonconvex.@load Ipopt
alg = IpoptAlg()
options = IpoptOptions()
result = optimize(model, alg, x0, options = options)
```
## Construct an instance
To construct an instance of the Ipopt algorithm, use:
```julia
alg = IpoptAlg()
```
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `IpoptOptions` struct when the algorithm is an `IpoptAlg`. To specify options use keyword arguments in the constructor of `IpoptOptions`, e.g:
```julia
options = IpoptOptions(first_order = false, tol = 1e-4, sparse = false, symbolic = false)
```
There are 4 important and special options:
- `first_order`: `true` by default. When `first_order` is `true`, the first order Ipopt algorithm will be used. And when it is `false`, the second order Ipopt algorithm will be used.
- `symbolic`: `false` by default. When `symbolic` is set to `true`, the gradients, Jacobians and Hessians of the objective, constraint and Lagrangian functions will be calculated using symbolic differentiation from [`Symbolics.jl`](https://github.com/JuliaSymbolics/Symbolics.jl). This is the same approach used by `symbolify` which is described in the [symbolic differentiation section](../gradients/symbolic.md) in the documentation.
- `sparse`: `false` by default. When `sparse` is set to `true`, the gradients, Jacobians and Hessians of the objective, constraint and Lagrangian functions will be treated as sparse vectors/matrices. When combined with `symbolic = true`, the output of symbolic differentiation will be a sparse vector/matrix, akin to setting `sparse = true` in the `symbolify` function discussed in the [symbolic differentiation section](../gradients/symbolic.md) in the documentation. When used alone with `symbolic = false`, [`SparseDiffTools.jl`](https://github.com/JuliaDiff/SparseDiffTools.jl) is used instead for the differentiation and `Symbolics` is only used to get the sparsity pattern, much like how `sparsify` works. For more details on `sparsify` and the way `SparseDiffTools` works, see the [sparsity section](../gradients/sparse.md) in the documentation.
- `linear_constraints`: `false` by default. When `linear_constraints` is `true`, the Jacobian of the constraints will be computed and sparsified once at the beginning. When it is `false`, dense Jacobians will be computed in every iteration.
> Note that there is no need to use `sparsify` or `symbolify` on the model or functions before optimizing it with an `IpoptAlg`. Setting the `sparse` and `symbolic` options above are enough to trigger the symbolic differentiation and/or sparsity exploitation.
All the other options that can be set can be found on the [Ipopt options](https://coin-or.github.io/Ipopt/OPTIONS.html) section of Ipopt's documentation.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 4225 | # A collection of meta-heuristic algorithms in pure Julia
## Description
[Metaheuristics.jl](https://github.com/jmejia8/Metaheuristics.jl) is an optimization library with a collection of [metaheuristic optimization algorithms](https://en.wikipedia.org/wiki/Metaheuristic) implemented. `NonconvexMetaheuristics.jl` allows the use of all the algorithms in the `Metaheuristics.jl` using the `MetaheuristicsAlg` struct.
The main advantage of metaheuristic algorithms is that they don't require the objective and constraint functions to be differentiable. One advantage of the `Metaheuristics.jl` package compared to other black-box optimization or metaheuristic algorithm packages is that a large number of the algorithms implemented in `Metaheuristics.jl` support bounds, inequality and equality constraints using constraint handling techniques for metaheuristic algorithms.
## Supported algorithms
`Nonconvex.jl` only supports the single objective optimization algorithms in `Metaheuristics.jl`. The following algorithms are supported:
- Evolutionary Centers Algorithm (`ECA`)
- Differential Evolution (`DE`)
- Particle Swarm Optimization (`PSO`)
- Artificial Bee Colony (`ABC`)
- Gravitational Search Algorithm (`CGSA`)
- Simulated Annealing (`SA`)
- Whale Optimization Algorithm (`WOA`)
- Machine-coded Compact Genetic Algorithm (`MCCGA`)
- Genetic Algorithm (`GA`)
For a summary of the strengths and weaknesses of each algorithm above, please refer to the table in the [algorithms page](https://jmejia8.github.io/Metaheuristics.jl/dev/algorithms/) in the `Metaheuristics` documentation. To define a `Metaheuristics` algorithm, you can use the `MetaheuristicsAlg` algorithm struct which wraps one of the above algorithm types, e.g. `MetaheuristicsAlg(ECA)` or `MetaheuristicsAlg(DE)`.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using `Metaheuristics`.
```julia
using Nonconvex
Nonconvex.@load Metaheuristics
alg = MetaheuristicsAlg(ECA)
options = MetaheuristicsOptions(N = 1000) # population size
result = optimize(model, alg, x0, options = options)
```
`Metaheuristics` is an optional dependency of Nonconvex so you need to load the package to be able to use it.
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `MetaheuristicsOptions` struct when the algorithm is a `MetaheuristicsAlg`. To specify options use keyword arguments in the constructor of `MetaheuristicsOptions`, e.g:
```julia
options = MetaheuristicsOptions(N = 1000)
```
All the other options that can be set for each algorithm can be found in the [algorithms section](https://jmejia8.github.io/Metaheuristics.jl/dev/algorithms/) of the documentation of `Metaheuristics.jl`. Note that one notable difference between using `Metaheuristics` directly and using it through `Nonconvex` is that in `Nonconvex`, all the options must be passed in through the `options` struct and only the algorithm type is part of the `alg` struct.
## Variable bounds
When using `Metaheuristics` algorithms, finite variables bounds are necessary. This is because the initial population is sampled randomly in the finite interval of each variable. Use of `Inf` as an upper bound or `-Inf` is therefore not acceptable.
## Initialization
Most metaheuristic algorithms are population algorithms which can accept multiple initial solutions to be part of the initial population. In `Nonconvex`, you can specify multiple initial solutions by making `x0` a vector of solutions. However, since `Nonconvex` models support arbitrary collections as decision variables, you must specify that the `x0` passed in is indeed a population of solutions rather than a single solution that's a vector of vectors for instance. To specify that `x0` is a vector of solutions, you can set the `multiple_initial_solutions` option to `true` in the `options` struct, e.g:
```julia
options = MetaheuristicsOptions(N = 1000, multiple_initial_solutions = true)
x0 = [[1.0, 1.0], [0.0, 0.0]]
```
When fewer solutions are passed in `x0` compared to the population size, random initial solutions between the lower and upper bounds are sampled to complete the initial population.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 4200 | # First and second order mixed integer nonlinear programming algorithms
## Description
There are 2 first and second order MINLP solvers available in `Nonconvex`:
1. [Juniper.jl](https://github.com/lanl-ansi/Juniper.jl) with [Ipopt.jl](https://github.com/jump-dev/Ipopt.jl) as a sub-solver. `NonconvexJuniper.jl` allows the use of the branch and bound algorithm in `Juniper.jl` using the `JuniperIpoptAlg` struct.
2. [Pavito.jl](https://github.com/jump-dev/Pavito.jl) with [Ipopt.jl](https://github.com/jump-dev/Ipopt.jl) and [Cbc.jl](https://github.com/jump-dev/Cbc.jl) as sub-solvers. `NonconvexPavito.jl` allows the use of the sequential polyhedral outer-approximations algorithm in `Pavito.jl` using the `PavitoIpoptCbcAlg` struct.
## Juniper + Ipopt
### Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using Juniper and Ipopt.
```julia
using Nonconvex
Nonconvex.@load Juniper
alg = JuniperIpoptAlg()
options = JuniperIpoptOptions()
result = optimize(model, alg, x0, options = options)
```
Juniper is an optional dependency of Nonconvex, so you need to load it in order to use it. Note that the integer constraints must be specified when defining variables. See the [problem definition](../problem/problem.md) documentation for more details.
### Construct an instance
To construct an instance of the Juniper + Ipopt algorithm, use:
```julia
alg = JuniperIpoptAlg()
```
### Options
The options keyword argument to the `optimize` function shown above must be an instance of the `JuniperIpoptOptions` struct when the algorithm is a `JuniperIpoptAlg`. To specify options, use keyword arguments in the constructor of `JuniperIpoptOptions`, e.g:
```julia
options = JuniperIpoptOptions(first_order = false, linear_constraints = true, subsolver_options = IpoptOptions(), atol = 1e-4)
```
There are 3 important and special options you can pass to the optimizer:
- `first_order`: `true` by default. When `first_order` is `true`, the first order Ipopt algorithm will be used. And when it is `false`, the second order Ipopt algorithm will be used.
- `linear_constraints`: `false` by default. When `linear_constraints` is `true`, the Jacobian of the constraints will be computed and sparsified once at the beginning. When it is `false`, dense Jacobians will be computed in every iteration.
- `subsolver_options`: an instance of `IpoptOptions` to be used in the Ipopt sub-solver.
All the other options to Juniper can be found in the [Juniper documentation](https://lanl-ansi.github.io/Juniper.jl/stable/options/).
## Pavito + Ipopt + Cbc
### Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using Pavito, Ipopt and Cbc.
```julia
using Nonconvex
Nonconvex.@load Pavito
alg = PavitoIpoptCbcAlg()
options = PavitoIpoptCbcOptions()
result = optimize(model, alg, x0, options = options)
```
Pavito is an optional dependency of Nonconvex, so you need to load it in order to use it. Note that the integer constraints must be specified when defining variables. See the [problem definition](../problem/problem.md) documentation for more details.
### Construct an instance
To construct an instance of the Pavito + Ipopt + Cbc algorithm, use:
```julia
alg = PavitoIpoptCbcAlg()
```
### Options
The options keyword argument to the `optimize` function shown above must be an instance of the `PavitoIpoptCbcOptions` struct when the algorithm is a `PavitoIpoptCbcAlg`. To specify options, use keyword arguments in the constructor of `PavitoIpoptCbcOptions`, e.g:
```julia
options = PavitoIpoptCbcOptions(first_order = false, subsolver_options = IpoptOptions(), timeout = 120.0)
```
There are 2 important and special options you can pass to the optimizer:
- `first_order`: `true` by default. When `first_order` is `true`, the first order Ipopt algorithm will be used. And when it is `false`, the second order Ipopt algorithm will be used.
- `subsolver_options`: an instance of `IpoptOptions` to be used in the Ipopt sub-solver.
All the other options to Pavito can be found in the [Pavito documentation](https://github.com/jump-dev/Pavito.jl#pavito-solver-options).
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 2033 | # Method of moving asymptotes in pure Julia
## Description
There are 2 versions of the method of moving asymptotes (MMA) that are available in `NonconvexMMA.jl`:
1. The original MMA algorithm from the [1987 paper](https://onlinelibrary.wiley.com/doi/abs/10.1002/nme.1620240207).
2. The globally convergent MMA (GCMMA) algorithm from the [2002 paper](https://epubs.siam.org/doi/abs/10.1137/S1052623499362822).
The MMA algorithm only supports inequality constraints. However, the original algorithm was slightly generalized to handle infinite variable bounds.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using MMA.
```julia
using Nonconvex
Nonconvex.@load MMA
alg = MMA87() # or MMA02()
options = MMAOptions()
result = optimize(model, alg, x0, options = options, convcriteria = KKTCriteria())
```
## Construct an instance
To construct an instance of the original MMA algorithm, use:
```julia
alg = MMA87()
```
or alternatively:
```julia
alg = MMA()
```
To construct an instance of the globally convergent MMA algorithm, use:
```julia
alg = MMA02()
```
or alternatively:
```julia
alg = GCMMA()
```
```@docs
MMA87
MMA02
```
## Options
To specify options for the MMA algorithm, you can construct an instance of `MMAOptions` and use keyword arguments.
```@docs
MMAOptions
```
The `tol` option in MMA can be set to an instance of the `Tolerance` struct:
```@docs
Tolerance
NonconvexCore.ConvergenceState
```
## Convergence criteria
There are 4 convergence criteria available for the MMA algorithm:
- `GenericCriteria`
- `KKTCriteria`
- `ScaledKKTCriteria`
- `IpoptCriteria`
```@docs
NonconvexCore.ConvergenceCriteria
NonconvexCore.GenericCriteria
NonconvexCore.KKTCriteria
NonconvexCore.ScaledKKTCriteria
NonconvexCore.IpoptCriteria
NonconvexCore.assess_convergence!
```
To specify the convergence criteria, use:
```julia
convcriteria = GenericCriteria()
```
replacing `GenericCriteria()` by `KKTCriteria()`, `ScaledKKTCriteria()` or `IpoptCriteria()`.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 726 | # Multi-trajectory search algorithm in pure Julia
## Description
Multiple trajectory search (MTS) is a derivative-free heuristic optimization method presented by [Lin-Yu Tseng and Chun Chen, 2008](https://sci2s.ugr.es/sites/default/files/files/TematicWebSites/EAMHCO/contributionsCEC08/tseng08mts.pdf).
The `MTS` algorithm is implemented in the `NonconvexSearch.jl` package. This module implements all the optimization methods in the paper.
## Quick start
Using the default `MTSOptions()`, the `MTS` algorithm is used for optimization.
```julia
using Nonconvex
Nonconvex.@load MTS
alg = MTSAlg()
options = MTSOptions()
model = Model(f)
lb = [0, 0]
ub = [5, 5]
addvar!(model, lb, ub)
result = optimize(model, alg, x0, options = options)
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 2303 | # Various optimization algorithms from `NLopt.jl`
## Description
[NLopt](https://github.com/stevengj/nlopt) is an optimization library with a collection of optimization algorithms implemented. [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) is the Julia wrapper of `NLopt`. `NonconvexNLopt` allows the use of `NLopt.jl` using the `NLoptAlg` algorithm struct.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using NLopt.
```julia
using Nonconvex
Nonconvex.@load NLopt
alg = NLoptAlg(:LD_SLSQP)
options = NLoptOptions()
result = optimize(model, alg, x0, options = options)
```
NLopt is an optional dependency of Nonconvex so you need to load the package to be able to use it.
## Construct an instance
To construct an instance of NLopt's `NLOPT_LD_SLSQP` algorithm, use:
```julia
alg = NLoptAlg(:LD_SLSQP)
```
All the algorithms available in NLopt are:
- `:GN_DIRECT`
- `:GN_DIRECT_L`
- `:GN_DIRECT_NOSCAL`
- `:GN_DIRECT_L_NOSCAL`
- `:GN_DIRECT_L_RAND_NOSCAL`
- `:GN_ORIG_DIRECT`
- `:GN_ORIG_DIRECT_L`
- `:GN_CRS2_LM`
- `:G_MLSL_LDS`
- `:G_MLSL`
- `:GD_STOGO`
- `:GD_STOGO_RAND`
- `:GN_AGS`
- `:GN_ISRES`
- `:GN_ESCH`
- `:LN_COBYLA`
- `:LN_BOBYQA`
- `:LN_NEWUOA`
- `:LN_NEWUOA_BOUND`
- `:LN_PRAXIS`
- `:LN_NELDERMEAD`
- `:LN_SBPLX`
- `:LD_MMA`
- `:LD_CCSAQ`
- `:LD_SLSQP`
- `:LD_TNEWTON_PRECOND_RESTART`
- `:LD_TNEWTON_PRECOND`
- `:LD_TNEWTON_RESTART`
- `:LD_TNEWTON`
- `:LD_VAR2`
- `:LD_VAR1`
- `:AUGLAG`
- `:AUGLAG_EQ`
For a description of the above algorithms, please refer to the [algorithms section](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/) of NLopt's documentation.
---
**Disclaimer:**
Not all the algorithms have been tested with `Nonconvex`. So if you try one and it doesn't work, please open an issue.
---
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `NLoptOptions` struct when the algorithm is an `NLoptAlg`. To specify options use keyword arguments in the constructor of `NLoptOptions`, e.g:
```julia
options = NLoptOptions(ftol_rel = 1e-4)
```
All the other options that can be set for each algorithm can be found in the [algorithms section](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/) section of NLopt's documentation.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 2381 | # Nonlinear optimization with the MADS (NOMAD) algorithm for continuous and discrete, constrained optimization
## Description
[NOMAD.jl](https://github.com/bbopt/NOMAD.jl) is an optimization package wrapping the C++ implementation of the [NOMAD algorithm](https://dl.acm.org/doi/10.1145/1916461.1916468). `NonconvexNOMAD` allows the use of `NOMAD.jl` using the `NOMADAlg` struct. `NOMAD.jl` supports continuous and integer decision variables as well as bounds and inequality constraints. Linear equality constraints are also supported when no integer decision variables are in the model.
## Quick start
Given a model `model` and an initial solution `x0`, the following can be used to optimize the model using `NOMAD`.
```julia
using Nonconvex
Nonconvex.@load NOMAD
alg = NOMADAlg()
options = NOMADOptions()
result = optimize(model, alg, x0, options = options)
```
`NOMAD` is an optional dependency of Nonconvex so you need to load the package to be able to use it.
## Algorithm types
There are 3 different variants of the `NOMADAlg` struct:
- `NOMADAlg(:explicit)`
- `NOMADAlg(:progressive)`
- `NOMADAlg(:custom)`
The explicit algorithm ensures all the constraints are satisfied at all times removing any infeasible point from the population. The progressive algorithm allows infeasible points to be part of the population but enforces feasibility in a progressive manner. The custom variant allows the use of flags on each constraint to declare it as `:explicit` or `:progressive`. For instance, assume `model` is the `Nonconvex` model and `g1` and `g2` are 2 constraint functions.
```julia
add_ineq_constraint!(model, g1, flags = [:explicit])
add_ineq_constraint!(model, g2, flags = [:progressive])
```
The above code declares the first constraint as explicit and the second as progressive. In other words, every point violating the first constraint will be removed from the population but the second constraint will be more progressively enforced.
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `NOMADOptions` struct when the algorithm is a `NOMADAlg`. To specify options use keyword arguments in the constructor of `NOMADOptions`, e.g:
```julia
options = NOMADOptions()
```
All the options that can be set can be found in the [`NOMAD.jl` documentation](https://bbopt.github.io/NOMAD.jl/stable/nomadProblem/).
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 2286 | # Interior point meta-algorithm for handling nonlinear semidefinite constraints
## Description
If you need to keep your any matrix-valued function of the decision variables positive semidefinite, Nonconvex supports an interface for the [barrier method for semidefinite programming](http://eaton.math.rpi.edu/faculty/Mitchell/courses/matp6640/notes/24A_SDPbarrierbeamer.pdf), which is a meta-algorithm transforming the optimization problem to a series of nonlinear programming problems and solving them using the pre-specified `sub_alg` and `sub_options`.
## Quick start
Optimizing over a multivariate gaussian distribution with artificial samples using `Ipopt`:
```julia
using Nonconvex, Distributions
Nonconvex.@load Semidefinite Ipopt
# Draw random multivariate gaussian samples
# Random groundtruth
mat_dim = 3
μ = randn(mat_dim)
Σ = rand(mat_dim, mat_dim)
Σ = Σ + Σ' + 2I
# Generate
n_sample = 1000
samples = rand(MvNormal(μ, Σ), n_sample)
# Define objective function
function f((x_L, x_D))
return -loglikelihood(MvNormal(μ, decompress_symmetric(x_L, x_D)), samples)
end
# Define the matrix-valued function
function sd_constraint((x_L, x_D))
return decompress_symmetric(x_L, x_D)
end
# Define settings
model = Model(f)
mat_x0 = rand(mat_dim, mat_dim)
mat_x0 = mat_x0 + mat_x0' + I
x0 = [mat_x0[NonconvexSemidefinite.lowertriangind(mat_x0)], diag(mat_x0)]
lbs = [fill(-Inf, length(x0[1])), zeros(length(x0[2]))]
ubs = [fill(Inf, length(x0[1])), fill(Inf, length(x0[2]))]
addvar!(model, lbs, ubs)
add_sd_constraint!(model, sd_constraint)
alg = SDPBarrierAlg(sub_alg=IpoptAlg())
options = SDPBarrierOptions(sub_options=IpoptOptions(max_iter=200), n_iter = 20)
# Optimize
result = optimize(model, alg, x0, options = options)
# Relative error norm
norm(sd_constraint(result.minimizer) - Σ) / norm(Σ)
```
## Options
```@docs
SDPBarrierOptions
```
## Optimizer
```@docs
SDPBarrierAlg
```
## Matrix interfaces
For every `n*n` real positive semidefinite matrix that the optimization objective contains, please provide two inputs `x_L` and `x_D` representing its lower-triangular and diagonal parts. In the function, call `decompress_symmetric(x_L, x_D)` to represent that matrix, which will be handled by Nonconvex automatically.
```@docs
decompress_symmetric
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 5146 | # Surrogate-assisted continuous and discrete, constrained optimization
## Description
Surrogate-assisted optimization replaces expensive functions in the objective and/or constraints by a surrogate. In Nonconvex, a Gaussian process (GP) from [AbstractGPs.jl](https://github.com/JuliaGaussianProcesses/AbstractGPs.jl) is used. A certain amount of "benefit of the doubt" is given to solutions by minimizing:
```julia
μ(x) - η * σ(x)
```
where `μ(x)` and `σ(x)` are the mean and standard deviation of the posterior GP's prediction of the function's value at point `x`.
`η` is a positive number that resembles how much benefit of the doubt we want to give the solution.
A high `η` means more exploration and a low `η` means more exploitation.
Similarly, expensive inequality constraints are replaced by:
```julia
μ(x) - η * σ(x) <= 0
```
giving the solution the benefit of the doubt. And each equality constraint is replaced by 2 inequality constraints as such:
```julia
μ(x) - η * σ(x) <= 0 <= μ(x) + η * σ(x)
```
Once the surrogates are formed, they are solved using a sub-optimizer to get the next query point to update the surrogate model. Prior to the optimization loop, initialization is done using a number of points using a Sobol sequence of points.
## Quick start
```julia
using Nonconvex
Nonconvex.@load BayesOpt
f(x) = sqrt(x[2])
g(x, a, b) = (a*x[1] + b)^3 - x[2]
model = Model()
set_objective!(model, f, flags = [:expensive])
addvar!(model, [1e-4, 1e-4], [10.0, 10.0])
add_ineq_constraint!(model, x -> g(x, 2, 0), flags = [:expensive])
add_ineq_constraint!(model, x -> g(x, -1, 1))
alg = BayesOptAlg(IpoptAlg())
options = BayesOptOptions(
sub_options = IpoptOptions(),
maxiter = 50, ftol = 1e-4, ctol = 1e-5,
)
r = optimize(model, alg, [1.234, 2.345], options = options)
```
Note that the `flags` keyword argument was used when defining the objective and constraints and set to `[:expensive]`. This is a hint to Nonconvex to use a surrogate in place of these constraint functions.
## Construct an instance
To construct an instance of the surrogate-assisted optimization algorithm, use:
```julia
alg = BayesOptAlg(subsolver)
```
where `subsolver` is any Nonconvex optimizer to be used to solve the surrogate model.
## Options
The options keyword argument to the `optimize` function shown above must be an instance of the `BayesOptOptions` struct when the algorithm is a `BayesOptAlg`. The following options can be set using keyword arguments when constructing `BayesOptOptions`.
- `sub_options`: options for the sub-optimizer
- `maxiter`: the maximum number of iterations in the Bayesian optimization routine
- `initialize`: `true` by default. If `true`, the GP will be initialized using a Sobol sequence of query points
- `ninit`: number of initialization points
- `ctol`: feasibility tolerance when accepting a solution
- `ftol`: relative tolerance in the function value
- `postoptimize`: `true` by default. If `true`, a local optimization procedure will be used after the Bayesian optimization is completed.
- `kernel`: the GP kernel used. All the kernels from [KernelFunctions.jl](https://github.com/JuliaGaussianProcesses/KernelFunctions.jl) are available.
- `noise`: GP observation noise parameter
- `std_multiple`: `η` in the description of the algorithm above.
## Advanced: manually constructing surrogate functions
Sometimes a function used in the model may need to be replaced by a surrogate but not the entire objective or constraint function. In this case, the surrogate function can be defined explicitly and passed in to the `optimize` function using the keyword argument `surrogates`. A surrogate for the function `f` can be constructed using:
```julia
s1 = Nonconvex.surrogate(f, x0)
```
where `x0` is the initial query point. The output of `s1(x)` will be an interval from [IntervalArithmetic.jl](https://github.com/JuliaIntervals/IntervalArithmetic.jl) with `lo` and `hi` fields, where `lo = μ(x) - η * σ(x)` and `hi = μ(x) + η * σ(x)`. This interval will propagate through the objective function and/or constraint functions outputting an interval or an array of intervals at the end.
To define the objective or constraint functions using the manually constructed surrogates, one needs to return the `lo` field of the output manually at the end of the objective function or inequality constraint function definitions. Equality constraints should also be transformed to a 2-block inequality constraint manually as described above. When manually passing surrogates to the `optimize` function, the `:expensive` flag is redundant and will be ignored.
Example:
```julia
x0 = [1.234, 2.345]
s1 = Nonconvex.surrogate(f, x0)
s2 = Nonconvex.surrogate(x -> [g(x, 2, 0), g(x, -1, 1)], x0)
model = Model()
set_objective!(model, x -> s1(x).lo)
addvar!(model, [1e-4, 1e-4], [10.0, 10.0])
add_ineq_constraint!(model, x -> getproperty.(s2(x), :lo))
alg = BayesOptAlg(IpoptAlg())
options = BayesOptOptions(
sub_options = IpoptOptions(print_level = 0), maxiter = 50, ctol = 1e-4,
ninit = 2, initialize = true, postoptimize = false,
)
r = optimize(model, alg, x0, options = options, surrogates = [s1, s2])
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 4510 | # Topology optimization of binary structures (TOBS), a nonlinear binary optimization heuristic
## Description
The method of topology optimization of binary structures ([TOBS](https://www.sciencedirect.com/science/article/abs/pii/S0168874X17305619?via%3Dihub)) was originally developed in the context of optimal distribution of material in mechanical components. The TOBS algorithm only supports binary decision variables. The TOBS algorithm is a heuristic that relies on the sequential linearization of the objective and constraint functions, progressively enforcing the constraints in the process. The resulting binary linear program can be solved using any mixed integer linear programming (MILP) solver such as `Cbc`. This process is repeated iteratively until convergence. This package implements the heuristic for binary nonlinear programming problems.
## Construct an instance
To construct an instance of the `TOBS` algorithm, use:
```julia
alg = TOBSAlg()
```
When optimizing a model using `TOBSAlg`, all the variables are assumed to be binary if their lower and upper bounds are 0 and 1 respectively even if the `isinteger` flag was not used. If there are variables with other bounds' values, the optimization will give an error.
## Example
In this example, the classic topology optimization problem of minimizing the compliance of the structure subject to a volume constraint. Begin by installing and loading the packages required.
```julia
import Nonconvex
Nonconvex.@load TOBS
using Pkg
Pkg.add("TopOpt")
using TopOpt
```
Define the problem and its parameters using [TopOpt.jl](https://github.com/JuliaTopOpt/TopOpt.jl).
```julia
E = 1.0 # Young’s modulus
v = 0.3 # Poisson’s ratio
f = 1.0 # downward force
rmin = 6.0 # filter radius
xmin = 0.001 # minimum density
V = 0.5 # maximum volume fraction
p = 3.0 # SIMP penalty
# Define FEA problem
problem_size = (160, 100) # size of rectangular mesh
x0 = fill(1.0, prod(problem_size)) # initial design
problem = PointLoadCantilever(Val{:Linear}, problem_size, (1.0, 1.0), E, v, f)
solver = FEASolver(Direct, problem; xmin=xmin)
TopOpt.setpenalty!(solver, p)
cheqfilter = DensityFilter(solver; rmin=rmin) # filter function
comp = TopOpt.Compliance(problem, solver) # compliance function
```
Define the objective and constraint functions.
```julia
obj(x) = comp(cheqfilter(x)) # compliance objective
constr(x) = sum(cheqfilter(x)) / length(x) - V # volume fraction constraint
```
Finally, define the optimization problem using `Nonconvex.jl` and optimize it.
```julia
m = Model(obj)
addvar!(m, zeros(length(x0)), ones(length(x0)))
Nonconvex.add_ineq_constraint!(m, constr)
options = TOBSOptions()
r = optimize(m, TOBSAlg(), x0; options=options)
r.minimizer
r.minimum
```
The following is a visualization of the optimization history using this example.


## Options
The following are the options that can be set by passing them to `TOBSOptions`, e.g. `TOBSOptions(movelimit = 0.1)`.
- `movelimit`: the maximum move limit in each iteration as a ratio of the total number of variables. Default value is 0.1, i.e. a maximum of 10% of the variables are allowed to flip value in each iteration.
- `convParam`: the tolerance value. The algorithm is said to have converged if the moving average of the relative change in the objective value in the last `pastN` iterations is less than `convParam`. Default value is 0.001.
- `pastN`: the number of past iterations used to compute the moving average of the relative change in the objective value. Default value is 20.
- `constrRelax`: the amount of constraint relaxation applied to the linear approximation in each iteration. This is the relative constraint relaxation if the violation is higher than `constrRelax` and the absolute constraint relaxation otherwise. Default value is 0.1.
- `timeLimit`: the time limit (in seconds) of each MILP solve for the linearized sub-problem. Default value is 1.0.
- `optimizer`: the `JuMP` optimizer type used to solve the MILP sub-problem. Default value is `Cbc.Optimizer`.
- `maxiter`: the maximum number of iterations for the algorithm. Default value is 200.
- `timeStable`: a boolean value that when set to `true` switches on the time stability filter of the objective's gradient, discussed in the paper. Default value is `true`.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 994 | # Using ChainRules in ForwardDiff
`ForwardDiff` is a forward-mode AD package that pre-dates `ChainRules`. `ForwardDiff` therefore does not use the `frule`s defined in `ChainRules`. In order to force `ForwardDiff` to use the `frule` defined for a function, one can use the `Nonconvex.NonconvexUtils.@ForwardDiff_frule` macro provided in `Nonconvex`. This is useful in case `ForwardDiff` is used for the entire function but a component of this function has an efficient `frule` defined that you want to take advantage of. To force `ForwardDiff` to use the `frule` defined for a function `f(x::AbstractVector)`, you can use:
```julia
Nonconvex.NonconvexUtils.@ForwardDiff_frule f(x::AbstractVector{<:ForwardDiff.Dual})
```
The signature of the function specifies the method that will be re-directed to use the `frule` from `ChainRules`. Such `frule` therefore needs to be defined for `f` to begin with. `f` with multiple inputs, scalar inputs and other input collection types are also supported.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 1751 | # Gradients, Jacobians and Hessians
By default, `Nonconvex` uses:
- The reverse-mode automatic differentiation (AD) package, [`Zygote.jl`](https://github.com/FluxML/Zygote.jl), for computing gradients and Jacobians of functions, and
- The forward-mode AD package, [`ForwardDiff.jl`](https://github.com/JuliaDiff/ForwardDiff.jl), over `Zygote.jl` for computing Hessians.
However, one can force `Nonconvex` to use other AD packages or even user-defined gradients and Hessians using special function modifiers. Those special function modifiers customize the behaviour of functions without enforcing the same behaviour on other functions. For instance:
- A specific AD package can be used for one constraint function while the default AD packages are used for other functions in the optimization problem.
- The history of gradients of a specific function can be stored without storing all the gradients of all the functions.
- For functions with a sparse Jacobian or Hessian, the sparsity can be used to speedup the AD using sparse, forward-mode AD for these functions.
In some cases, function modifiers can even be composed on top of each other to create more complex behaviours.
> In `Nonconvex`, function modifiers modify the behaviour of a function when differentiated once or twice using either `ForwardDiff` or any [`ChainRules`](https://github.com/JuliaDiff/ChainRules.jl)-compatible AD package, such as `Zygote.jl`. The following features are all implemented in [`NonconvexUtils.jl`](https://github.com/JuliaNonconvex/NonconvexUtils.jl) and re-exported from `Nonconvex`.
## Table of contents
```@contents
Pages = ["user_defined.md", "other_ad.md", "chainrules_fd.md", "sparse.md", "symbolic.md", "implicit.md", "history.md"]
Depth = 3
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 1157 | # Storing history of gradients
Often one may want to store intermediate solutions, function values and gradients for visualisation or post-processing. This is currently not possible with `Nonconvex.jl` as not all solvers support a callback mechanism. To work around this, the `TraceFunction` modifier can be used to store input, output and optionally gradient values
during the optimization:
```julia
F = TraceFunction(f; on_call = false, on_grad = true)
```
`F` can now be used in place of `f` in objective and/or constraint functions in a `Nonconvex` model. If the `on_call` keyword argument is set to `true` (default is `true`), the input and output values are stored every time the function `F` is called. If the `on_grad` keyword argument is set to `true` (default is `true`), the input, output and gradient values are stored every time the function `F` is differentiated with either `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote.jl`. The history is stored in `F.trace`. The `TraceFunction` modifier can be composed with other AD-centric function modifiers in `Nonconvex`, e.g. the `sparsify` or `symbolify` function modifiers.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 5997 | # Implicit differentiation
## Background
Differentiating implicit functions efficiently using the implicit function theorem has many applications including:
- Nonlinear partial differential equation constrained optimization
- Differentiable optimization layers in deep learning (aka deep declarative networks)
- Differentiable fixed point iteration algorithms for optimal transport (e.g. the Sinkhorn methods)
- Gradient-based bi-level and robust optimization (aka anti-optimization)
- Multi-parameteric programming (aka optimization sensitivity analysis)
For more on implicit differentiation, refer to the last part of the [_Understanding automatic differentiation (in Julia)_](https://www.youtube.com/watch?v=UqymrMG-Qi4) video on YouTube and the [_Efficient and modular implicit differentiation_](https://arxiv.org/abs/2105.15183) manuscript for an introduction to the methods implemented here.
## Relationship to [`ImplicitDifferentiation.jl`](https://github.com/gdalle/ImplicitDifferentiation.jl)
[`ImplicitDifferentiation.jl`](https://github.com/gdalle/ImplicitDifferentiation.jl) is an attempt to simplify the implementation in `Nonconvex` making it more lightweight and better documented. For instance, the [documentation of `ImplicitDifferentiation`](https://gdalle.github.io/ImplicitDifferentiation.jl/) presents a number of examples of implicit functions all of which can be defined using `Nonconvex` instead.
## Explicit parameters
There are 4 components to any implicit function:
1. The parameters `p`
2. The variables `x`
3. The residual `f(p, x)` which is used to define `x(p)` as the `x` which satisfies `f(p, x) == 0` for a given value `p`
4. The algorithm used to evaluate `x(p)` satisfying the condition `f(p, x) == 0`
In order to define a differentiable implicit function using `Nonconvex`, you have to specify the "forward" algorithm which finds `x(p)`. For instance, consider the following example:
```julia
using SparseArrays, NLsolve, Zygote, Nonconvex
N = 10
A = spdiagm(0 => fill(10.0, N), 1 => fill(-1.0, N-1), -1 => fill(-1.0, N-1))
p0 = randn(N)
f(p, x) = A * x + 0.1 * x.^2 - p
function forward(p)
# Solving nonlinear system of equations
sol = nlsolve(x -> f(p, x), zeros(N), method = :anderson, m = 10)
# Return the zero found (ignore the second returned value for now)
return sol.zero, nothing
end
```
`forward` above solves for `x` in the nonlinear system of equations `f(p, x) == 0` given the value of `p`. In this case, the residual function is the same as the function `f(p, x)` used in the forward pass. One can then use the 2 functions `forward` and `f` to define an implicit function using:
```julia
imf = ImplicitFunction(forward, f)
xstar = imf(p0)
```
where `imf(p0)` solves the nonlinear system for `p = p0` and returns the zero `xstar` of the nonlinear system. This function can now be part of any arbitrary Julia function differentiated by Zygote, e.g. it can be part of an objective function in an optimization problem using gradient-based optimization:
```julia
obj(p) = sum(imf(p))
g = Zygote.gradient(obj, p0)[1]
```
In the implicit function's adjoint rule definition, the partial Jacobian `∂f/∂x` is used according to the implicit function theorem. Often this Jacobian or a good approximation of it might be a by-product of the `forward` function. For example when the `forward` function does an optimization using a BFGS-based approximation of the Hessian of the Lagrangian function, the final BFGS approximation can be a good approximation of `∂f/∂x` where the residual `f` is the gradient of the Lagrangian function wrt `x`. In those cases, this Jacobian by-product can be returned as the second argument from `forward` instead of `nothing`.
## Implicit parameters
In some cases, it may be more convenient to avoid having to specify `p` as an explicit argument in `forward` and `f`. The following is also valid to use and will give correct gradients with respect to `p`:
```julia
function obj(p)
N = length(p)
f(x) = A * x + 0.1 * x.^2 - p
function forward()
# Solving nonlinear system of equations
sol = nlsolve(f, zeros(N), method = :anderson, m = 10)
# Return the zero found (ignore the second returned value for now)
return sol.zero, nothing
end
imf = ImplicitFunction(forward, f)
return sum(imf())
end
g = Zygote.gradient(obj, p0)[1]
```
Notice that `p` was not an explicit argument to `f` or `forward` in the above example and that the implicit function is called using `imf()`. Using some explicit parameters and some implicit parameters is also supported.
## Matrix-free linear solver in the adjoint
In the adjoint definition of implicit functions, a linear system:
```julia
(df/dy) * x = v
```
is solved to find the adjoint vector. To solve the system using a matrix-free iterative solver (GMRES by default) that avoids constructing the Jacobian `df/dy`, you can set the `matrixfree` keyword argument to `true` (default is `false`). When set to `false`, the entire Jacobian matrix is formed and the linear system is solved using LU factorization.
## Arbitrary data structures
Both `p` and `x` above can be arbitrary data structures, not just arrays of numbers.
## Tolerance
The implicit function theorem assumes that some conditions `f(p, x) == 0` is satisfied. In practice, this will only be approximately satisfied. When this condition is violated, the gradient reported by the implicit function theorem cannot be trusted since its assumption is violated. The maximum tolerance allowed to "accept" the solution `x(p)` and the gradient is given by the keyword argument `tol` (default value is `1e-5`). When the norm of the residual function `f(p, x)` is greater than this tolerance, `NaN`s are returned for the gradient instead of the value computed via the implicit function theorem. If additionally, the keyword argument `error_on_tol_violation` is set to `true` (default value is `false`), an error is thrown if the norm of the residual exceeds the specified tolerance `tol`.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 4121 | # Using other AD packages
`Nonconvex` uses `Zygote` and `ForwardDiff` by default. There are other AD packages in Julia with different tradeoffs. It is possible to use other AD packages to differentiate specific functions in `Nonconvex` using function modifiers.
[`AbstractDifferentiation.jl`](https://github.com/JuliaDiff/AbstractDifferentiation.jl) is a package that defines a unified API for multiple AD packages. Each AD package has a "backend type" in `AbstractDifferentiation`. You can use any `AbstractDifferentiation`-compatible AD package to differentiate specific functions in `Nonconvex`. The list of `AbstractDifferentiation`-compatible AD packages (other than `Zygote`) are:
- [`FiniteDifferences.jl`](https://github.com/JuliaDiff/FiniteDifferences.jl)
- [`ForwardDiff.jl`](https://github.com/JuliaDiff/ForwardDiff.jl)
- [`ReverseDiff.jl`](https://github.com/JuliaDiff/ReverseDiff.jl)
- [`Tracker.jl`](https://github.com/FluxML/Tracker.jl)
For more on how to construct a backend struct for each AD package, please refer to the README file of the [`AbstractDifferentiation`](https://github.com/JuliaDiff/AbstractDifferentiation.jl) repository.
## `abstractdiffy`ing a function
In order to use a specific `AbstractDifferentiation`-compatible AD package to differentiate a function `f(x...)` used in a `Nonconvex` objective/constraint, you can use the `abstractdiffy` function modifier from `Nonconvex`:
```julia
F = abstractdiffy(f, backend, x...)
F(x...)
```
where `backend` is an `AbstractDifferentiation` backend struct for the desired AD package, and `x` are all the input arguments to `f`.
The following are common `backend` choices:
- `AbstractDifferentiation.ForwardDiffBackend()` for `ForwardDiff`
- `AbstractDifferentiation.FiniteDifferencesBackend()` for `FiniteDifferences`
- `AbstractDifferentiation.ReverseDiffBackend()` for `ReverseDiff`
- `AbstractDifferentiation.TrackerBackend()` for `Tracker`
Note that in order to define such backend type, one must first load the `AbstractDifferentiation` package as well as the AD package to be used, e.g.:
```
using AbstractDifferentiation, ReverseDiff
backend = AbstractDifferentiation.ReverseDiffBackend()
```
Having defined `F` like this, whenever `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote` is used to differentiate `F`, the AD package corresponding to the chosen `backend` will be used instead.
To use [`ForwardDiff`](https://github.com/JuliaDiff/ForwardDiff.jl) as the backend of choice, a shortcut is also available using the `forwarddiffy` function modifier instead of the more general `abstractdiffy`:
```julia
F = forwarddiffy(f, x...)
F(x...)
```
which is short for:
```julia
backend = AbstractDifferentiation.ForwardDiffBackend()
F = abstractdiffy(f, backend, x...)
```
## `abstractdiffy`ing a model
Instead of `abstractdiffy`ing or `forwarddiffy`ing one function at a time, the user can instead `abstractdiffy` or `forwarddiffy` an entire `Nonconvex` model including the objective, all the inequality constraint functions, all the equality constraint functions and all the semidefinite constraint functions.
```julia
ad_model = abstractdiffy(model, backend)
```
where `model` is of type `Model` or `DictModel`. `ad_model` can now be optimized using any of the `Nonconvex` algorithms compatible with the model. Similarly, `forwarddiffy` can be used on an entire model:
```julia
fd_model = forwarddiffy(model)
```
By default, the objective and all the constraint functions will be modified with `abstractdiffy`/`forwarddiffy`. To prevent the modification of some component of the model, any of the following keyword arguments can be set to `false` (default is `true`):
- `objective = false`
- `ineq_constraints = false`
- `eq_constraints = false`
- `sd_constraints = false`
Setting the `objective`, `ineq_constraints`, `eq_constraints`, and/or `sd_constraints` keyword arguments to `false` (default is `true`) will prevent the modification of the objective, all the inequality constraint functions, all the equality constraint functions, and/or all the semidefinite constraint functions respectively.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 7092 | # Sparse Jacobian or Hessian
## Background
For functions with a sparse Jacobian or Hessian, it can sometimes be useful to exploit such sparsity to speedup the computation of the Jacobian. This can be done using the [`SparseDiffTools.jl`](https://github.com/JuliaDiff/SparseDiffTools.jl) package.
`SparseDiffTools` can compute multiple columns of the Jacobian matrix of a vector-valued function `y = f(x)` simultaneously using a single Jacobian-vector product operation. Such columns correspond to a subset of the input variables, e.g. `(x[1], x[3])`, which however must not overlap in the output variables they influence. For instance, assume
- `y[1]` and `y[2]` are a function of `x[1]` and `x[2]` only, and
- `y[3]` and `y[4]` are a function of `x[3]` and `x[4]` only.
The Jacobian `dy/dx` will therefore have a block diagonal structure. Additionally, since `x[1]` and `x[3]` do not affect the same output variables, their corresponding columns in the block-diagonal Jacobian can be computed simultaneously using a single Jacobian-vector product. The same applies to `x[2]` and `x[4]`. Finding such subsets of input variables such that no 2 input variables in a subset affect the same output is done using `SparseDiffTools`. In the diagonal Jacobian case, none of the input variables overlap, so all the columns of the Jacobian can be obtained using a single Jacobian-vector product.
The problem of finding the optimal splitting of input variables to require the least number of Jacobian-vector products when computing the full Jacobian can be formulated as a [graph coloring](https://en.wikipedia.org/wiki/Graph_coloring) problem in computer science, which is an [NP-hard](https://en.wikipedia.org/wiki/NP-hardness) problem. `SparseDiffTools` uses a tractable heuristic to find reasonable splittings for different Jacobian or Hessian sparsity patterns. The sparsity pattern of the Jacobian or Hessian can either be user-provided or it will be automatically uncovered using [`Symbolics.jl`](https://github.com/JuliaSymbolics/Symbolics.jl).
In `Nonconvex`, you can enforce the use of `SparseDiffTools` for specific functions using the `sparsify` function modifier. In particular, the `rrule` and `frule` of the modified function will use `SparseDiffTools` to find the full Jacobian first, then perform either a Jacobian-vector product in the `frule` or a vector-Jacobian product in the `rrule`. Such an `frule` will also be used by `ForwardDiff` if used to differentiate the modified function. For more on `frule`s, `rrule`s, Jacobian-vector products and vector-Jacobian products, refer to the following video on [Understanding automatic differentiation (in Julia)](https://www.youtube.com/watch?v=UqymrMG-Qi4).
## Sparsifying a function
### First order derivatives
In order to force `Nonconvex` to use `SparseDiffTools` when differentiating a function `f(x...)` once, the `sparsify` function modifier can be used:
```julia
F = sparsify(f, x...; hessian = false)
F(x...)
```
where `x` is some sample input arguments to `f`. `F(x...)` can now be used inplace of `f(x...)` in objectives and/or constraints to be differentiated. Whenever `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote` is used to differentiate `F` once, `SparseDiffTools` will now be used.
### Second order derivatives
When `hessian = false` (the default value), only the Jacobian/gradient of `F` will be treated as sparse. In order to use `SparseDiffTools` to compute sparse second order derivatives as well, you can set `hessian = true`. This is recommended for scalar-valued functions with sparse Hessian matrices. Setting `hessian = true` will also work for vector-valued functions `f` or for functions `f` that return multiple, non-vector outputs. The sparsity of the third order derivative tensor will be used to compute the third order tensor efficiently.
### User-defined sparsity patterns
Using `sparsify` as shown above will make use of `Symbolics` to uncover the sparsity of the Jacobian and Hessian matrices of `f`. In some cases, the function `f` may not be `Symbolics`-compatible or it may have a known sparsity pattern. The user can therefore use the `jac_pattern` or `hess_pattern` keyword arguments to set the pattern manually.
The `jac_pattern` is expected to be a `SparseMatrixCSC` of element type `Bool` with `true` where there is a structural non-zero, and `false` where there is a structural zero in the Jacobian matrix. The size of `jac_pattern` should be `noutputs x ninputs` where `noutputs` is the number of outputs of `f` and `ninputs` is the number of inputs to `f`. When the inputs and/or outputs are multiple and/or non-vector, they are assumed to be flattened to vectors and `noutputs`/`ninputs` is the length of the flat vector.
Passing the Hessian sparsity pattern is also possible using the `hess_pattern` keyword argument. For scalar-valued functions, `hess_pattern` should have size `ninputs x ninputs` where `ninputs` is the number of input variables in the flattened input arguments. For vector-valued functions, the sparsity pattern will be the sparsity pattern of the Jacobian of the linearized Jacobian of `f`. Assume `f` takes a single vector input `x` and returns a single output vector `y`. The sparsity pattern will be that of `d(vec(dy/dx))/dx`. `hess_pattern` should therefore have size `(noutputs * ninputs) x ninputs`. For example, assume `y` is a vector of length 2 and `x` is a vector of length 3. The Jacobian `dy/dx` will be a matrix of size `2 x 3`. `vec(dy/dx)` will be a vector of length 6. `d(vec(dy/dx))/dx` will be a matrix of size `6 x 3`. `hess_pattern` should therefore be a `SparseMatrixCSC` with element type `Bool` and size `6 x 3`.
For general functions `f` with multiple or non-vector inputs or outputs, `noutputs` and `ninputs` are the lengths of the flattened outputs and inputs respectively.
## Sparsifying a model
Instead of sparsifying one function at a time, the user can instead sparsify an entire `Nonconvex` model including the objective, all the inequality constraint functions, all the equality constraint functions and all the semidefinite constraint functions.
```julia
sp_model = sparsify(model, hessian = true)
```
where `model` is of type `Model` or `DictModel` and `hessian` has the same interpretation as in the function sparsification above. `sp_model` can now be optimized using any of the `Nonconvex` algorithms compatible with the model.
By default, the objective and all the constraint functions will be sparsified. To prevent the sparsification of some component of the model, any of the following keyword arguments can be set to `false` (default is `true`):
- `objective = false`
- `ineq_constraints = false`
- `eq_constraints = false`
- `sd_constraints = false`
Setting the `objective`, `ineq_constraints`, `eq_constraints`, and/or `sd_constraints` keyword arguments to `false` (default is `true`) will prevent the sparsification of the objective, all the inequality constraint functions, all the equality constraint functions, and/or all the semidefinite constraint functions respectively.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 3671 | # Symbolic differentiation
## Background
For some functions, a tractable symbolic gradient/Jacobian/Hessian may exist. [`Symbolics.jl`](https://github.com/JuliaSymbolics/Symbolics.jl) is a symbolic mathematics package in Julia that can uncover the mathematical expression from Julia functions and then symbolically differentiate the resulting expression. Symbolic simplifications and cancellations can sometimes lead to computational savings compared to algorithmic differentiation. Symbolic differentiation can further exploit the sparsity of the gradient, Jacobian and/or Hessian if one exists.
In `Nonconvex`, you can enforce the use of `Symbolics` to symbolically differentiate specific functions using the `symbolify` function modifier. In particular, the `Symbolics`-derived gradient/Jacobian/Hessian functions will be used whenever `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote` is used to differentiate the modified function.
## Symbolifying a function
### First order derivatives
In order to force `Nonconvex` to use `Symbolics` when differentiating a function `f(x...)` once, the `symbolify` function modifier can be used:
```julia
F = symbolify(f, x...; hessian = false, sparse = false, simplify = false)
F(x...)
```
where `x` is some sample input arguments to `f`. `F(x...)` can now be used inplace of `f(x...)` in objectives and/or constraints to be differentiated. Whenever `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote` is used to differentiate `F` once, the `Symbolics`-derived gradient/Jacobian will now be used.
The `sparse` keyword argument can be set to `true` (default is `false`) to tell `Symbolics` to return a sparse gradient/Jacobian for the function `F`. The `simplify` keyword argument can be set to `true` (default is `false`) to tell `Symbolics` to simplify the mathematical expressions for the gradient/Jacobian functions.
### Second order derivatives
When `hessian = false` (the default value), only the Jacobian/gradient of `F` will be computed with `Symbolics`. In order to use `Symbolics` to differentiate the function `F` twice, you can set `hessian = true`. Setting `hessian = true` will also work for vector-valued functions `f` or for functions `f` that return multiple, non-vector outputs. The `sparse` and `simplify` keyword arguments work the same way when `hessian` is set to `true`.
## Symbolifying a model
Instead of symbolifying one function at a time, the user can instead symbolify an entire `Nonconvex` model including the objective, all the inequality constraint functions, all the equality constraint functions and all the semidefinite constraint functions.
```julia
sym_model = symbolify(model, hessian = true, simplify = true, sparse = true)
```
where `model` is of type `Model` or `DictModel` and `hessian`, `simplify` and `sparse` have the same interpretation as in the function symbolification above. `sym_model` can now be optimized using any of the `Nonconvex` algorithms compatible with the model.
By default, the objective and all the constraint functions will be symbolified. To prevent the symbolification of some component of the model, any of the following keyword arguments can be set to `false` (default is `true`):
- `objective = false`
- `ineq_constraints = false`
- `eq_constraints = false`
- `sd_constraints = false`
Setting the `objective`, `ineq_constraints`, `eq_constraints`, and/or `sd_constraints` keyword arguments to `false` (default is `true`) will prevent the symbolification of the objective, all the inequality constraint functions, all the equality constraint functions, and/or all the semidefinite constraint functions respectively.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 1904 | # User-defined gradient, Jacobian or Hessian
## Gradients and Jacobians
To use a user-defined gradient/Jacobian function `g(x)` for a function `f(x)`, you can use the `CustomGradFunction` modifier:
```julia
F = CustomGradFunction(f, g)
F(x)
```
`F` can be then used in place of `f` as an objective function, as a constraint function or as part of any such function. When `f` is scalar-valued, `g` is expected to return a gradient vector. When `f` is vector-valued, `g` is expected to return a Jacobian matrix. Whenever `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote` is used to differentiate `F`, the custom gradient/Jacobian function `g` will be used.
## Hessian or Hessian vector product
For second-order optimization algorithms, a user-defined Hessian function `h(x)` can be used for any scalar-valued function `f(x)` with gradient `g(x)`. To use a user-defined Hessian function `h(x)`, you can use the `CustomHessianFunction` modifier:
```julia
F = CustomHessianFunction(f, g, h)
F(x)
```
`F` can be then used in place of `f` as an objective function, as a constraint function or as part of any such function. `f` is expected to return a scalar, `g` is expected to return a gradient vector and `h` is expected to return a symmetric Hessian matrix. Whenever `ForwardDiff` or any `ChainRules`-compatible AD package such as `Zygote` is used to differentiate `F`, the custom gradient and Hessian functions will be used.
Instead of a Hessian function `h`, alternatively a Hessian-vector product operator `h(x, v)` can be used, which multiplies the Hessian of `f` at `x` by the vector `v`. To use a Hessian-vector product operator `hvp(x, v)` instead of computing the full Hessian, you can pass the `hvp` function as the third argument to `CustomHessianFunction` and set the `hvp` keyword argument to `true`:
```julia
F = CustomHessianFunction(f, g, hvp; hvp = true)
F(x)
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 3660 | # `DictModel` definition
There are 2 ways to define a `DictModel`. The direct method is:
```julia
model = DictModel()
```
To pass an objective function while constructing the model, use:
```julia
model = DictModel(obj)
```
where `obj` is a function that takes a single `OrderedDict` argument.
## JuMP model to Nonconvex DictModel
JuMP.jl has an excellent API for defining variables and linear constraints. Using JuMP makes it straightforward to copy a set of linear constraints and variable definitions from a paper. In Nonconvex, you can start with a JuMP model, define variables and constraints using JuMP's API then convert it to a `DictModel`. For example:
```julia
jump_model = JuMP.Model()
@variable jump_model 0 <= x[i=1:3] <= 1
@constraint jump_model sum(x) <= 1
model = DictModel(jump_model)
```
The objective can also be defined either using JuMP or Nonconvex. Once you convert the JuMP model to a Nonconvex model, you can go ahead and define more variables and constraints and/or set the objective in Nonconvex.
## Variable definition
### Add a single variable
Each variable in a `DictModel` has a name which can be a symbol or string. Each named variable can have an arbitrary type, e.g:
1. Vectors or arrays in general
2. Dictionaries
3. Structs
4. `Tuple`s
5. `NamedTuple`s
6. Nested data structures
Vectorization and de-vectorization are handled automatically by Nonconvex.
To add a new named variable to a `DictModel` with a name `:a` and lower and upper bounds `lb` and `ub` respectively, use:
```julia
addvar!(model, :a, lb, ub)
```
Similar to `Model`, optional keyword arguments `init` and `integer` can be set, and the types of the initial value, `lb` and `ub` must be the same.
### Add multiple variables
There is no way to add multiple variables simultaneously to a `DictModel` however a single named variable that's a vector can be added.
## Objective definition
To specify an objective function after creating the model, use:
```julia
set_objective!(model, obj)
```
where `obj` is a function that takes a single `OrderedDict` argument. The `OrderedDict` input to `obj` will be of the same structure, shape and types as the `OrderedDict` initial solution, lower bounds and upper bounds.
## Inequality constraint definition
To define an inequality constraint `f(x) <= 0`, where `f` is a Julia function that accepts a single `OrderedDict` input, use:
```julia
add_ineq_constraint!(model, f)
```
The `OrderedDict` input to `f` will be of the same structure, shape and types as the `OrderedDict` initial solution, lower bounds and upper bounds. The function `f` can return:
1. A number, in which case the constraint will be `f(x) <= 0`
2. A vector or array of numbers, in which case the constraint will be applied element-wise `f(x) .<= 0`.
3. An arbitrary container or data structure, in which case the output will be vectorized first and the constraint will be applied element-wise on the vectorized output.
## Equality constraint definition
To define an equality constraint `f(x) == 0`, where `f` is a Julia function that accepts a single `OrderedDict` input, use:
```julia
add_eq_constraint!(model, f)
```
The `OrderedDict` input to `f` will be of the same structure, shape and types as the `OrderedDict` initial solution, lower bounds and upper bounds. The function `f` can return:
1. A number, in which case the constraint will be `f(x) == 0`
2. A vector or array of numbers, in which case the constraint will be applied element-wise `f(x) .== 0`.
3. An arbitrary container or data structure, in which case the output will be vectorized first and the constraint will be applied element-wise on the vectorized output.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 4968 | # `Model` definition
To define an empty model, run:
```julia
model = Model()
```
To specify an objective function `obj` when creating the model, run:
```julia
model = Model(obj)
```
where `obj` is a function that takes a single vector argument.
## Variable definition
### Add a single variable
To add a new variable to a `Model` with lower and upper bounds `lb` and `ub` respectively, use:
```julia
addvar!(model, lb, ub)
```
The variables added will be stacked on top of each other with a linear integer index. The lower and upper bounds for each variable don't have to be numbers, they can be:
1. Dictionaries
2. Structs
3. `Tuple`s
4. `NamedTuple`s
5. Nested data structures
However, the values input cannot be vectors. A vector input has a different interpretation. See the next section.
The types of the lower and upper bounds of each variable must be the same and this type will be assumed to be the type of the decision variable. Different variables can have different types though. Vectorization and de-vectorization are handled automatically by Nonconvex.
To specify an initial value, use the `init` keyword argument. To add an integer constraint on the variable, use the `integer` keyword argument. For example:
```julia
addvar!(model, 0.0, 10.0, init = 1.0, integer = true)
```
`init` must have the same type as the lower and upper bounds.
### Add multiple variables
To add multiple variables simultaneously, pass in a vector of values for the bounds and optionally for the `init` and `integer` keyword arguments.
```julia
addvar!(model, [0.0, 0.0], [10.0, 10.0], init = [1.0, 1.0], integer = [true, false])
```
The elements of the vector can be:
1. Vectors or arrays in general
2. Dictionaries
3. Structs
4. `Tuple`s
5. `NamedTuple`s
6. Nested data structures
Note that the use of vectors as elements is allowed. Similarly, the types of the lower and upper bounds and the initial values must be the same.
## Objective definition
To specify an objective function after creating the model, use:
```julia
set_objective!(model, obj)
```
where `obj` is a function that takes a single vector argument. The vector input to `obj` will be of the same structure, shape and types as the initial solution, lower bound and upper bound vector.
## Inequality constraint definition
To define an inequality constraint `f(x) <= 0`, where `f` is a Julia function that accepts a single input vector, use:
```julia
add_ineq_constraint!(model, f)
```
The vector input to `f` will be of the same structure, shape and types as the initial solution, lower bound and upper bound vector. The function `f` can return:
1. A number, in which case the constraint will be `f(x) <= 0`
2. A vector or array of numbers, in which case the constraint will be applied element-wise `f(x) .<= 0`.
3. An arbitrary container or data structure, in which case the output will be vectorized first and the constraint will be applied element-wise on the vectorized output.
## Equality constraint definition
To define an equality constraint `f(x) == 0`, where `f` is a Julia function that accepts a single input vector, use:
```julia
add_eq_constraint!(model, f)
```
The vector input to `f` will be of the same structure, shape and types as the initial solution, lower bound and upper bound vector. The function `f` can return:
1. A number, in which case the constraint will be `f(x) == 0`
2. A vector or array of numbers, in which case the constraint will be applied element-wise `f(x) .== 0`.
3. An arbitrary container or data structure, in which case the output will be vectorized first and the constraint will be applied element-wise on the vectorized output.
## Changing variable bounds
After defining the variables, it is possible to set the minimum and maximum variable bounds to different variables. This is useful for example in iterative procedures where the bounds are updated and the problem is resolved.
To update the entire vector of minimum bounds, you can use:
```julia
setmin!(model, newmin)
```
where `newmin` is a vector of bounds. `-Inf` is allowed in the bounds.
To set a new minimum bound for the `i`th variable only, you can use:
```julia
setmin!(model, i, newmin)
```
instead where `newmin` is a minimum bound of the appropriate type depending on the type of the `i`th variable.
Similarly, to update the entire vector of maximum bounds, you can use:
```julia
setmax!(model, newmax)
```
where `newmax` is a vector of bounds. `Inf` is allowed in the bounds.
To set a new maximum bound for the `i`th variable only, you can use:
```julia
setmax!(model, i, newmax)
```
instead where `newmax` is a maximum bound of the appropriate type depending on the type of the `i`th variable.
## Changing integrality constraints
To constrain a variable to be integer or relax the integrality constraint on the `i`th variable, you can use:
```julia
setinteger!(model, i, integer)
```
where `integer` is `true` to constrain the variable or `false` to relax the constraint.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 660 | # Problem definition
There are 3 ways to define a model in `Nonconvex.jl`:
1. `Model` which assumes all the variables are indexed by an integer index starting from 1. The decision variables are therefore a vector.
2. `DictModel` which assumes each variable has a name. The decision variables are stored in an `OrderedDict`, an ordered dictionary data structure.
3. Start from `JuMP.Model` and convert it to `DictModel`. This is convenient to make use of `JuMP`'s user-friendly macros for variable and linear expression, objective or constraint definitions.
## Table of contents
```@contents
Pages = ["model.md", "dict_model.md", "queries.md"]
Depth = 3
```
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"MIT"
] | 2.1.3 | c1421f259b2dd66ed19d9cede5edcea71636b7e6 | docs | 2510 | # Model queries
There is a variety of information you can query about the model after constructing it. These queries can be useful to check that the model was defined correctly, or in the post-processing step after the model has been optimized.
## Number of decision variables
To query the number of decision variables in a model, use:
```julia
NonconvexCore.getnvars(model)
```
## Number of constraints
To query the number of inequality constraints in a model, you can use:
```julia
NonconvexCore.getnineqconstraints(model)
```
A vector-valued constraint will be counted only once.
To query the number of equality constraints, you can use:
```julia
NonconvexCore.getneqconstraints(model)
```
To query the number of semidefinite constraints, you can use:
```julia
NonconvexCore.getnsdconstraints(model)
```
To query the total number of constraints in a model, you can use:
```julia
NonconvexCore.getnconstraints(model)
```
This is the sum of all the previous queries.
## Problem dimension
To get a quick overview of the number of constraints and variables in the model, you can use:
```julia
NonconvexCore.getdim(model)
```
which is short for:
```julia
(NonconvexCore.getnconstraints(model), NonconvexCore.getnvars(model))
```
## Objective and constraint functions
You can get the objective and constraint functions as regular Julia functions which can be evaluated. To get the objective function, you can use:
```julia
obj = NonconvexCore.getobjective(model)
```
To get a function for all the inequality constraints, you can use:
```julia
ineq = NonconvexCore.getineqconstraints(model)
```
To get a function for all the equality constraints, you can use:
```julia
ineq = NonconvexCore.geteqconstraints(model)
```
To get a function for all the semideifnite constraint functions, you can use:
```julia
ineq = NonconvexCore.getsdconstraints(model)
```
## Initial solution
You can get the initial solution using:
```julia
x0 = NonconvexCore.getinit(model)
```
## Variables bounds
You can query the maximum variable bounds for all the variables using:
```julia
NonconvexCore.getmax(model)
```
Similarly, you can query the minimum variable bounds for all the variables using:
```julia
NonconvexCore.getmin(model)
```
## Integrality constraints
To get the vector indicating which variables are integer, you can use:
```julia
model.integer
```
which will be a `BitVector` (similar to a vector of `Bool`) with `true` corresponding to an integer constraint and `false` corresponding to a continuous variable.
| Nonconvex | https://github.com/JuliaNonconvex/Nonconvex.jl.git |
|
[
"Apache-2.0"
] | 0.2.1 | ebbe3767049b5276a2158a33a417b2fb73a66392 | code | 7330 | module Scrypt
using Nettle
using SIMD
include("data/Salsa512.jl")
include("data/ScryptParameters.jl")
include("util.jl")
"""
    scrypt(parameters::ScryptParameters, key, salt, derivedkeylength)

Derive `derivedkeylength` bytes from `key` and `salt` using the scrypt key
derivation function with the cost settings in `parameters`.

Throws an `ArgumentError` if `derivedkeylength` is not positive.
"""
function scrypt(parameters::ScryptParameters, key::Vector{UInt8}, salt::Vector{UInt8}, derivedkeylength)
    derivedkeylength > 0 || throw(ArgumentError("Must be > 0."))
    # Expand key/salt into p independent elements of working data (one column each).
    buffer = pbkdf2_sha256_1(key, salt, bufferlength(parameters))
    blocksperelement = elementblockcount(parameters)
    elements = reshape(reinterpret(Salsa512, buffer), (blocksperelement, parameters.p))
    # Mix each element in place; `buffer` shares memory with `elements`, so the
    # mixed data then salts the final PBKDF2 pass.
    for column ∈ 1:parameters.p
        smix!(reshape(view(elements, :, column), blocksperelement), parameters)
    end
    return pbkdf2_sha256_1(key, buffer, derivedkeylength)
end
# SHA-256 output length in bytes.
const HASH_LENGTH = 256 ÷ 8

"""
    pbkdf2_sha256_1(key, salt, derivedkeylength)

Single-iteration PBKDF2 (RFC 8018 with c = 1) based on HMAC-SHA-256, returning
`derivedkeylength` bytes derived from `key` and `salt`.
"""
function pbkdf2_sha256_1(key, salt::Vector{UInt8}, derivedkeylength)
    blockcount = cld(derivedkeylength, HASH_LENGTH)
    # RFC 8018 appends a 4-byte big-endian block index to the salt for each block.
    salt = [salt; zeros(UInt8, 4)]
    salttail = view(salt, length(salt) - 3:length(salt))
    derivedkey = Matrix{UInt8}(undef, HASH_LENGTH, blockcount)
    for i ∈ 1:blockcount
        # hton yields big-endian bytes on any host (the previous `reverse` of
        # native bytes was only correct on little-endian machines).
        salttail[:] = reinterpret(UInt8, [hton(UInt32(i))])
        derivedkey[:, i] = digest("sha256", key, salt)
    end
    # Concatenate the blocks and truncate to the requested length.
    derivedkey = reshape(derivedkey, blockcount * HASH_LENGTH)[1:derivedkeylength]
    return derivedkey
end
# Sequential-memory-hard mixing (the scrypt "smix" step), applied in place to
# one element of 2r Salsa512 blocks:
#   1. `prepare` copies the element into an aligned buffer, permuting words for
#      the vectorized Salsa20 rounds;
#   2. `fillscryptblock!` writes the N-step hash chain into `scryptblock`;
#   3. `mixwithscryptblock!` folds pseudo-randomly chosen columns of that block
#      back into the working buffer;
#   4. `restore!` undoes the permutation and writes the result back to `element`.
# The two scratch buffers are swapped between steps, so only one extra
# allocation is made here.
function smix!(element::AbstractVector{Salsa512}, parameters::ScryptParameters)
    workingbuffer = prepare(element)
    shufflebuffer = valloc(Salsa512, length(workingbuffer))
    scryptblock, workingbuffer, shufflebuffer = fillscryptblock!(workingbuffer, shufflebuffer, parameters.r, parameters.N)
    workingbuffer = mixwithscryptblock!(workingbuffer, scryptblock, shufflebuffer, parameters.r, parameters.N)
    restore!(element, workingbuffer)
end
# Permutation of the 16 UInt32 words within each Salsa512 block, applied by
# `prepare` and inverted by `restore!`, arranging words so the vectorized
# Salsa20 rounds can work on whole SIMD lanes.
const SALSA_BLOCK_REORDER_INDEXES = [13; 2; 7; 12; 1; 6; 11; 16; 5; 10; 15; 4; 9; 14; 3; 8]
# Copy `src` into a freshly valloc'ed (SIMD-aligned) buffer, rotating the
# blocks forward by one position (block i lands at i + 1, the last wraps to 1)
# and permuting the words within each block via SALSA_BLOCK_REORDER_INDEXES.
function prepare(src::AbstractVector{Salsa512})
    dest = valloc(Salsa512, length(src))
    si = 1:length(src)
    dj = [2:length(dest); 1]  # destination indices: shift by one, wrap around
    for (i, j) ∈ zip(si, dj)
        dest[j] = src[i]
        permute!(uint32view(dest, j), SALSA_BLOCK_REORDER_INDEXES)
    end
    return dest
end #permute! is no faster than explicit vectorization, even with a few extra allocations
# Inverse of `prepare`: copy `src` back into `dest`, rotating the blocks back
# by one position and undoing the per-block word permutation.
function restore!(dest::AbstractVector{Salsa512}, src::AbstractVector{Salsa512})
    si = 1:length(src)
    dj = [length(dest); 1:length(dest) - 1]  # destination indices: shift back by one, wrap
    for (i, j) ∈ zip(si, dj)
        dest[j] = src[i]
        invpermute!(uint32view(dest, j), SALSA_BLOCK_REORDER_INDEXES)
    end
end
# First smix loop: run the Salsa20/8 hash chain N times, snapshotting the
# working buffer into column i of `scryptblock` before each step. Snapshots
# are written with non-temporal stores (they will not be re-read soon), each
# mixed block lands in `shufflebuffer` at the scrypt-interleaved position from
# `shuffleposition`, and the two buffers are swapped every iteration so no
# per-iteration allocation is needed. Returns the filled `scryptblock` along
# with the (possibly swapped) buffer pair.
function fillscryptblock!(workingbuffer::AbstractVector{Salsa512}, shufflebuffer::AbstractVector{Salsa512}, r, N)
    scryptblock = reshape(valloc(Salsa512, 2r * N), (2r, N))
    for i ∈ 1:N
        scryptelement = view(scryptblock, :, i)
        previousblock = lastblock = load_store!(workingbuffer, scryptelement, 1)
        for j ∈ 2:2r
            block = load_store!(workingbuffer, scryptelement, j)
            block = mixblock_shuffle_store!(block, previousblock, shufflebuffer, shuffleposition(j, r))
            previousblock = block
        end
        mixblock_shuffle_store!(lastblock, previousblock, shufflebuffer, 1) # first block mixes with the last
        workingbuffer, shufflebuffer = shufflebuffer, workingbuffer
    end
    return scryptblock, workingbuffer, shufflebuffer
end
# Destination index for the mixed block `j` (2 ≤ j ≤ 2r): even-numbered blocks
# are packed into the first half of the buffer, odd-numbered blocks into the
# second half.
function shuffleposition(j, halfblockcount)
    base = (j - 2) ÷ 2 + 2
    return iseven(j) ? base : base + halfblockcount
end
# View the i-th Salsa512 block of `x` as its 16 UInt32 words.
uint32view(x, i) = @inbounds reinterpret(UInt32, view(x, i:i))
# Aligned (vloada/vstorea) and non-temporal (vloadnt/vstorent) SIMD transfers
# of a single Salsa512 block.
vloadsalsa(x, i) = @inbounds vloada(Vec{16, UInt32}, uint32view(x, i), 1)
vloadsalsant(x, i) = @inbounds vloadnt(Vec{16, UInt32}, uint32view(x, i), 1)
vstoresalsa(v, x, i) = @inbounds vstorea(v, uint32view(x, i), 1)
vstoresalsant(v, x, i) = @inbounds vstorent(v, uint32view(x, i), 1)
# Load block i of the working buffer (aligned load) and stream it out to the
# scrypt block snapshot with a non-temporal store; returns the loaded vector
# so the caller can keep mixing it.
function load_store!(workingbuffer::AbstractVector{Salsa512}, scryptelement::AbstractVector{Salsa512}, i)
    block = vloadsalsa(workingbuffer, i)
    vstoresalsant(block, scryptelement, i)
    return block
end
# Second smix loop: N times, choose a pseudo-random column of `scryptblock`
# (index derived from the current buffer contents by `integerify`), xor it
# into the working buffer block-by-block, and apply the same Salsa20/8
# mix/shuffle pipeline as `fillscryptblock!`. Non-temporal prefetches are
# issued ahead of the loads to hide the latency of the random column reads.
# Returns the buffer holding the final state.
function mixwithscryptblock!(workingbuffer::AbstractVector{Salsa512}, scryptblock, shufflebuffer::AbstractVector{Salsa512}, r, N)
    for i ∈ 1:N
        n = integerify(workingbuffer, N)
        scryptelement = reshape(view(scryptblock, :, n), 2r)
        for j ∈ 1:r # prefetch first half of the element
            vprefetchnt(scryptelement, j)
        end
        previousblock = lastblock = load_xor(workingbuffer, scryptelement, 1)
        for j ∈ 2:2r
            if j ≤ (r + 1) # prefetch one additional block through end
                vprefetchnt(scryptelement, r + j - 1)
            end
            block = load_xor(workingbuffer, scryptelement, j)
            block = mixblock_shuffle_store!(block, previousblock, shufflebuffer, shuffleposition(j, r))
            previousblock = block
        end
        mixblock_shuffle_store!(lastblock, previousblock, shufflebuffer, 1)
        workingbuffer, shufflebuffer = shufflebuffer, workingbuffer
    end
    return workingbuffer
end
# Reduce word 5 of the first block (in the reordered word layout) modulo N to
# pick the next scrypt block column; result is 1-based.
integerify(x::AbstractVector{Salsa512}, N) = uint32view(x, 1)[5] % N + 1
# Load block i of the working buffer (aligned) and xor in the matching block
# of the selected scrypt element (non-temporal load; it is read only once).
function load_xor(workingbuffer::AbstractVector{Salsa512}, scryptelement::AbstractVector{Salsa512}, i)
    block = vloadsalsa(workingbuffer, i)
    block ⊻= vloadsalsant(scryptelement, i)
    return block
end
# One BlockMix step: XOR the current block with the previously mixed block, run
# the Salsa20 core with 8 iterations on it, and store the result at slot `i` of
# the shuffle buffer. Returns the mixed block so it can serve as the chain input
# for the next step.
function mixblock_shuffle_store!(block, previousblock, shufflebuffer, i)
    block ⊻= previousblock
    block = salsa20(block, 8)
    vstoresalsa(block, shufflebuffer, i)
    return block
end
# Salsa20 core: split the 512-bit block into four 4-lane lines, run `iterations`
# passes of quarter-round mixing plus lane rotation, rejoin the lines, and add
# the original input back in (the Salsa feed-forward step).
function salsa20(block, iterations)
    feedforward = block
    state = splitblock(block)
    for _ ∈ 1:iterations
        state = salsatranspose(salsamix(state))
    end
    return joinlines(state) + feedforward
end
# Split a 16-lane Vec{16, UInt32} block into four 4-lane lines (rows) so the
# Salsa rounds can operate on 128-bit vectors.
splitblock(block) = (shufflevector(block, Val((0,1,2,3))),
                     shufflevector(block, Val((4,5,6,7))),
                     shufflevector(block, Val((8,9,10,11))),
                     shufflevector(block, Val((12,13,14,15))))

# Inverse of `splitblock`: concatenate the four 4-lane lines back into one
# 16-lane block via two levels of shuffles.
joinlines(lines) = @inbounds shufflevector(shufflevector(lines[1], lines[2], Val((0, 1, 2, 3, 4, 5, 6, 7))),
                                           shufflevector(lines[3], lines[4], Val((0, 1, 2, 3, 4, 5, 6, 7))),
                                           Val((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)))
# One Salsa quarter-round pass over the four lines, with the standard Salsa20
# rotation constants (7, 9, 13, 18). Each updated line feeds into the next
# computation, so the statement order is load-bearing and must not be changed.
function salsamix(lines)
    line1, line2, line3, line4 = lines
    line3 = salsa(line1, line2, line3, 7)
    line4 = salsa(line2, line3, line4, 9)
    line1 = salsa(line3, line4, line1, 13)
    line2 = salsa(line4, line1, line2, 18)
    return (line1, line2, line3, line4)
end
# Salsa quarter-round primitive: add the two addends, rotate each 32-bit lane
# left by `rotationmagnitude`, and XOR the result into `xor_operand`. Uses only
# +, shifts, | and ⊻, so it works on scalars and SIMD vectors alike.
function salsa(addend1, addend2, xor_operand, rotationmagnitude)
    s = addend1 + addend2
    lanebits = 8 * sizeof(UInt32)
    rotated = (s << rotationmagnitude) | (s >>> (lanebits - rotationmagnitude))
    return rotated ⊻ xor_operand
end
# Rotate the lanes of lines 1, 3 and 4 (by 1, 2 and 3 positions respectively)
# so the next `salsamix` pass operates on the other diagonal; line 2 is left
# untouched. NOTE(review): these rotation amounts assume the diagonal-to-column
# block layout described in the README — confirm before modifying.
function salsatranspose(lines)
    toline3 = @inbounds shufflevector(lines[1], Val((1, 2, 3, 0)))
    line1 = @inbounds shufflevector(lines[3], Val((3, 0, 1, 2)))
    line3 = toline3
    line4 = @inbounds shufflevector(lines[4], Val((2, 3, 0, 1)))
    return @inbounds (line1, lines[2], line3, line4)
end
export scrypt
export ScryptParameters
end | Scrypt | https://github.com/BioTurboNick/Scrypt.jl.git |
|
[
"Apache-2.0"
] | 0.2.1 | ebbe3767049b5276a2158a33a417b2fb73a66392 | code | 2453 | """
ReadOrWrite: 0 for read, 1 for write.
Function follows the LLVM definition
https://llvm.org/docs/LangRef.html#llvm-prefetch-intrinsic
such that
prefetch(ptr, Val(3), Val(0))
corresponds to prefetch0 on x86 (extreme locality)
Adapted from SIMDPirates.jl
"""
# Emit a hardware prefetch for the address in `ptr`.
# `Locality` 0–3 is the LLVM locality hint (3 ≈ x86 prefetch0, 0 = nontemporal);
# `ReadOrWrite` is 0 for read, 1 for write (see the docstring above).
# Two code paths are required: Julia ≥ 1.6 expects `llvmcall` IR to be a full
# module with a named entry function, while older versions accept a
# (declarations, body) tuple.
@generated function prefetch(ptr::Ptr{T}, ::Val{Locality}, ::Val{ReadOrWrite}) where {T, Locality, ReadOrWrite}
    if VERSION < v"1.6.0-DEV.674"
        prefetch_call_string = """%addr = inttoptr i$(8sizeof(Int)) %0 to i8*
        call void @llvm.prefetch(i8* %addr, i32 $ReadOrWrite, i32 $Locality, i32 1)
        ret void"""
        quote
            $(Expr(:meta, :inline))
            Base.llvmcall(
                ("declare void @llvm.prefetch(i8*, i32, i32, i32)",
                $prefetch_call_string), Cvoid, Tuple{Ptr{$T}}, ptr
            )
        end
    else
        mod = """
            declare void @llvm.prefetch(i8*, i32, i32, i32)

            define void @entry(i$(8sizeof(Int))) #0 {
            top:
                %addr = inttoptr i$(8sizeof(Int)) %0 to i8*
                call void @llvm.prefetch(i8* %addr, i32 $ReadOrWrite, i32 $Locality, i32 1)
                ret void
            }

            attributes #0 = { alwaysinline }
            """
        quote
            $(Expr(:meta, :inline))
            Base.llvmcall(
                ($mod, "entry"), Cvoid, Tuple{Ptr{$T}}, ptr
            )
        end
    end
end
# Prefetch element `i` of `a` with maximal temporal locality (Locality = 3,
# i.e. prefetch0 on x86). `GC.@preserve` keeps `a` alive across the raw pointer use.
function vprefetch(a::SIMD.FastContiguousArray{T,1}, i::Integer) where {T}
    GC.@preserve a begin
        ptr = @inbounds pointer(a, i)
        prefetch(ptr, Val{3}(), Val{0}())
    end
end

# Prefetch element `i` of `a` with a nontemporal hint (Locality = 0) — used for
# data that will be read exactly once.
function vprefetchnt(a::SIMD.FastContiguousArray{T,1}, i::Integer) where {T}
    GC.@preserve a begin
        ptr = @inbounds pointer(a, i)
        prefetch(ptr, Val{0}(), Val{0}())
    end
end
# Compatibility shims: before Julia 1.6, `Base.ReinterpretArray` did not provide
# `stride`/`strides`, which the SIMD load/store helpers need. Only evaluated on
# old Julia versions, so newer Bases are left untouched.
if VERSION < v"1.6.0-DEV"
    import Base.stride
    import Base.strides

    # Stride of dimension `i` for a contiguous reinterpreted array: the product
    # of the sizes of the preceding dimensions; `length(a)` once `i > ndims(a)`.
    function stride(a::Base.ReinterpretArray, i::Int)
        a.parent isa StridedArray || ArgumentError("Parent must be strided.") |> throw
        if i > ndims(a)
            return length(a)
        end
        s = 1
        for n = 1:(i-1)
            s *= size(a, n)
        end
        return s
    end

    # All strides at once, assuming dense column-major storage.
    function strides(a::Base.ReinterpretArray)
        a.parent isa StridedArray || ArgumentError("Parent must be strided.") |> throw
        Base.size_to_strides(1, size(a)...)
    end
end
| Scrypt | https://github.com/BioTurboNick/Scrypt.jl.git |
|
[
"Apache-2.0"
] | 0.2.1 | ebbe3767049b5276a2158a33a417b2fb73a66392 | code | 251 | primitive type Salsa512 512 end
# Build a Salsa512 (one 512-bit block) by reinterpreting a 4-element UInt128 vector.
Salsa512(x::Vector{UInt128}) = reinterpret(Salsa512, x) |> first
import SIMD.valloc
# Convenience overload: allocate `n` elements of `t`, aligned to `sizeof(t)` bytes.
valloc(t::Type, n) = valloc(t, sizeof(t), n)
import Base.zero
# The all-zero 512-bit block.
zero(::Type{Salsa512}) = zeros(UInt128, 4) |> Salsa512
| Scrypt | https://github.com/BioTurboNick/Scrypt.jl.git |
|
[
"Apache-2.0"
] | 0.2.1 | ebbe3767049b5276a2158a33a417b2fb73a66392 | code | 1346 | const MAX_UINT = 2^32 - 1
# Validated scrypt parameter set. The inner constructor rejects non-positive
# values and combinations that exceed the output-length or memory limits.
struct ScryptParameters
    r::Int # element length multiplier
    N::Int # processing cost
    p::Int # parallelization

    function ScryptParameters(r, N, p)
        r > 0 || ArgumentError("r Must be > 0.") |> throw
        N > 0 || ArgumentError("N Must be > 0.") |> throw
        p > 0 || ArgumentError("p Must be > 0.") |> throw
        # Conversion through UInt rejects non-integer inputs (InexactError)
        # before the values land in the Int fields.
        parameters = new(UInt(r), UInt(N), UInt(p))
        # Output-length bound on p relative to r (cf. the scrypt specification).
        p ≤ MAX_UINT * hashlength(parameters) / elementlength(parameters) ||
            ArgumentError("p and r must satisfy the relationship p ≤ (2^32 - 1) * hashlength / elementlength)") |> throw
        # Working memory for the scrypt block must fit in physical memory.
        r * N * elementunitlength(parameters) ≤ Sys.total_memory() ||
            ArgumentError("r and N must satisfy the relationship r * N * elementunitlength ≤ Sys.total_memory") |> throw
        parameters
    end
end
# Unit conversion: bits → bytes.
bytes(x) = x ÷ 8

# Internal hash length: 256 bits (32 bytes).
hashbitslength(::ScryptParameters) = 256
hashlength(x::ScryptParameters) = hashbitslength(x) |> bytes

# One element "unit" is two 512-bit Salsa blocks; a full element is r units.
elementunitbitslength(::ScryptParameters) = 2 * 8 * sizeof(Salsa512)
elementunitlength(x::ScryptParameters) = elementunitbitslength(x) |> bytes
elementlength(x::ScryptParameters) = elementunitlength(x) * x.r
# Total buffer size across all p parallel lanes.
bufferlength(x::ScryptParameters) = elementlength(x) * x.p
# Number of Salsa512 blocks per element (= 2r).
elementblockcount(x::ScryptParameters) = elementlength(x) ÷ sizeof(Salsa512)
| Scrypt | https://github.com/BioTurboNick/Scrypt.jl.git |
|
[
"Apache-2.0"
] | 0.2.1 | ebbe3767049b5276a2158a33a417b2fb73a66392 | code | 1468 | using Scrypt
using Test
@testset "Scrypt Tests" begin
    # Expected outputs are scrypt test vectors; the (1,16,1), (8,1024,16),
    # (8,16384,1) and (8,1048576,1) cases match the published vectors from
    # Percival's paper / RFC 7914 (§12); the (2,32,2) case appears to be an
    # additional regression vector. Note: the last case needs ~1 GiB of RAM.
    expected = [hex2bytes("77d6576238657b203b19ca42c18a0497f16b4844e3074ae8dfdffa3fede21442fcd0069ded0948f8326a753a0fc81f17e8d3e0fb2e0d3628cf35e20c38d18906"),
                hex2bytes("b034a96734ebdc650fca132f40ffde0823c2f780d675eb81c85ec337d3b1176017061beeb3ba18df59802b95a325f5f850b6fd9efb1a6314f835057c90702b19"),
                hex2bytes("fdbabe1c9d3472007856e7190d01e9fe7c6ad7cbc8237830e77376634b3731622eaf30d92e22a3886ff109279d9830dac727afb94a83ee6d8360cbdfa2cc0640"),
                hex2bytes("7023bdcb3afd7348461c06cd81fd38ebfda8fbba904f8e3ea9b543f6545da1f2d5432955613f0fcf62d49705242a9af9e61e85dc0d651e40dfcf017b45575887"),
                hex2bytes("2101cb9b6a511aaeaddbbe09cf70f881ec568d574a2ffd4dabe5ee9820adaa478e56fd8f4ba5d09ffa1c6d927c40f4c337304049e8a952fbcbf45c6fa77a41a4")]
    # ScryptParameters argument order is (r, N, p).
    output = [scrypt(ScryptParameters(1, 16, 1), Vector{UInt8}(b""), Vector{UInt8}(b""), 64),
              scrypt(ScryptParameters(2, 32, 2), Vector{UInt8}(b"password"), Vector{UInt8}(b"NaCl"), 64),
              scrypt(ScryptParameters(8, 1024, 16), Vector{UInt8}(b"password"), Vector{UInt8}(b"NaCl"), 64),
              scrypt(ScryptParameters(8, 16384, 1), Vector{UInt8}(b"pleaseletmein"), Vector{UInt8}(b"SodiumChloride"), 64),
              scrypt(ScryptParameters(8, 1048576, 1), Vector{UInt8}(b"pleaseletmein"), Vector{UInt8}(b"SodiumChloride"), 64)]
    @test all(expected .== output)
end
| Scrypt | https://github.com/BioTurboNick/Scrypt.jl.git |
|
[
"Apache-2.0"
] | 0.2.1 | ebbe3767049b5276a2158a33a417b2fb73a66392 | docs | 3254 | # Scrypt.jl
[](https://travis-ci.com/github/BioTurboNick/Scrypt.jl)
[](https://codecov.io/github/BioTurboNick/Scrypt.jl?branch=master)
Port of my [Skryptonite](https://github.com/BioTurboNick/Skryptonite) C++/C# implementation of the Scrypt password-based key derivation algorithm / hash function, in pure Julia.
I make no guarantees other than that it passes the test vectors from the original paper. Contributions welcome.
Skryptonite code is more fully documented, if you wish to understand the logic. But in brief, the data is rearranged for optimal internal operations by placing the last block first and organizing the internal matrix blocks so that the diagonals are moved into columns.
One thing this lacks right now is parallelization for the p parameter.
Example:
```
r = 8
N = 16384
p = 1
key = Vector{UInt8}(b"pleaseletmein")
salt = Vector{UInt8}(b"SodiumChloride")
derivedkeylength = 64 # bytes
scrypt(ScryptParameters(r, N, p), key, salt, derivedkeylength)
```
Optimization notes:
- Initial: 7.511 s (93602212 allocations: 8.63 GiB) (commit 79ccff573b132d9079f908b02a717b58fa71a710)
- Moved constant array selectors into global constants: 7.206 s (81019300 allocations: 7.32 GiB) (commit 9195adc4a87f06068ba6b3e7da23188cf9c22c67)
- Just prior to the critical inner loop, copied the data to an MMatrix from StaticArrays: 1.455 s (81019300 allocations: 3.29 GiB) (commit 4b716febb788ff2b1493eb03e63e9034565b48e8)
- Refactored and simplified: 1.642 s (81281446 allocations: 3.27 GiB) (commit 98cdaee685836c636f1abdf6745d6260de219a79)
- Changed salsamix!() function to loop over rows instead of over columns, paradoxically: 1.130 s (17234346 allocations: 1.48 GiB) (commit 94e620944ca398af78eac778ea55580d81972343)
- Fully implemented SIMD for Salsa20/8 instead of StaticArrays: 312.388 ms (4651434 allocations: 471.02 MiB) (commit c08f960f82f043e0443b73307542ba30ecd97d0b)
- Cut down a few allocations by using `@code_warntype` to tighten up function types, but minimal improvement overall.
- Further vectorized, removed some abstraction. Weirdly, vectorization of the prepare/restore functions made it marginally slower, although no difference in allocations, did not keep: 261.690 ms (1311110 allocations: 196.05 MiB)
- Implemented memory-aligned and nontemporal load/store methods for fill/mix functions: 150.639 ms (524678 allocations: 88.07 MiB) (commit 857cd7a92a797bd67ca22d684e051432d6f7e48d)
- Got rid of an internal array I had introduced in the inner loop accidentally: 85.645 ms (390 allocations: 16.07 MiB) (commit 6a48816057494a1770c9406723440216da68df97)
- Implemented nontemporal store instructions, increased time a bit, but more secure: 90.233 ms (390 allocations: 16.07 MiB)
- Added @inbounds to load/store methods: 79.289 ms (390 allocations: 16.07 MiB)
16 MiB is about the lower limit of allocation amount for the parameters I was using.
End result: Only ~2 times slower than my original C++/C# package, after starting ~525 times slower. A bit more optimization to try to squeeze out.
| Scrypt | https://github.com/BioTurboNick/Scrypt.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 715 | using DifferentiableExpectations
using Documenter
using DocumenterCitations
# Bibliography for DocumenterCitations, rendered in author-year style.
bib = CitationBibliography(joinpath(@__DIR__, "src", "DiffExp.bib"); style=:authoryear)

# Reuse the repository README as the documentation landing page.
cp(joinpath(@__DIR__, "..", "README.md"), joinpath(@__DIR__, "src", "index.md"); force=true)

makedocs(;
    modules=[DifferentiableExpectations],
    authors="Members of JuliaDecisionFocusedLearning",
    sitename="DifferentiableExpectations.jl",
    format=Documenter.HTML(),
    pages=[
        "Home" => "index.md", #
        "API reference" => "api.md",
        "Background" => "background.md",
    ],
    plugins=[bib],
)

# Publish the built site; `devbranch` is the branch tracked by the dev docs.
deploydocs(;
    repo="github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl",
    devbranch="main",
)
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 1046 | """
DifferentiableExpectations
A Julia package for differentiating through expectations with Monte-Carlo estimates.
# Exports
$(EXPORTS)
"""
module DifferentiableExpectations
using ChainRulesCore:
ChainRulesCore,
NoTangent,
ProjectTo,
RuleConfig,
Tangent,
@not_implemented,
rrule,
rrule_via_ad,
unthunk
using Compat: @compat
using DensityInterface: logdensityof
using Distributions: Distribution, MvNormal, Normal
using DocStringExtensions
using LinearAlgebra: Diagonal, cholesky, dot
using OhMyThreads: tmap, treduce, tmapreduce
using Random: Random, AbstractRNG, default_rng, seed!
using Statistics: Statistics, cov, mean, std
using StatsBase: StatsBase
include("utils.jl")
include("distribution.jl")
include("abstract.jl")
include("reinforce.jl")
include("reparametrization.jl")
export DifferentiableExpectation
export Reinforce
export Reparametrization
export FixedAtomsProbabilityDistribution
export empirical_distribution
@compat public atoms, weights
end # module DifferentiableExpectations
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 2611 | """
DifferentiableExpectation{t}
Abstract supertype for differentiable parametric expectations `E : θ -> 𝔼[f(X)]` where `X ∼ p(θ)`, whose value and derivative are approximated with Monte-Carlo averages.
# Subtypes
- [`Reinforce`](@ref)
- [`Reparametrization`](@ref)
# Calling behavior
(E::DifferentiableExpectation)(θ...; kwargs...)
Return a Monte-Carlo average `(1/S) ∑f(xᵢ)` where the `xᵢ ∼ p(θ)` are iid samples.
# Type parameters
- `threaded::Bool`: specifies whether the sampling should be performed in parallel
# Required fields
- `f`: The function applied inside the expectation.
- `dist_constructor`: The constructor of the probability distribution.
- `rng::AbstractRNG`: The random number generator.
- `nb_samples::Integer`: The number of Monte-Carlo samples.
- `seed`::Union{Nothing,Integer}: The seed for the random number generator, reset before each call. Set to `nothing` for no seeding.
The field `dist_constructor` must be a callable such that `dist_constructor(θ...)` generates an object `dist` that corresponds to `p(θ)`.
The resulting object `dist` needs to satisfy:
- the [Random API](https://docs.julialang.org/en/v1/stdlib/Random/#Hooking-into-the-Random-API) for sampling with `rand(rng, dist)`
- the [DensityInterface.jl API](https://github.com/JuliaMath/DensityInterface.jl) for loglikelihoods with `logdensityof(dist, x)`
"""
abstract type DifferentiableExpectation{t} end

# Lift the `threaded` type parameter into a `Val` so that serial vs threaded
# map/reduce implementations can be selected by dispatch instead of a runtime branch.
is_threaded(::DifferentiableExpectation{t}) where {t} = Val(t)
"""
    empirical_predistribution(E::DifferentiableExpectation, θ...)

Return a uniform [`FixedAtomsProbabilityDistribution`](@ref) over `{x₁, ..., xₛ}`, where the `xᵢ ∼ p(θ)` are iid samples.
"""
function empirical_predistribution(E::DifferentiableExpectation, θ...)
    dist = E.dist_constructor(θ...)
    # Re-seed before sampling so repeated calls are reproducible when a seed is set.
    isnothing(E.seed) || seed!(E.rng, E.seed)
    samples = maybe_eachcol(rand(E.rng, dist, E.nb_samples))
    return FixedAtomsProbabilityDistribution(samples; threaded=unval(is_threaded(E)))
end
"""
    empirical_distribution(E::DifferentiableExpectation, θ...; kwargs...)

Return a uniform [`FixedAtomsProbabilityDistribution`](@ref) over `{f(x₁), ..., f(xₛ)}`, where the `xᵢ ∼ p(θ)` are iid samples.
"""
function empirical_distribution(E::DifferentiableExpectation, θ...; kwargs...)
    # Bake the keyword arguments into `f` so it can be mapped over the atoms.
    return map(FixKwargs(E.f, kwargs), empirical_predistribution(E, θ...))
end
# Calling the expectation object returns the Monte-Carlo average `(1/S) ∑ f(xᵢ)`
# over `nb_samples` iid draws from `p(θ)`.
function (E::DifferentiableExpectation)(θ...; kwargs...)
    return mean(empirical_distribution(E, θ...; kwargs...))
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 4340 | """
FixedAtomsProbabilityDistribution{threaded}
A probability distribution with finite support and fixed atoms.
Whenever its expectation is differentiated, only the weights are considered active, whereas the atoms are considered constant.
# Example
```jldoctest
julia> using DifferentiableExpectations, Statistics, Zygote
julia> using DifferentiableExpectations: atoms, weights
julia> dist = FixedAtomsProbabilityDistribution([2, 3], [0.4, 0.6]);
julia> atoms(map(abs2, dist))
2-element Vector{Int64}:
4
9
julia> weights(map(abs2, dist))
2-element Vector{Float64}:
0.4
0.6
julia> mean(abs2, dist)
7.0
julia> gradient(mean, abs2, dist)[2]
(atoms = nothing, weights = [4.0, 9.0])
```
# Constructor
FixedAtomsProbabilityDistribution(
atoms::AbstractVector,
weights::AbstractVector=uniform_weights(atoms);
threaded=false
)
# Fields
$(TYPEDFIELDS)
"""
struct FixedAtomsProbabilityDistribution{
    threaded,A<:AbstractVector,W<:AbstractVector{<:Real}
}
    atoms::A
    weights::W

    function FixedAtomsProbabilityDistribution(
        atoms::A, weights::W=uniform_weights(atoms); threaded::Bool=false
    ) where {A,W}
        # Guard clauses preserve the documented check order (and thus which
        # error is raised first): non-empty, matching lengths, normalized.
        (isempty(atoms) || isempty(weights)) &&
            throw(ArgumentError("`atoms` and `weights` must be non-empty."))
        length(atoms) == length(weights) ||
            throw(DimensionMismatch("`atoms` and `weights` must have the same length."))
        isapprox(sum(weights), one(eltype(weights)); atol=1e-4) ||
            throw(ArgumentError("`weights` must be normalized to `1`."))
        return new{threaded,A,W}(atoms, weights)
    end
end
"""
    atoms(dist::FixedAtomsProbabilityDistribution)

Get the vector of atoms of a distribution.
"""
atoms(dist::FixedAtomsProbabilityDistribution) = dist.atoms

"""
    weights(dist::FixedAtomsProbabilityDistribution)

Get the vector of weights of a distribution.
"""
weights(dist::FixedAtomsProbabilityDistribution) = dist.weights

# Lift the `threaded` type parameter into a `Val` for dispatch-based selection
# of the serial vs threaded map/reduce helpers.
is_threaded(::FixedAtomsProbabilityDistribution{t}) where {t} = Val(t)

# Number of atoms in the support.
Base.length(dist::FixedAtomsProbabilityDistribution) = length(dist.atoms)
"""
    rand(rng, dist::FixedAtomsProbabilityDistribution)

Sample from the atoms of `dist` with probability proportional to their weights.
"""
function Random.rand(rng::AbstractRNG, dist::FixedAtomsProbabilityDistribution)
    return StatsBase.sample(rng, dist.atoms, StatsBase.Weights(dist.weights))
end
"""
    map(f, dist::FixedAtomsProbabilityDistribution)

Apply `f` to the atoms of `dist`, leave the weights unchanged.
"""
function Base.map(f, dist::FixedAtomsProbabilityDistribution)
    mapped_atoms = mymap(is_threaded(dist), f, dist.atoms)
    return FixedAtomsProbabilityDistribution(mapped_atoms, dist.weights)
end
"""
    mean(dist::FixedAtomsProbabilityDistribution)

Compute the expectation of `dist`, i.e. the sum of all atoms multiplied by their respective weights.
"""
function Statistics.mean(dist::FixedAtomsProbabilityDistribution)
    # ∑ᵢ wᵢ * xᵢ, computed serially or in parallel depending on `threaded`.
    return mymapreduce(is_threaded(dist), *, +, dist.weights, dist.atoms)
end
"""
    mean(f, dist::FixedAtomsProbabilityDistribution)

Shortcut for `mean(map(f, dist))`.
"""
Statistics.mean(f, dist::FixedAtomsProbabilityDistribution) = mean(map(f, dist))
# Custom reverse rule for `mean(dist)`. The atoms are treated as constants, so
# the only nonzero cotangent lands on the weights: since mean = ∑ᵢ wᵢ xᵢ, the
# partial w.r.t. wᵢ is the atom xᵢ, hence Δwᵢ = ⟨Δe, xᵢ⟩.
function ChainRulesCore.rrule(::typeof(mean), dist::FixedAtomsProbabilityDistribution)
    (; atoms) = dist
    e = mean(dist)
    function dist_mean_pullback(Δe_thunked)
        Δe = unthunk(Δe_thunked)
        Δatoms = NoTangent()
        Δweights = mymap(is_threaded(dist), Base.Fix1(dot, Δe), atoms)
        Δdist = Tangent{FixedAtomsProbabilityDistribution}(; atoms=Δatoms, weights=Δweights)
        return NoTangent(), Δdist
    end
    return e, dist_mean_pullback
end
# Reverse rule for `mean(f, dist)`: same structure as the rule above, but the
# weight cotangents are taken against the transformed atoms f(xᵢ); `f` itself
# receives no cotangent (NoTangent).
function ChainRulesCore.rrule(::typeof(mean), f, dist::FixedAtomsProbabilityDistribution)
    new_dist = map(f, dist)
    new_atoms = new_dist.atoms
    e = mean(new_dist)
    function dist_fmean_pullback(Δe_thunked)
        Δe = unthunk(Δe_thunked)
        Δatoms = NoTangent()
        Δweights = mymap(is_threaded(dist), Base.Fix1(dot, Δe), new_atoms)
        Δdist = Tangent{FixedAtomsProbabilityDistribution}(; atoms=Δatoms, weights=Δweights)
        return NoTangent(), NoTangent(), Δdist
    end
    return e, dist_fmean_pullback
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 4699 | """
    Reinforce{threaded} <: DifferentiableExpectation{threaded}

Differentiable parametric expectation `F : θ -> 𝔼[f(X)]` where `X ∼ p(θ)` using the REINFORCE (or score function) gradient estimator:

```
∂F(θ) = 𝔼[f(X) ∇₂logp(X,θ)ᵀ]
```

# Example

```jldoctest
using DifferentiableExpectations, Distributions, Zygote

E = Reinforce(exp, Normal; nb_samples=10^5)
E_true(μ, σ) = mean(LogNormal(μ, σ))

μ, σ = 0.5, 1.0
∇E, ∇E_true = gradient(E, μ, σ), gradient(E_true, μ, σ)
isapprox(collect(∇E), collect(∇E_true); rtol=1e-1)

# output

true
```

# Constructor

    Reinforce(
        f,
        dist_constructor,
        dist_logdensity_grad=nothing;
        rng=Random.default_rng(),
        nb_samples=1,
        threaded=false,
        seed=nothing
    )

# Fields

$(TYPEDFIELDS)

# See also

- [`DifferentiableExpectation`](@ref)
"""
# Type parameters: `t` is the `threaded` flag; `variance_reduction` toggles the
# baseline subtraction used by the custom chain rules below.
struct Reinforce{t,variance_reduction,F,D,G,R<:AbstractRNG,S<:Union{Int,Nothing}} <:
       DifferentiableExpectation{t}
    "function applied inside the expectation"
    f::F
    "constructor of the probability distribution `(θ...) -> p(θ)`"
    dist_constructor::D
    "either `nothing` or a parameter gradient callable `(x, θ...) -> ∇₂logp(x, θ)`"
    dist_logdensity_grad::G
    "random number generator"
    rng::R
    "number of Monte-Carlo samples"
    nb_samples::Int
    "seed for the random number generator, reset before each call. Set to `nothing` for no seeding."
    seed::S
end
# Outer keyword constructor: lifts `threaded` and `variance_reduction` into type
# parameters so that code paths are selected by dispatch at zero runtime cost.
function Reinforce(
    f::F,
    dist_constructor::D,
    dist_logdensity_grad::G=nothing;
    rng::R=default_rng(),
    nb_samples=1,
    threaded=false,
    variance_reduction=true,
    seed::S=nothing,
) where {F,D,G,R,S}
    return Reinforce{threaded,variance_reduction,F,D,G,R,S}(
        f, dist_constructor, dist_logdensity_grad, rng, nb_samples, seed
    )
end
# Gradient of the log-density, ∇₂logp(x, θ), with respect to the distribution
# parameters. Uses the user-supplied closed form when one was provided;
# otherwise differentiates `logdensityof` through the outer AD system via
# `rrule_via_ad`.
function dist_logdensity_grad(rc::RuleConfig, E::Reinforce, x, θ...)
    (; dist_constructor, dist_logdensity_grad) = E
    if !isnothing(dist_logdensity_grad)
        dθ = dist_logdensity_grad(x, θ...)
    else
        _logdensity_partial(_θ...) = logdensityof(dist_constructor(_θ...), x)
        l, pullback = rrule_via_ad(rc, _logdensity_partial, θ...)
        # Drop the cotangent of the closure itself; keep only the θ components.
        dθ = Base.tail(pullback(one(l)))
    end
    return dθ
end
# Reverse rule implementing the REINFORCE estimator:
#   Δθ ≈ (1/S') ∑ᵢ ⟨yᵢ - b, Δy⟩ ∇₂logp(xᵢ, θ)
# where b is the sample-mean baseline, subtracted only when `variance_reduction`
# is on and there is more than one sample, with S' = S - 1 in that case.
function ChainRulesCore.rrule(
    rc::RuleConfig, E::Reinforce{t,variance_reduction}, θ...; kwargs...
) where {t,variance_reduction}
    project_θ = ProjectTo(θ)
    (; f, nb_samples) = E

    xdist = empirical_predistribution(E, θ...)
    xs = atoms(xdist)
    fk = FixKwargs(f, kwargs)
    ydist = map(fk, xdist)
    ys = atoms(ydist)
    y = mean(ydist)

    # Score ∇₂logp(xᵢ, θ) for every sample.
    _dist_logdensity_grad_partial(x) = dist_logdensity_grad(rc, E, x, θ...)
    gs = mymap(is_threaded(E), _dist_logdensity_grad_partial, xs)

    # Baseline subtraction for variance reduction (only meaningful for S > 1).
    ys_baseline = if (variance_reduction && nb_samples > 1)
        mymap(is_threaded(E), yᵢ -> yᵢ .- y, ys)
    else
        ys
    end
    adjusted_nb_samples = nb_samples - (variance_reduction && nb_samples > 1)

    function pullback_Reinforce(Δy_thunked)
        Δy = unthunk(Δy_thunked)
        ΔE = @not_implemented("The fields of the `Reinforce` object are constant.")
        _single_sample_pullback(gᵢ, yᵢ) = gᵢ .* dot(yᵢ, Δy)
        Δθ =
            mymapreduce(is_threaded(E), _single_sample_pullback, .+, gs, ys_baseline) ./
            adjusted_nb_samples
        Δθ_proj = project_θ(Δθ)
        return (ΔE, Δθ_proj...)
    end

    return y, pullback_Reinforce
end
# Reverse rule for `empirical_distribution` under REINFORCE: the incoming
# cotangent lives on the distribution's weights (the atoms are constants), so
# each sample's score is scaled by its weight cotangent. The baseline here is
# the mean of the weight cotangents, mirroring the variance reduction used by
# the value rule above.
function ChainRulesCore.rrule(
    rc::RuleConfig,
    ::typeof(empirical_distribution),
    E::Reinforce{t,variance_reduction},
    θ...;
    kwargs...,
) where {t,variance_reduction}
    project_θ = ProjectTo(θ)
    (; f, nb_samples) = E

    xdist = empirical_predistribution(E, θ...)
    xs = atoms(xdist)
    fk = FixKwargs(f, kwargs)
    ydist = map(fk, xdist)

    # Score ∇₂logp(xᵢ, θ) for every sample.
    _dist_logdensity_grad_partial(x) = dist_logdensity_grad(rc, E, x, θ...)
    gs = mymap(is_threaded(E), _dist_logdensity_grad_partial, xs)

    adjusted_nb_samples = nb_samples - (variance_reduction && nb_samples > 1)

    function pullback_Reinforce_probadist(Δdist_thunked)
        Δdist = unthunk(Δdist_thunked)
        Δps = Δdist.weights
        Δps_mean = mean(Δps)
        # Center the weight cotangents when variance reduction is active.
        Δps_baseline = if (variance_reduction && nb_samples > 1)
            Δps .- Δps_mean
        else
            Δps
        end
        ΔE = @not_implemented("The fields of the `Reinforce` object are constant.")
        _single_sample_pullback(gᵢ, Δpᵢ) = gᵢ .* Δpᵢ
        Δθ =
            mymapreduce(is_threaded(E), _single_sample_pullback, .+, gs, Δps_baseline) ./
            adjusted_nb_samples
        Δθ_proj = project_θ(Δθ)
        return (NoTangent(), ΔE, Δθ_proj...)
    end

    return ydist, pullback_Reinforce_probadist
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 4308 | """
TransformedDistribution
Represent the probability distribution `p` of a random variable `X ∼ p` with a transformation `X = T(Z)` where `Z ∼ q`.
# Fields
$(TYPEDFIELDS)
"""
# Pairs a parameter-free base distribution `q` with a deterministic map `T` so
# that `T(Z), Z ∼ q` has the law of the original distribution `p` — the data
# structure behind the reparametrization trick.
struct TransformedDistribution{D,T}
    "the distribution `q` that gets transformed into `p`"
    base_dist::D
    "the transformation function `T`"
    transformation::T
end
"""
    rand(rng, dist::TransformedDistribution)

Sample from `dist` by applying `dist.transformation` to `dist.base_dist`.
"""
function Random.rand(rng::AbstractRNG, dist::TransformedDistribution)
    z = rand(rng, dist.base_dist)
    return dist.transformation(z)
end
"""
    reparametrize(dist)

Turn a probability distribution `p` into a [`TransformedDistribution`](@ref) `(q, T)` such that the new distribution `q` does not depend on the parameters of `p`.
These parameters are encoded (closed over) in the transformation function `T`.

New distribution types can opt into the reparametrization estimator by adding a
method to this function.
"""
function reparametrize end
# Location-scale reparametrization of a univariate normal: a standard normal
# base plus the affine map z ↦ μ + σz.
function reparametrize(dist::Normal{T}) where {T}
    standard = Normal(zero(T), one(T))
    μ, σ = mean(dist), std(dist)
    shift_scale(z) = μ + σ * z
    return TransformedDistribution(standard, shift_scale)
end
# Multivariate analogue: standard normal base of the same dimension, and an
# affine map z ↦ μ + Lz where L is the Cholesky factor of the covariance.
function reparametrize(dist::MvNormal{T}) where {T}
    dim = length(dist)
    standard = MvNormal(fill(zero(T), dim), Diagonal(fill(one(T), dim)))
    μ = mean(dist)
    L = cholesky(cov(dist)).L
    affine(z) = μ .+ L * z
    return TransformedDistribution(standard, affine)
end
"""
    Reparametrization{threaded} <: DifferentiableExpectation{threaded}

Differentiable parametric expectation `F : θ -> 𝔼[f(X)]` where `X ∼ p(θ)` using the reparametrization (or pathwise) gradient estimator: if `X = g(Z,θ)` where `Z ∼ q` then

```
∂F(θ) = 𝔼_q[∂f(g(Z,θ)) ∂₂g(Z,θ)ᵀ]
```

# Example

```jldoctest
using DifferentiableExpectations, Distributions, Zygote

E = Reparametrization(exp, Normal; nb_samples=10^4)
E_true(μ, σ) = mean(LogNormal(μ, σ))

μ, σ = 0.5, 1.0
∇E, ∇E_true = gradient(E, μ, σ), gradient(E_true, μ, σ)
isapprox(collect(∇E), collect(∇E_true); rtol=1e-1)

# output

true
```

# Constructor

    Reparametrization(
        f,
        dist_constructor,
        rng=Random.default_rng(),
        nb_samples=1,
        threaded=false,
        seed=nothing
    )

# Fields

$(TYPEDFIELDS)

# See also

- [`DifferentiableExpectation`](@ref)
"""
struct Reparametrization{t,F,D,R<:AbstractRNG,S<:Union{Int,Nothing}} <:
       DifferentiableExpectation{t}
    "function applied inside the expectation"
    f::F
    "constructor of the probability distribution `(θ...) -> p(θ)`"
    dist_constructor::D
    "random number generator"
    rng::R
    "number of Monte-Carlo samples"
    nb_samples::Int
    "seed for the random number generator, reset before each call. Set to `nothing` for no seeding."
    seed::S
end
# Outer keyword constructor: lifts `threaded` into the first type parameter so
# serial vs threaded code paths are selected by dispatch.
function Reparametrization(
    f::F,
    dist_constructor::D;
    rng::R=default_rng(),
    nb_samples=1,
    threaded=false,
    seed::S=nothing,
) where {F,D,R,S}
    return Reparametrization{threaded,F,D,R,S}(f, dist_constructor, rng, nb_samples, seed)
end
# Reverse rule implementing the reparametrization (pathwise) estimator: sample
# noise z from the parameter-free base distribution, and differentiate
# h(z, θ) = f(T_θ(z)) with respect to θ through the outer AD system.
# NOTE(review): unlike the primal path (which goes through
# `empirical_predistribution` and resets `rng` to `E.seed`), this rule samples
# from `rng` directly without re-seeding — confirm this asymmetry is intended.
function ChainRulesCore.rrule(rc::RuleConfig, E::Reparametrization, θ...; kwargs...)
    project_θ = ProjectTo(θ)
    (; f, dist_constructor, rng, nb_samples) = E
    dist = dist_constructor(θ...)
    transformed_dist = reparametrize(dist)
    zs = maybe_eachcol(rand(rng, transformed_dist.base_dist, nb_samples))
    zdist = FixedAtomsProbabilityDistribution(zs; threaded=unval(is_threaded(E)))
    xdist = map(transformed_dist.transformation, zdist)
    fk = FixKwargs(f, kwargs)
    ydist = map(fk, xdist)
    y = mean(ydist)

    # Rebuild the transformation inside `h` so that the dependence on θ is
    # visible to the AD system invoked via `rrule_via_ad`.
    function h(zᵢ, θ)
        transformed_dist = reparametrize(dist_constructor(θ...))
        return f(transformed_dist.transformation(zᵢ); kwargs...)
    end

    function pullback_Reparametrization(Δy_thunked)
        Δy = unthunk(Δy_thunked)
        ΔE = @not_implemented("The fields of the `Reparametrization` object are constant.")
        function _single_sample_pullback(zᵢ)
            _, pb = rrule_via_ad(rc, h, zᵢ, θ)
            _, _, Δθ = pb(Δy)
            return Δθ
        end
        # Average the per-sample parameter cotangents.
        Δθ = mymapreduce(is_threaded(E), _single_sample_pullback, .+, zs) ./ nb_samples
        Δθ_proj = project_θ(Δθ)
        return (ΔE, Δθ_proj...)
    end

    return y, pullback_Reparametrization
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 1039 | """
FixKwargs(f, kwargs)
Callable struct that fixes the keyword arguments of `f` to `kwargs...`, and only accepts positional arguments.
"""
struct FixKwargs{F,K}
    f::F
    kwargs::K
end

# Calling the wrapper forwards positional arguments to `f` with the stored
# keyword arguments spliced back in.
function (fk::FixKwargs)(args...)
    return fk.f(args...; fk.kwargs...)
end
"""
    maybe_eachcol(x::AbstractVector)

Return `x`.
"""
maybe_eachcol(samples::AbstractVector) = samples

"""
    maybe_eachcol(x::AbstractMatrix)

Return `eachcol(x)`.
"""
function maybe_eachcol(samples::AbstractMatrix)
    # One column per multivariate sample.
    return eachcol(samples)
end
# Uniform probability mass over every entry of `x`: an array of the same shape
# whose entries are all 1/length(x) (as Float64).
uniform_weights(x::AbstractArray) = fill(one(Float64) / length(x), size(x))
"""
    mymap(::Val{threaded}, args...)

Apply either `tmap(args...)` or `map(args...)` depending on the value of `threaded`.
"""
mymap(::Val{true}, args...) = tmap(args...)
mymap(::Val{false}, args...) = map(args...)

"""
    mymapreduce(::Val{threaded}, args...)

Apply either `tmapreduce(args...)` or `mapreduce(args...)` depending on the value of `threaded`.
"""
mymapreduce(::Val{true}, args...) = tmapreduce(args...)
mymapreduce(::Val{false}, args...) = mapreduce(args...)

# Extract the compile-time Bool from a `Val` (inverse of `Val(t)`).
unval(::Val{t}) where {t} = t
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 1098 | using ChainRulesCore
using Distributions
using DifferentiableExpectations
using LinearAlgebra
using Random
using StableRNGs
using Statistics
using Test
using Zygote
# Deterministic RNG so the sampling assertions below are reproducible.
rng = StableRNG(63)

# Constructor validation: empty inputs, length mismatch, and unnormalized
# weights must each raise their dedicated error.
@test_throws ArgumentError FixedAtomsProbabilityDistribution(Int[], Float64[])
@test_throws DimensionMismatch FixedAtomsProbabilityDistribution([2, 3], [1.0])
@test_throws ArgumentError FixedAtomsProbabilityDistribution([2, 3], [0.4, 0.8])

# The serial and threaded variants must behave identically.
for threaded in (false, true)
    dist = FixedAtomsProbabilityDistribution([2, 3], [0.4, 0.6]; threaded)
    @test length(dist) == 2
    # Exact expectations ...
    @test mean(dist) ≈ 2.6
    @test mean(abs2, dist) ≈ 7.0
    # ... and Monte-Carlo estimates obtained through `rand`.
    @test mean([rand(rng, dist) for _ in 1:(10^5)]) ≈ 2.6 rtol = 0.1
    @test mean(abs2, [rand(rng, dist) for _ in 1:(10^5)]) ≈ 7.0 rtol = 0.1
    @test map(abs2, dist).weights == dist.weights
    @test map(abs2, dist).atoms == [4, 9]
    # Differentiating `mean` must yield cotangents only on the weights.
    @test only(gradient(mean, dist)).atoms === nothing
    @test only(gradient(mean, dist)).weights == [2, 3]
    @test last(gradient(mean, abs2, dist)).atoms === nothing
    @test last(gradient(mean, abs2, dist)).weights == [4, 9]
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 4405 | using Distributions
using DifferentiableExpectations
using DifferentiableExpectations: atoms
using LinearAlgebra
using Random
using StableRNGs
using Statistics
using Test
using Zygote
# Test helpers: with `correct=false` these deliberately compute the wrong
# function (sin), so the tests below can detect whether keyword arguments are
# actually forwarded through the estimators.
exp_with_kwargs(x; correct=false) = correct ? exp(x) : sin(x)
vec_exp_with_kwargs(x; correct=false) = exp_with_kwargs.(x; correct)

# Gradient of log N(x; θ...) via Zygote, used to exercise the path where the
# user supplies `dist_logdensity_grad` explicitly.
normal_logdensity_grad(x, θ...) = gradient((_θ...) -> logpdf(Normal(_θ...), x), θ...)
# Compare estimator values and gradients against the closed-form moments of the
# LogNormal distribution (X = exp(Z), Z ∼ Normal(μ, σ)); `correct=true` also
# checks that keyword arguments reach `f`.
@testset verbose = true "Univariate LogNormal" begin
    μ, σ = 0.5, 1.0
    seed = 63

    true_mean(μ, σ) = mean(LogNormal(μ, σ))
    true_std(μ, σ) = std(LogNormal(μ, σ))
    ∇mean_true = gradient(true_mean, μ, σ)

    @testset verbose = true "Threaded: $threaded" for threaded in (false,) #
        @testset "$(nameof(typeof(E)))" for E in [
            Reinforce(
                exp_with_kwargs,
                Normal;
                rng=StableRNG(seed),
                nb_samples=10^4,
                threaded=threaded,
                seed=seed,
            ),
            Reinforce(
                exp_with_kwargs,
                Normal,
                normal_logdensity_grad;
                rng=StableRNG(seed),
                nb_samples=10^4,
                threaded=threaded,
                seed=seed,
            ),
            Reparametrization(
                exp_with_kwargs,
                Normal;
                rng=StableRNG(seed),
                nb_samples=10^4,
                threaded=threaded,
                seed=seed,
            ),
        ]
            # Smoke-check that the estimator can be turned into a string.
            string(E)
            # With a fixed seed, two identical calls must return identical values.
            @test E(μ, σ; correct=true) == E(μ, σ; correct=true)
            @test E.dist_constructor(μ, σ) == Normal(μ, σ)
            @test E(μ, σ; correct=true) ≈ true_mean(μ, σ) rtol = 0.1
            @test std(atoms(empirical_distribution(E, μ, σ; correct=true))) ≈ true_std(μ, σ) rtol =
                0.1
            ∇mean_est = gradient((μ, σ) -> E(μ, σ; correct=true), μ, σ)
            @test ∇mean_est[1] ≈ ∇mean_true[1] rtol = 0.2
            @test ∇mean_est[2] ≈ ∇mean_true[2] rtol = 0.2
        end
    end
end;
# Same comparison for a diagonal multivariate case (componentwise LogNormal).
@testset verbose = true "Multivariate LogNormal" begin
    μ, σ = [2.0, 3.0], [0.2, 0.3]

    true_mean(μ, σ) = mean.(LogNormal.(μ, σ))
    true_std(μ, σ) = std.(LogNormal.(μ, σ))
    ∂mean_true = jacobian(true_mean, μ, σ)

    @testset verbose = true "Threaded: $threaded" for threaded in (false, true)
        @testset "$(nameof(typeof(E)))" for E in [
            Reinforce(
                vec_exp_with_kwargs,
                (μ, σ) -> MvNormal(μ, Diagonal(σ .^ 2));
                rng=StableRNG(63),
                nb_samples=10^5,
                threaded=threaded,
            ),
            # NOTE(review): the Reparametrization case is disabled in the
            # original test suite; the reason is not stated here.
            # Reparametrization(
            #     vec_exp_with_kwargs,
            #     (μ, σ) -> MvNormal(μ, Diagonal(σ .^ 2));
            #     rng=StableRNG(63),
            #     nb_samples=10^5,
            #     threaded=threaded,
            # ),
        ]
            @test E.dist_constructor(μ, σ) == MvNormal(μ, Diagonal(σ .^ 2))
            @test E(μ, σ; correct=true) ≈ true_mean(μ, σ) rtol = 0.1
            ∂mean_est = jacobian((μ, σ) -> E(μ, σ; correct=true), μ, σ)
            @test ∂mean_est[1] ≈ ∂mean_true[1] rtol = 0.1
            @test ∂mean_est[2] ≈ ∂mean_true[2] rtol = 0.1
        end
    end
end
# The baseline (control-variate) version of REINFORCE should produce gradient
# estimates with lower empirical variance than the plain version, for both
# parameters. Each estimator carries its own seeded RNG, so the two sample
# sets are independent and reproducible.
@testset "Reinforce variance reduction" begin
    μ, σ = 0.5, 1.0
    seed = 63
    with_vr = Reinforce(
        exp, Normal; nb_samples=100, variance_reduction=true, rng=StableRNG(seed)
    )
    without_vr = Reinforce(
        exp, Normal; nb_samples=100, variance_reduction=false, rng=StableRNG(seed)
    )
    # 1000 independent gradient estimates per estimator.
    samples_vr = [gradient(with_vr, μ, σ) for _ in 1:1000]
    samples_plain = [gradient(without_vr, μ, σ) for _ in 1:1000]
    # Compare per-parameter empirical variances.
    @test var(map(first, samples_vr)) < var(map(first, samples_plain))
    @test var(map(last, samples_vr)) < var(map(last, samples_plain))
end
# Calling the estimator directly must agree with averaging the atoms of its
# empirical distribution — exactly without variance reduction, and up to
# floating-point roundoff with it.
# (Was: a single `r_split` closure defined twice over a reassigned `r`,
# which overwrites the method and boxes the captured variable; each
# estimator now gets its own name.)
@testset "Reinforce proba dist rule" begin
    μ, σ = 0.5, 1.0
    seed = 63
    # Without variance reduction the two code paths must match exactly.
    r_plain = Reinforce(
        exp, Normal; nb_samples=100, variance_reduction=false, rng=StableRNG(seed), seed=0
    )
    r_plain_split(θ...) = mean(empirical_distribution(r_plain, θ...))
    @test r_plain(μ, σ) == r_plain_split(μ, σ)
    @test gradient(r_plain, μ, σ) == gradient(r_plain_split, μ, σ)
    # With variance reduction the gradients agree only up to roundoff,
    # hence the absolute tolerance.
    r_vr = Reinforce(
        exp, Normal; nb_samples=100, variance_reduction=true, rng=StableRNG(seed), seed=0
    )
    r_vr_split(θ...) = mean(empirical_distribution(r_vr, θ...))
    @test r_vr(μ, σ) == r_vr_split(μ, σ)
    @test all(isapprox.(gradient(r_vr, μ, σ), gradient(r_vr_split, μ, σ); atol=1e-10))
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 719 | using DifferentiableExpectations: reparametrize
using Distributions
using StableRNGs
using Test
rng = StableRNG(63)
# Sampling from the reparametrized distribution must reproduce the first two
# moments of the original univariate normal.
@testset "Univariate Normal" begin
    base = Normal(2.0, 1.0)
    reparam = reparametrize(base)
    # Fresh batch of 10^4 draws from the shared RNG on each call.
    draw_many() = [rand(rng, reparam) for _ in 1:(10^4)]
    @test mean(draw_many()) ≈ mean(base) rtol = 1e-1
    @test std(draw_many()) ≈ std(base) rtol = 1e-1
end
# Sampling from the reparametrized distribution must reproduce the mean and
# covariance of the original multivariate normal.
@testset "Multivariate Normal" begin
    base = MvNormal([2.0, 3.0], [2.0 0.01; 0.01 1.0])
    reparam = reparametrize(base)
    # Fresh batch of 10^4 draws from the shared RNG on each call.
    draw_many() = [rand(rng, reparam) for _ in 1:(10^4)]
    @test mean(draw_many()) ≈ mean(base) rtol = 1e-1
    @test cov(draw_many()) ≈ cov(base) rtol = 1e-1
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | code | 1174 | using Aqua: Aqua
using Documenter: Documenter
using JET: JET
using JuliaFormatter: JuliaFormatter
using DifferentiableExpectations
using Test
using Zygote
# Top-level test suite: code-quality "formalities" first (Aqua, JET,
# formatting, doctests), then the unit-test files via include.
@testset verbose = true "DifferentiableExpectations" begin
    @testset verbose = true "Formalities" begin
        @testset "Aqua" begin
            Aqua.test_all(
                DifferentiableExpectations;
                ambiguities=false,
                # The trailing comma is required: `(check_extras = false)`
                # without it parses as an assignment and passes the bare value
                # `false`, which disables the deps_compat check entirely
                # instead of running it with check_extras off.
                deps_compat=(check_extras=false,),
            )
        end
        @testset "JET" begin
            JET.test_package(DifferentiableExpectations; target_defined_modules=true)
        end
        @testset "JuliaFormatter" begin
            # With overwrite=false, format(...) reports whether the package is
            # already formatted instead of rewriting files.
            @test JuliaFormatter.format(
                DifferentiableExpectations; verbose=false, overwrite=false
            )
        end
        @testset "Documenter" begin
            Documenter.doctest(DifferentiableExpectations)
        end
    end
    @testset "Distribution" begin
        include("distribution.jl")
    end
    @testset verbose = true "Reparametrization" begin
        include("reparametrization.jl")
    end
    @testset verbose = true "Expectation" begin
        include("expectation.jl")
    end
end
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | docs | 1453 | # DifferentiableExpectations.jl
[](https://JuliaDecisionFocusedLearning.github.io/DifferentiableExpectations.jl/stable/)
[](https://JuliaDecisionFocusedLearning.github.io/DifferentiableExpectations.jl/dev/)
[](https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl/actions/workflows/Test.yml?query=branch%3Amain)
[](https://app.codecov.io/gh/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl)
[](https://github.com/JuliaDiff/BlueStyle)
A Julia package for differentiating through expectations with Monte-Carlo estimates.
It allows the computation of approximate derivatives for functions of the form
```math
F(\theta) = \mathbb{E}_{p(\theta)}[f(X)]
```
The following estimators are implemented:
- [REINFORCE](https://jmlr.org/papers/volume21/19-346/19-346.pdf#section.20)
- [Reparametrization](https://jmlr.org/papers/volume21/19-346/19-346.pdf#section.56)
> Warning: this package is experimental, use at your own risk and expect frequent breaking releases.
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
|
[
"MIT"
] | 0.2.0 | 829dd95b32a41526923f44799ce0762fcd9a3a37 | docs | 225 | # API reference
```@meta
CollapsedDocStrings = true
```
## Public
```@autodocs
Modules = [DifferentiableExpectations]
Private = false
```
## Private
```@autodocs
Modules = [DifferentiableExpectations]
Public = false
```
| DifferentiableExpectations | https://github.com/JuliaDecisionFocusedLearning/DifferentiableExpectations.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.