licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 2954 | # activate local env
# ------------------------------------------------------------------
# Demo script: distributed AutoMLPipeline workflows on the profb data.
# Activates the local project, spawns worker processes, builds
# sklearn-backed pipeline elements, and cross-validates several
# pipeline compositions, finishing with a parallel learner comparison.
# ------------------------------------------------------------------
using Pkg
Pkg.activate(".")
# load packages/modules
using Base.Threads
using Distributed
using CSV
using DataFrames
using AutoMLPipeline
using Random
# spawn workers only once, inheriting the active project environment
nprocs() ==1 && addprocs(exeflags=["--project=$(Base.active_project())"])
workers()
# disable warnings emitted by the Python side (sklearn via PythonCall)
@everywhere import PythonCall
@everywhere const PYC=PythonCall
@everywhere warnings = PYC.pyimport("warnings")
@everywhere warnings.filterwarnings("ignore")
@everywhere using DataFrames
@everywhere using AutoMLPipeline
# get data: first column is the target class, the rest are features
profbdata = getprofb()
X = profbdata[:,2:end];
Y = profbdata[:,1] |> Vector;
topdf(x)=first(x,5)  # peek at the first 5 rows
topdf(profbdata)
#### Scaler
rb = SKPreprocessor("RobustScaler");
pt = SKPreprocessor("PowerTransformer");
norm = SKPreprocessor("Normalizer");
mx = SKPreprocessor("MinMaxScaler");
std = SKPreprocessor("StandardScaler")
disc = CatNumDiscriminator();
#### categorical preprocessing
ohe = OneHotEncoder();
#### Column selector
catf = CatFeatureSelector();
numf = NumFeatureSelector();
# load filters
#### Decomposition (a* variants auto-select the number of components)
apca = SKPreprocessor("PCA",Dict(:autocomponent=>true));
pca = SKPreprocessor("PCA");
afa = SKPreprocessor("FactorAnalysis",Dict(:autocomponent=>true));
fa = SKPreprocessor("FactorAnalysis");
aica = SKPreprocessor("FastICA",Dict(:autocomponent=>true));
ica = SKPreprocessor("FastICA");
#### Learners
rf = SKLearner("RandomForestClassifier",Dict(:impl_args=>Dict(:n_estimators => 10)));
gb = SKLearner("GradientBoostingClassifier");
lsvc = SKLearner("LinearSVC");
mlp = SKLearner("MLPClassifier");
jrf = RandomForest();  # pure-Julia random forest
stack = StackEnsemble();
rbfsvc = SKLearner("SVC");
ada = SKLearner("AdaBoostClassifier");
vote = VoteEnsemble();
best = BestLearner();
tree = PrunedTree()
sgd = SKLearner("SGDClassifier");
# filter categories and hotbit encode
pohe = @pipeline catf |> ohe |> fa |> lsvc ;
crossvalidate(pohe,X,Y)
# filter numeric and apply pca and ica (unigrams)
pdec = @pipeline (numf |> pca) + (numf |> ica);
tr = fit_transform!(pdec,X,Y)
# filter numeric, apply rb/pt transforms and ica/pca extractions (bigrams)
ppt = @pipeline (numf |> rb |> ica) + (numf |> pt |> pca);
tr = fit_transform!(ppt,X,Y)
# rf learn baseline
# NOTE(review): (catf |> ohe) appears twice in this feature union —
# presumably a deliberate demo of duplicated branches, but confirm it is
# not an accidental copy-paste.
rfp1 = @pipeline ( (catf |> ohe) + (catf |> ohe) + (numf) ) |> rf;
crossvalidate(rfp1, X,Y)
# rf learn: bigrams, 2-blocks
rfp2 = @pipeline ((catf |> ohe) + numf |> rb |> ica) + (numf |> pt |> pca) |> ada;
crossvalidate(rfp2, X,Y)
# lsvc learn: bigrams, 3-blocks
plsvc = @pipeline ((numf |> rb |> pca)+(numf |> rb |> fa)+(numf |> rb |> ica)+(catf |> ohe )) |> lsvc;
pred = fit_transform!(plsvc,X,Y)
crossvalidate(plsvc,X,Y)
# compare learners in parallel: each @distributed iteration cross-validates
# one learner and returns a one-row DataFrame; (vcat) stitches them together
learners = [jrf,ada,sgd,tree,lsvc]
learners = @distributed (vcat) for learner in learners
pcmc = @pipeline disc |> ((catf |> ohe) + (numf)) |> rb |> pca |> learner
println(learner.name)
mean,sd,_ = crossvalidate(pcmc,X,Y,"accuracy_score",10)
DataFrame(name=learner.name,mean=mean,sd=sd)
end
sort!(learners,:mean,rev=true)
@show learners;
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 1140 | # note: this example only works on pure julia implementations of
# preprocessing elements and learners. Scikitlearn and other
# PyCall functions are not thread-safe and cannot be used inside
# the threads loop.
# activate local env
# NOTE: this example only works with pure-Julia preprocessors/learners;
# sklearn elements via PythonCall are not thread-safe inside @threads.
using Pkg
Pkg.activate(".")
using AutoMLPipeline
using AutoMLPipeline.Utils
using Base.Threads
using DataFrames
# disable warnings
import PythonCall
const PYC=PythonCall
warnings = PYC.pyimport("warnings")
warnings.filterwarnings("ignore")
# load the pro-football dataset: first column is the target class
begin
    profbdata = getprofb()
    X = profbdata[:,2:end]
    Y = profbdata[:,1] |> Vector;
    head(x)=first(x,5)
    head(profbdata)
end
#### Column selector
catf = CatFeatureSelector();
numf = NumFeatureSelector()
ohe = OneHotEncoder()
#### Learners (pure-Julia implementations only)
rf = RandomForest();
ada = Adaboost()
dt=PrunedTree()
accuracy(X,Y)=score(:accuracy,X,Y)
# shared results accumulator.
# BUGFIX: `push!` on a Vector is not thread-safe; concurrent pushes from
# the nested @threads loops below were a data race. A lock now serializes
# mutation of `acc`.
acc=[]
const acclock = ReentrantLock()
learners=[rf,ada,dt]
@threads for i in 1:30
    @threads for lr in learners
        println(lr.name)
        pipe=@pipeline ((catf |> ohe) +(numf )) |> lr
        m=crossvalidate(pipe,X,Y,accuracy,10,true)
        # guard the shared accumulator
        lock(acclock) do
            push!(acc,(m...,name=lr.name))
        end
        println(m)
    end
end
res = DataFrame(acc)
sort!(res, :mean, rev=true)
res
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 743 | # make sure local environment is activated
# Driver script: run the TwoBlocksPipeline search on the iris dataset
# using local Distributed workers.
using Pkg
Pkg.activate(".")
using Distributed
using DataFrames
using CSV
using Random
# add workers (only when none exist yet), inheriting the project env
nprocs() ==1 && addprocs(exeflags=["--project=$(Base.active_project())"])
workers()
# disable warnings from the Python side on every process
@everywhere import PythonCall
@everywhere const PYC=PythonCall
@everywhere warnings = PYC.pyimport("warnings")
@everywhere warnings.filterwarnings("ignore")
Random.seed!(10)  # reproducible run on the master process
@everywhere include("twoblocks.jl")
@everywhere using Main.TwoBlocksPipeline
dataset = getiris()
X = dataset[:,1:4]  # NOTE(review): dead assignment — overwritten two lines below; confirm intended
Y = dataset[:,5] |> collect
X = dataset[:,2:end-1]  # features actually used: columns 2..4
results=TwoBlocksPipeline.twoblockspipelinesearch(X,Y)
println("best pipeline is: ", results[1,1],", with mean-sd ",results[1,2], " ± ",results[1,3])
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 4946 | module TwoBlocksPipeline
export twoblockspipelinesearch
using Distributed
#nprocs() == 1 && addprocs(; exeflags = "--project")
#nprocs() == 1 && addprocs()
@everywhere using AutoMLPipeline
@everywhere using DataFrames
@everywhere using DataFrames:DataFrame
@everywhere using AutoMLPipeline: score
@everywhere using Random
# disable truncation of dataframes columns
import Base.show
# NOTE(review): extending Base.show for AbstractDataFrame — a function and a
# type both owned by other packages — is type piracy: it changes display
# behavior for every DataFrames user in the session, not just this module.
# Confirm this is intentional before reusing elsewhere.
show(df::AbstractDataFrame) = show(df,truncate=0)
show(io::IO,df::AbstractDataFrame) = show(io,df;truncate=0)
# define scalers
const rb = SKPreprocessor("RobustScaler",Dict(:name=>"rb"))
const pt = SKPreprocessor("PowerTransformer",Dict(:name=>"pt"))
const norm = SKPreprocessor("Normalizer",Dict(:name=>"norm"))
const mx = SKPreprocessor("MinMaxScaler",Dict(:name=>"mx"))
const std = SKPreprocessor("StandardScaler",Dict(:name=>"std"))
# define extractors
const pca = SKPreprocessor("PCA",Dict(:name=>"pca"))
const fa = SKPreprocessor("FactorAnalysis",Dict(:name=>"fa"))
const ica = SKPreprocessor("FastICA",Dict(:name=>"ica"))
# define learners
const rf = SKLearner("RandomForestClassifier",Dict(:name => "rf"))
const ada = SKLearner("AdaBoostClassifier",Dict(:name => "ada"))
const gb = SKLearner("GradientBoostingClassifier",Dict(:name => "gb"))
const lsvc = SKLearner("LinearSVC",Dict(:name => "lsvc"))
const rbfsvc = SKLearner("SVC",Dict(:name => "rbfsvc"))
const dt = SKLearner("DecisionTreeClassifier",Dict(:name =>"dt"))
# preprocessing
const noop = Identity(Dict(:name =>"noop"))
const ohe = OneHotEncoder(Dict(:name=>"ohe"))
const catf = CatFeatureSelector(Dict(:name=>"catf"))
const numf = NumFeatureSelector(Dict(:name=>"numf"))
const vscalers = [rb,pt,norm,mx,std,noop]
const vextractors = [pca,fa,ica,noop]
const vlearners = [rf,gb,lsvc,rbfsvc,ada,dt]
const learnerdict = Dict("rf"=>rf,"gb"=>gb,"lsvc"=>lsvc,"rbfsvc"=>rbfsvc,"ada"=>ada,"dt"=>dt)
"""
    oneblock_pipeline_factory(scalers, extractors, learners)

Enumerate every `(scaler |> extractor) |> learner` candidate over the given
component lists and return a DataFrame with a readable `Description` and the
`Pipeline` object per row. The nested `@distributed (vcat)` loops build the
cross-product in parallel across workers and concatenate the result rows.
"""
function oneblock_pipeline_factory(scalers,extractors,learners)
    pipelines = @distributed (vcat) for learner in learners
        @distributed (vcat) for extractor in extractors
            @distributed (vcat) for scaler in scalers
                # shared preprocessing: one-hot categoricals + raw numerics
                base = @pipeline ((catf |> ohe) + numf)
                # one-block prp
                candidate = @pipeline base |> (scaler |> extractor) |> learner
                # strip the `_xyz` random suffix from each component name
                sname = scaler.name[1:end - 4]
                xname = extractor.name[1:end - 4]
                lrname = learner.name[1:end - 4]
                DataFrame(Description="($sname |> $xname) |> $lrname",Pipeline=candidate)
            end
        end
    end
    return pipelines
end
"""
    evaluate_pipeline(dfpipelines, X, Y; folds=3)

Cross-validate each pipeline row of `dfpipelines` in parallel across workers
using the balanced-accuracy metric, returning one row per pipeline with its
description, mean score, standard deviation, and the pipeline object.
"""
function evaluate_pipeline(dfpipelines,X,Y;folds=3)
    records = @distributed (vcat) for prow in eachrow(dfpipelines)
        perf = crossvalidate(prow.Pipeline,X,Y,"balanced_accuracy_score";nfolds=folds)
        DataFrame(;Description=prow.Description,mean=perf.mean,sd=perf.std,prow.Pipeline)
    end
    return records
end
"""
    twoblock_pipeline_factory(scalers, extractors, learners)

Enumerate every two-block pipeline `((sc1 |> xt1) + (sc2 |> xt2)) |> learner`
over the given scalers, extractors, and learners, returning a DataFrame of
`Description` / `Pipeline` rows built in parallel across workers.
"""
function twoblock_pipeline_factory(scalers,extractors,learners)
    pipelines = @distributed (vcat) for learner in learners
        @distributed (vcat) for ext1 in extractors
            @distributed (vcat) for ext2 in extractors
                @distributed (vcat) for scl1 in scalers
                    @distributed (vcat) for scl2 in scalers
                        base = @pipeline ((catf |> ohe) + numf)
                        candidate = @pipeline base |> ((scl1 |> ext1) + (scl2 |> ext2)) |> learner
                        # strip the `_xyz` random suffix from component names
                        s1 = scl1.name[1:end - 4]; x1 = ext1.name[1:end - 4]
                        s2 = scl2.name[1:end - 4]; x2 = ext2.name[1:end - 4]
                        lrn = learner.name[1:end - 4]
                        pname = "($s1 |> $x1) + ($s2 |> $x2) |> $lrn"
                        DataFrame(Description=pname,Pipeline=candidate)
                    end
                end
            end
        end
    end
    return pipelines
end
"""
    model_selection_pipeline(learners)

Build one default `(rb |> pca)` pipeline per learner; used to rank the
learners cheaply before the exhaustive two-block search.
"""
function model_selection_pipeline(learners)
    pipelines = @distributed (vcat) for learner in learners
        base = @pipeline ((catf |> ohe) + numf)
        candidate = @pipeline base |> (rb |> pca) |> learner
        DataFrame(Description="(rb |> pca) |> $(learner.name[1:end-4])",Pipeline=candidate)
    end
    return pipelines
end
# Strip the 4-character random suffix (e.g. "_a1b") appended to a
# component's name at construction time.
lname(n::Learner) = n.name[1:end-4]
"""
    twoblockspipelinesearch(X::DataFrame, Y::Vector;
                            scalers=vscalers, extractors=vextractors,
                            learners=vlearners, nfolds=3)

Two-stage pipeline search: first cross-validate a default `(rb |> pca)`
pipeline for each learner to pick the best model, then exhaustively
cross-validate all two-block scaler/extractor combinations for that model.
Returns the evaluated two-block pipelines sorted best-first by mean score.

BUGFIX: the keyword arguments `scalers`, `extractors`, and `learners` were
previously ignored — the body always used the global defaults. They are
now honored, and the best model is looked up within `learners` rather than
the global `learnerdict`.
"""
function twoblockspipelinesearch(X::DataFrame,Y::Vector;scalers=vscalers,extractors=vextractors,learners=vlearners,nfolds=3)
    dfpipes = model_selection_pipeline(learners)
    # find the best model by evaluating the candidate models
    modelsperf = evaluate_pipeline(dfpipes,X,Y;folds=nfolds)
    sort!(modelsperf,:mean, rev = true)
    # get the string name of the top model
    bestm = filter(x->occursin(x,modelsperf.Description[1]),lname.(learners))[1]
    # get the corresponding model object from the supplied learner list
    bestmodel = learners[findfirst(l -> lname(l) == bestm, learners)]
    # use the best model to generate the two-block pipeline search space
    dfp = twoblock_pipeline_factory(scalers,extractors,[bestmodel])
    # evaluate the pipelines
    bestp = evaluate_pipeline(dfp,X,Y;folds=nfolds)
    sort!(bestp,:mean, rev = true)
    show(bestp;allrows=false,truncate=1,allcols=false)
    println()
    return bestp
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 810 | using Distributed, K8sClusterManagers
using DataFrames
using CSV
# cluster sizing from the command line:
#   ARGS[1] = number of worker pods, ARGS[2] = integral CPU request per pod.
# BUGFIX: renamed from `workers` (which shadowed Distributed.workers) and
# `cpu_fraction` (misleading — the value is parsed as an Int).
npods = parse(Int,ARGS[1])
ncpus = parse(Int,ARGS[2])
addprocs(K8sClusterManager(npods,cpu=ncpus); exeflags="--project")
#addprocs(K8sClusterManager(10, cpu=1, memory="300Mi", pending_timeout=300); exeflags="--project")
#nprocs() == 1 && addprocs(; exeflags = "--project")
# load the pipeline-search module on every worker
@everywhere include("twoblocks.jl")
@everywhere using Main.TwoBlocksPipeline
dataset = getiris()
Y = dataset[:,5] |> collect      # species labels
X = dataset[:,2:end-1]           # features: columns 2..4 (dead first X assignment removed)
#dataset=CSV.read("environment_data.csv",DataFrame)
#X = select(dataset, Not([:state,:time]))
#Y = dataset.state
results=TwoBlocksPipeline.twoblockspipelinesearch(X,Y)
println(results)
println("best pipeline is: ", results[1,1],", with mean-sd ",results[1,2], " ± ",results[1,3])
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 4946 | module TwoBlocksPipeline
export twoblockspipelinesearch
using Distributed
#nprocs() == 1 && addprocs(; exeflags = "--project")
#nprocs() == 1 && addprocs()
@everywhere using AutoMLPipeline
@everywhere using DataFrames
@everywhere using DataFrames:DataFrame
@everywhere using AutoMLPipeline: score
@everywhere using Random
# disable truncation of dataframes columns
import Base.show
show(df::AbstractDataFrame) = show(df,truncate=0)
show(io::IO,df::AbstractDataFrame) = show(io,df;truncate=0)
# define scalers
const rb = SKPreprocessor("RobustScaler",Dict(:name=>"rb"))
const pt = SKPreprocessor("PowerTransformer",Dict(:name=>"pt"))
const norm = SKPreprocessor("Normalizer",Dict(:name=>"norm"))
const mx = SKPreprocessor("MinMaxScaler",Dict(:name=>"mx"))
const std = SKPreprocessor("StandardScaler",Dict(:name=>"std"))
# define extractors
const pca = SKPreprocessor("PCA",Dict(:name=>"pca"))
const fa = SKPreprocessor("FactorAnalysis",Dict(:name=>"fa"))
const ica = SKPreprocessor("FastICA",Dict(:name=>"ica"))
# define learners
const rf = SKLearner("RandomForestClassifier",Dict(:name => "rf"))
const ada = SKLearner("AdaBoostClassifier",Dict(:name => "ada"))
const gb = SKLearner("GradientBoostingClassifier",Dict(:name => "gb"))
const lsvc = SKLearner("LinearSVC",Dict(:name => "lsvc"))
const rbfsvc = SKLearner("SVC",Dict(:name => "rbfsvc"))
const dt = SKLearner("DecisionTreeClassifier",Dict(:name =>"dt"))
# preprocessing
const noop = Identity(Dict(:name =>"noop"))
const ohe = OneHotEncoder(Dict(:name=>"ohe"))
const catf = CatFeatureSelector(Dict(:name=>"catf"))
const numf = NumFeatureSelector(Dict(:name=>"numf"))
const vscalers = [rb,pt,norm,mx,std,noop]
const vextractors = [pca,fa,ica,noop]
const vlearners = [rf,gb,lsvc,rbfsvc,ada,dt]
const learnerdict = Dict("rf"=>rf,"gb"=>gb,"lsvc"=>lsvc,"rbfsvc"=>rbfsvc,"ada"=>ada,"dt"=>dt)
function oneblock_pipeline_factory(scalers,extractors,learners)
results = @distributed (vcat) for lr in learners
@distributed (vcat) for xt in extractors
@distributed (vcat) for sc in scalers
# baseline preprocessing
prep = @pipeline ((catf |> ohe) + numf)
# one-block prp
expx = @pipeline prep |> (sc |> xt) |> lr
scn = sc.name[1:end - 4];xtn = xt.name[1:end - 4]; lrn = lr.name[1:end - 4]
pname = "($scn |> $xtn) |> $lrn"
DataFrame(Description=pname,Pipeline=expx)
end
end
end
return results
end
function evaluate_pipeline(dfpipelines,X,Y;folds=3)
res=@distributed (vcat) for prow in eachrow(dfpipelines)
perf = crossvalidate(prow.Pipeline,X,Y,"balanced_accuracy_score";nfolds=folds)
DataFrame(;Description=prow.Description,mean=perf.mean,sd=perf.std,prow.Pipeline)
end
return res
end
function twoblock_pipeline_factory(scalers,extractors,learners)
results = @distributed (vcat) for lr in learners
@distributed (vcat) for xt1 in extractors
@distributed (vcat) for xt2 in extractors
@distributed (vcat) for sc1 in scalers
@distributed (vcat) for sc2 in scalers
prep = @pipeline ((catf |> ohe) + numf)
expx = @pipeline prep |> ((sc1 |> xt1) + (sc2 |> xt2)) |> lr
scn1 = sc1.name[1:end - 4];xtn1 = xt1.name[1:end - 4];
scn2 = sc2.name[1:end - 4];xtn2 = xt2.name[1:end - 4];
lrn = lr.name[1:end - 4]
pname = "($scn1 |> $xtn1) + ($scn2 |> $xtn2) |> $lrn"
DataFrame(Description=pname,Pipeline=expx)
end
end
end
end
end
return results
end
function model_selection_pipeline(learners)
results = @distributed (vcat) for lr in learners
prep = @pipeline ((catf |> ohe) + numf)
expx = @pipeline prep |> (rb |> pca) |> lr
pname = "(rb |> pca) |> $(lr.name[1:end-4])"
DataFrame(Description=pname,Pipeline=expx)
end
return results
end
function lname(n::Learner)
n.name[1:end-4]
end
"""
    twoblockspipelinesearch(X::DataFrame, Y::Vector;
                            scalers=vscalers, extractors=vextractors,
                            learners=vlearners, nfolds=3)

Two-stage pipeline search: first cross-validate a default `(rb |> pca)`
pipeline for each learner to pick the best model, then exhaustively
cross-validate all two-block scaler/extractor combinations for that model.
Returns the evaluated two-block pipelines sorted best-first by mean score.

BUGFIX: the keyword arguments `scalers`, `extractors`, and `learners` were
previously ignored — the body always used the global defaults. They are
now honored, and the best model is looked up within `learners` rather than
the global `learnerdict`.
"""
function twoblockspipelinesearch(X::DataFrame,Y::Vector;scalers=vscalers,extractors=vextractors,learners=vlearners,nfolds=3)
    dfpipes = model_selection_pipeline(learners)
    # find the best model by evaluating the candidate models
    modelsperf = evaluate_pipeline(dfpipes,X,Y;folds=nfolds)
    sort!(modelsperf,:mean, rev = true)
    # get the string name of the top model
    bestm = filter(x->occursin(x,modelsperf.Description[1]),lname.(learners))[1]
    # get the corresponding model object from the supplied learner list
    bestmodel = learners[findfirst(l -> lname(l) == bestm, learners)]
    # use the best model to generate the two-block pipeline search space
    dfp = twoblock_pipeline_factory(scalers,extractors,[bestmodel])
    # evaluate the pipelines
    bestp = evaluate_pipeline(dfp,X,Y;folds=nfolds)
    sort!(bestp,:mean, rev = true)
    show(bestp;allrows=false,truncate=1,allcols=false)
    println()
    return bestp
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 810 | using Distributed, K8sClusterManagers
using DataFrames
using CSV
# cluster sizing from the command line:
#   ARGS[1] = number of worker pods, ARGS[2] = integral CPU request per pod.
# BUGFIX: renamed from `workers` (which shadowed Distributed.workers) and
# `cpu_fraction` (misleading — the value is parsed as an Int).
npods = parse(Int,ARGS[1])
ncpus = parse(Int,ARGS[2])
addprocs(K8sClusterManager(npods,cpu=ncpus); exeflags="--project")
#addprocs(K8sClusterManager(10, cpu=1, memory="300Mi", pending_timeout=300); exeflags="--project")
#nprocs() == 1 && addprocs(; exeflags = "--project")
# load the pipeline-search module on every worker
@everywhere include("twoblocks.jl")
@everywhere using Main.TwoBlocksPipeline
dataset = getiris()
Y = dataset[:,5] |> collect      # species labels
X = dataset[:,2:end-1]           # features: columns 2..4 (dead first X assignment removed)
#dataset=CSV.read("environment_data.csv",DataFrame)
#X = select(dataset, Not([:state,:time]))
#Y = dataset.state
results=TwoBlocksPipeline.twoblockspipelinesearch(X,Y)
println(results)
println("best pipeline is: ", results[1,1],", with mean-sd ",results[1,2], " ± ",results[1,3])
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 4946 | module TwoBlocksPipeline
export twoblockspipelinesearch
using Distributed
#nprocs() == 1 && addprocs(; exeflags = "--project")
#nprocs() == 1 && addprocs()
@everywhere using AutoMLPipeline
@everywhere using DataFrames
@everywhere using DataFrames:DataFrame
@everywhere using AutoMLPipeline: score
@everywhere using Random
# disable truncation of dataframes columns
import Base.show
show(df::AbstractDataFrame) = show(df,truncate=0)
show(io::IO,df::AbstractDataFrame) = show(io,df;truncate=0)
# define scalers
const rb = SKPreprocessor("RobustScaler",Dict(:name=>"rb"))
const pt = SKPreprocessor("PowerTransformer",Dict(:name=>"pt"))
const norm = SKPreprocessor("Normalizer",Dict(:name=>"norm"))
const mx = SKPreprocessor("MinMaxScaler",Dict(:name=>"mx"))
const std = SKPreprocessor("StandardScaler",Dict(:name=>"std"))
# define extractors
const pca = SKPreprocessor("PCA",Dict(:name=>"pca"))
const fa = SKPreprocessor("FactorAnalysis",Dict(:name=>"fa"))
const ica = SKPreprocessor("FastICA",Dict(:name=>"ica"))
# define learners
const rf = SKLearner("RandomForestClassifier",Dict(:name => "rf"))
const ada = SKLearner("AdaBoostClassifier",Dict(:name => "ada"))
const gb = SKLearner("GradientBoostingClassifier",Dict(:name => "gb"))
const lsvc = SKLearner("LinearSVC",Dict(:name => "lsvc"))
const rbfsvc = SKLearner("SVC",Dict(:name => "rbfsvc"))
const dt = SKLearner("DecisionTreeClassifier",Dict(:name =>"dt"))
# preprocessing
const noop = Identity(Dict(:name =>"noop"))
const ohe = OneHotEncoder(Dict(:name=>"ohe"))
const catf = CatFeatureSelector(Dict(:name=>"catf"))
const numf = NumFeatureSelector(Dict(:name=>"numf"))
const vscalers = [rb,pt,norm,mx,std,noop]
const vextractors = [pca,fa,ica,noop]
const vlearners = [rf,gb,lsvc,rbfsvc,ada,dt]
const learnerdict = Dict("rf"=>rf,"gb"=>gb,"lsvc"=>lsvc,"rbfsvc"=>rbfsvc,"ada"=>ada,"dt"=>dt)
function oneblock_pipeline_factory(scalers,extractors,learners)
results = @distributed (vcat) for lr in learners
@distributed (vcat) for xt in extractors
@distributed (vcat) for sc in scalers
# baseline preprocessing
prep = @pipeline ((catf |> ohe) + numf)
# one-block prp
expx = @pipeline prep |> (sc |> xt) |> lr
scn = sc.name[1:end - 4];xtn = xt.name[1:end - 4]; lrn = lr.name[1:end - 4]
pname = "($scn |> $xtn) |> $lrn"
DataFrame(Description=pname,Pipeline=expx)
end
end
end
return results
end
function evaluate_pipeline(dfpipelines,X,Y;folds=3)
res=@distributed (vcat) for prow in eachrow(dfpipelines)
perf = crossvalidate(prow.Pipeline,X,Y,"balanced_accuracy_score";nfolds=folds)
DataFrame(;Description=prow.Description,mean=perf.mean,sd=perf.std,prow.Pipeline)
end
return res
end
function twoblock_pipeline_factory(scalers,extractors,learners)
results = @distributed (vcat) for lr in learners
@distributed (vcat) for xt1 in extractors
@distributed (vcat) for xt2 in extractors
@distributed (vcat) for sc1 in scalers
@distributed (vcat) for sc2 in scalers
prep = @pipeline ((catf |> ohe) + numf)
expx = @pipeline prep |> ((sc1 |> xt1) + (sc2 |> xt2)) |> lr
scn1 = sc1.name[1:end - 4];xtn1 = xt1.name[1:end - 4];
scn2 = sc2.name[1:end - 4];xtn2 = xt2.name[1:end - 4];
lrn = lr.name[1:end - 4]
pname = "($scn1 |> $xtn1) + ($scn2 |> $xtn2) |> $lrn"
DataFrame(Description=pname,Pipeline=expx)
end
end
end
end
end
return results
end
function model_selection_pipeline(learners)
results = @distributed (vcat) for lr in learners
prep = @pipeline ((catf |> ohe) + numf)
expx = @pipeline prep |> (rb |> pca) |> lr
pname = "(rb |> pca) |> $(lr.name[1:end-4])"
DataFrame(Description=pname,Pipeline=expx)
end
return results
end
function lname(n::Learner)
n.name[1:end-4]
end
"""
    twoblockspipelinesearch(X::DataFrame, Y::Vector;
                            scalers=vscalers, extractors=vextractors,
                            learners=vlearners, nfolds=3)

Two-stage pipeline search: first cross-validate a default `(rb |> pca)`
pipeline for each learner to pick the best model, then exhaustively
cross-validate all two-block scaler/extractor combinations for that model.
Returns the evaluated two-block pipelines sorted best-first by mean score.

BUGFIX: the keyword arguments `scalers`, `extractors`, and `learners` were
previously ignored — the body always used the global defaults. They are
now honored, and the best model is looked up within `learners` rather than
the global `learnerdict`.
"""
function twoblockspipelinesearch(X::DataFrame,Y::Vector;scalers=vscalers,extractors=vextractors,learners=vlearners,nfolds=3)
    dfpipes = model_selection_pipeline(learners)
    # find the best model by evaluating the candidate models
    modelsperf = evaluate_pipeline(dfpipes,X,Y;folds=nfolds)
    sort!(modelsperf,:mean, rev = true)
    # get the string name of the top model
    bestm = filter(x->occursin(x,modelsperf.Description[1]),lname.(learners))[1]
    # get the corresponding model object from the supplied learner list
    bestmodel = learners[findfirst(l -> lname(l) == bestm, learners)]
    # use the best model to generate the two-block pipeline search space
    dfp = twoblock_pipeline_factory(scalers,extractors,[bestmodel])
    # evaluate the pipelines
    bestp = evaluate_pipeline(dfp,X,Y;folds=nfolds)
    sort!(bestp,:mean, rev = true)
    show(bestp;allrows=false,truncate=1,allcols=false)
    println()
    return bestp
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 501 | abstract type Computer <: Machine
# Type hierarchy: a Machine is either a Computer (Learner/Transformer)
# or a Workflow (a composition of machines).
# BUGFIX: the `end` keywords were missing from these abstract type
# declarations (a syntax error); restored here.
abstract type Workflow <: Machine end
abstract type Learner <: Computer end
abstract type Transformer <: Computer end

"""
    fit!(mc::Machine, input::DataFrame, output::Vector)

Fallback that signals a `Machine` subtype lacking a `fit!` implementation.
"""
function fit!(mc::Machine, input::DataFrame, output::Vector)
    error(typeof(mc)," has no implementation.")
end

"""
    transform!(mc::Machine, input::DataFrame)

Fallback that signals a `Machine` subtype lacking a `transform!` implementation.
"""
function transform!(mc::Machine, input::DataFrame)
    error(typeof(mc)," has no implementation.")
end

"""
    fit_transform!(mc::Machine, input::DataFrame, output::Vector)

Train `mc` on `input`/`output`, then return `transform!(mc, input)`.
"""
function fit_transform!(mc::Machine, input::DataFrame, output::Vector)
    fit!(mc,input,output)
    transform!(mc,input)
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 2052 | module AutoMLPipeline
using AMLPipelineBase
using AMLPipelineBase.AbsTypes
export fit, fit!, transform, transform!,fit_transform, fit_transform!
using AMLPipelineBase
using AMLPipelineBase: AbsTypes, Utils, BaselineModels, Pipelines
using AMLPipelineBase: BaseFilters, FeatureSelectors, DecisionTreeLearners
using AMLPipelineBase: EnsembleMethods, CrossValidators
using AMLPipelineBase: NARemovers
export Machine, Learner, Transformer, Workflow, Computer
export holdout, kfold, score, infer_eltype, nested_dict_to_tuples,
nested_dict_set!, nested_dict_merge, create_transformer,
mergedict, getiris, getprofb,
skipmean,skipmedian,skipstd,
aggregatorclskipmissing,
find_catnum_columns,
train_test_split
export Baseline, Identity
export Imputer,OneHotEncoder,Wrapper
export PrunedTree,RandomForest,Adaboost
export VoteEnsemble, StackEnsemble, BestLearner
export FeatureSelector, CatFeatureSelector, NumFeatureSelector, CatNumDiscriminator
export crossvalidate
export NARemover
export @pipeline, @pipelinex
export +, |>, *, |, >>
export Pipeline, ComboPipeline
import AMLPipelineBase.AbsTypes: fit!, transform!
# --------------------------------------------
include("skpreprocessor.jl")
using .SKPreprocessors
export SKPreprocessor, skpreprocessors
include("sklearners.jl")
using .SKLearners
export SKLearner, sklearners
include("skcrossvalidator.jl")
using .SKCrossValidators
export crossvalidate
export skoperator
"""
    skoperator(name::String; args...)::Machine

Look up `name` among the wrapped scikit-learn learners and preprocessors and
return the matching `SKLearner` or `SKPreprocessor` instance. Prints the
valid names and throws `ArgumentError` when `name` is unknown.
"""
function skoperator(name::String; args...)::Machine
    learnernames = keys(SKLearners.learner_dict)
    preprocnames = keys(SKPreprocessors.preprocessor_dict)
    if name ∈ learnernames
        return SKLearner(name; args...)
    elseif name ∈ preprocnames
        return SKPreprocessor(name; args...)
    else
        skoperator()  # show the user the valid choices before failing
        throw(ArgumentError("$name does not exist"))
    end
end
"""
    skoperator()

Print the names of all available scikit-learn pipeline elements.
"""
function skoperator()
    learnernames = keys(SKLearners.learner_dict)
    preprocnames = keys(SKPreprocessors.preprocessor_dict)
    println("Please choose among these pipeline elements:")
    println([learnernames..., preprocnames...])
end
end # module
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 4841 | module SKCrossValidators
import PythonCall
const PYC=PythonCall
# standard included modules
using DataFrames
using Random
using ..AbsTypes
using ..Utils
import ..CrossValidators: crossvalidate
export crossvalidate
const metric_dict = Dict{String,PYC.Py}()
const SKM = PYC.pynew()
"""
    __init__()

Module initializer: import `sklearn.metrics` lazily (required for
precompilation safety with PythonCall) and populate `metric_dict` with the
supported classification and regression metric functions.
"""
function __init__()
    PYC.pycopy!(SKM, PYC.pyimport("sklearn.metrics"))
    # classification metrics
    metric_dict["roc_auc_score"] = SKM.roc_auc_score
    metric_dict["accuracy_score"] = SKM.accuracy_score
    metric_dict["auc"] = SKM.auc
    metric_dict["average_precision_score"] = SKM.average_precision_score
    metric_dict["balanced_accuracy_score"] = SKM.balanced_accuracy_score
    metric_dict["brier_score_loss"] = SKM.brier_score_loss
    metric_dict["classification_report"] = SKM.classification_report
    metric_dict["cohen_kappa_score"] = SKM.cohen_kappa_score
    metric_dict["confusion_matrix"] = SKM.confusion_matrix
    metric_dict["f1_score"] = SKM.f1_score
    metric_dict["fbeta_score"] = SKM.fbeta_score
    metric_dict["hamming_loss"] = SKM.hamming_loss
    metric_dict["hinge_loss"] = SKM.hinge_loss
    metric_dict["log_loss"] = SKM.log_loss
    metric_dict["matthews_corrcoef"] = SKM.matthews_corrcoef
    metric_dict["multilabel_confusion_matrix"] = SKM.multilabel_confusion_matrix
    metric_dict["precision_recall_curve"] = SKM.precision_recall_curve
    metric_dict["precision_recall_fscore_support"] = SKM.precision_recall_fscore_support
    metric_dict["precision_score"] = SKM.precision_score
    metric_dict["recall_score"] = SKM.recall_score
    # (removed a redundant duplicate assignment of "roc_auc_score" here)
    metric_dict["roc_curve"] = SKM.roc_curve
    metric_dict["jaccard_score"] = SKM.jaccard_score
    metric_dict["zero_one_loss"] = SKM.zero_one_loss
    # regression metrics
    metric_dict["mean_squared_error"] = SKM.mean_squared_error
    metric_dict["mean_squared_log_error"] = SKM.mean_squared_log_error
    metric_dict["mean_absolute_error"] = SKM.mean_absolute_error
    metric_dict["median_absolute_error"] = SKM.median_absolute_error
    metric_dict["r2_score"] = SKM.r2_score
    metric_dict["max_error"] = SKM.max_error
    metric_dict["mean_poisson_deviance"] = SKM.mean_poisson_deviance
    metric_dict["mean_gamma_deviance"] = SKM.mean_gamma_deviance
    metric_dict["mean_tweedie_deviance"] = SKM.mean_tweedie_deviance
    metric_dict["explained_variance_score"] = SKM.explained_variance_score
end
"""
    checkfun(sfunc::String)

Validate that `sfunc` names a supported sklearn metric; print the supported
metric keys and raise an error otherwise.
"""
function checkfun(sfunc::String)
    haskey(metric_dict, sfunc) && return
    println("$sfunc metric is not supported")
    println("metric: ",keys(metric_dict))
    error("Metric keyword error")
end
"""
crossvalidate(pl::Machine,X::DataFrame,Y::Vector,sfunc::String="balanced_accuracy_score",nfolds=10)
Runs K-fold cross-validation using balanced accuracy as the default. It support the
following metrics for classification:
- "accuracy_score"
- "balanced_accuracy_score"
- "cohen_kappa_score"
- "jaccard_score"
- "matthews_corrcoef"
- "hamming_loss"
- "zero_one_loss"
- "f1_score"
- "precision_score"
- "recall_score"
and the following metrics for regression:
- "mean_squared_error"
- "mean_squared_log_error"
- "median_absolute_error"
- "r2_score"
- "max_error"
- "explained_variance_score"
"""
function crossvalidate(pl::Machine,X::DataFrame,Y::Vector,
sfunc::String; nfolds=10,verbose::Bool=true)
YC=Y
if !(eltype(YC) <: Real)
YC = Y |> Vector{String}
end
checkfun(sfunc)
pfunc = metric_dict[sfunc]
metric(a,b) = pfunc(a,b) |> (x -> PYC.pyconvert(Float64,x))
crossvalidate(pl,X,YC,metric,nfolds,verbose)
end
# Positional-argument convenience overloads forwarding to the keyword form.
function crossvalidate(pl::Machine,X::DataFrame,Y::Vector,sfunc::String,nfolds::Int)
crossvalidate(pl,X,Y,sfunc; nfolds)
end
function crossvalidate(pl::Machine,X::DataFrame,Y::Vector,sfunc::String,verbose::Bool)
crossvalidate(pl,X,Y,sfunc; verbose)
end
function crossvalidate(pl::Machine,X::DataFrame,Y::Vector,
sfunc::String, nfolds::Int,verbose::Bool)
crossvalidate(pl,X,Y,sfunc; nfolds,verbose)
end
# Variant for metrics that take an `average` strategy (e.g. f1_score with
# "macro"/"micro"/"weighted" for multiclass targets).
function crossvalidate(pl::Machine,X::DataFrame,Y::Vector,
sfunc::String,averagetype::String;nfolds=10,verbose::Bool=true)
checkfun(sfunc)
pfunc = metric_dict[sfunc]
# wrap the Python metric so it returns a Julia Float64
metric(a,b) = pfunc(a,b,average=averagetype) |> (x -> PYC.pyconvert(Float64,x))
crossvalidate(pl,X,Y,metric,nfolds,verbose)
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 7360 | module SKLearners
import PythonCall
const PYC=PythonCall
# standard included modules
using DataFrames
using Random
using ..AbsTypes
using ..Utils
import ..AbsTypes: fit, fit!, transform, transform!
export fit, fit!, transform, transform!
export SKLearner, sklearners
const learner_dict = Dict{String,PYC.Py}()
const ENS = PYC.pynew()
const LM = PYC.pynew()
const DA = PYC.pynew()
const NN = PYC.pynew()
const SVM = PYC.pynew()
const TREE = PYC.pynew()
const ANN = PYC.pynew()
const GP = PYC.pynew()
const KR = PYC.pynew()
const NB = PYC.pynew()
const ISO = PYC.pynew()
# Module initializer: import the sklearn submodules lazily (required for
# precompilation safety with PythonCall) and register each supported model
# class name against the submodule that provides it.
function __init__()
PYC.pycopy!(ENS , PYC.pyimport("sklearn.ensemble"))
PYC.pycopy!(LM , PYC.pyimport("sklearn.linear_model"))
PYC.pycopy!(DA , PYC.pyimport("sklearn.discriminant_analysis"))
PYC.pycopy!(NN , PYC.pyimport("sklearn.neighbors"))
PYC.pycopy!(SVM , PYC.pyimport("sklearn.svm"))
PYC.pycopy!(TREE, PYC.pyimport("sklearn.tree"))
PYC.pycopy!(ANN , PYC.pyimport("sklearn.neural_network"))
PYC.pycopy!(GP , PYC.pyimport("sklearn.gaussian_process"))
PYC.pycopy!(KR , PYC.pyimport("sklearn.kernel_ridge"))
PYC.pycopy!(NB , PYC.pyimport("sklearn.naive_bayes"))
PYC.pycopy!(ISO , PYC.pyimport("sklearn.isotonic"))
# Available scikit-learn learners.
# classifiers
learner_dict["AdaBoostClassifier"] = ENS
learner_dict["BaggingClassifier"] = ENS
learner_dict["ExtraTreesClassifier"] = ENS
learner_dict["VotingClassifier"] = ENS
learner_dict["GradientBoostingClassifier"] = ENS
learner_dict["RandomForestClassifier"] = ENS
learner_dict["QuadraticDiscriminantAnalysis"] = DA
learner_dict["LinearDiscriminantAnalysis"] = DA
learner_dict["LogisticRegression"] = LM
learner_dict["PassiveAggressiveClassifier"] = LM
learner_dict["RidgeClassifier"] = LM
learner_dict["RidgeClassifierCV"] = LM
learner_dict["SGDClassifier"] = LM
learner_dict["KNeighborsClassifier"] = NN
learner_dict["RadiusNeighborsClassifier"] = NN
learner_dict["NearestCentroid"] = NN
learner_dict["SVC"] = SVM
learner_dict["LinearSVC"] = SVM
learner_dict["NuSVC"] = SVM
learner_dict["MLPClassifier"] = ANN
learner_dict["GaussianProcessClassifier"] = GP
learner_dict["DecisionTreeClassifier"] = TREE
learner_dict["GaussianNB"] = NB
learner_dict["MultinomialNB"] = NB
learner_dict["ComplementNB"] = NB
learner_dict["BernoulliNB"] = NB
# regressors
learner_dict["SVR"] = SVM
learner_dict["Ridge"] = LM
learner_dict["RidgeCV"] = LM
learner_dict["Lasso"] = LM
learner_dict["ElasticNet"] = LM
learner_dict["Lars"] = LM
learner_dict["LassoLars"] = LM
learner_dict["OrthogonalMatchingPursuit"] = LM
learner_dict["BayesianRidge"] = LM
learner_dict["ARDRegression"] = LM
learner_dict["SGDRegressor"] = LM
learner_dict["PassiveAggressiveRegressor"] = LM
learner_dict["KernelRidge"] = KR
learner_dict["KNeighborsRegressor"] = NN
learner_dict["RadiusNeighborsRegressor"] = NN
learner_dict["GaussianProcessRegressor"] = GP
learner_dict["DecisionTreeRegressor"] = TREE
learner_dict["RandomForestRegressor"] = ENS
learner_dict["ExtraTreesRegressor"] = ENS
learner_dict["AdaBoostRegressor"] = ENS
learner_dict["GradientBoostingRegressor"] = ENS
learner_dict["IsotonicRegression"] = ISO
learner_dict["MLPRegressor"] = ANN
end
"""
SKLearner(learner::String, args::Dict=Dict())
A Scikitlearn wrapper to load the different machine learning models.
Invoking `sklearners()` will list the available learners. Please
consult Scikitlearn documentation for arguments to pass.
Implements `fit!` and `transform!`.
"""
mutable struct SKLearner <: Learner
name::String
model::Dict{Symbol,Any}
function SKLearner(args=Dict{Symbol,Any}())
default_args=Dict{Symbol,Any}(
:name => "sklearner",
:output => :class,
:learner => "LinearSVC",
:impl_args => Dict{Symbol,Any}()
)
cargs = nested_dict_merge(default_args, args)
cargs[:name] = cargs[:name]*"_"*randstring(3)
skl = cargs[:learner]
if !(skl in keys(learner_dict))
println("$skl is not supported.")
println()
sklearners()
error("Argument keyword error")
end
new(cargs[:name],cargs)
end
end
# Convenience constructor: sklearn class name plus explicit argument Dict.
function SKLearner(learner::String, args::Dict)
SKLearner(Dict(:learner => learner,:name=>learner, args...))
end
# Convenience constructor: sklearn class name plus keyword model arguments.
function SKLearner(learner::String; args...)
SKLearner(Dict(:learner => learner,:name=>learner,:impl_args=>Dict(pairs(args))))
end
# Functor form: reconfigure an existing SKLearner with new implementation
# keyword arguments and rebuild the underlying Python estimator.
function (skl::SKLearner)(; objargs...)
    skl.model[:impl_args] = Dict(pairs(objargs))
    lname = skl.model[:learner]
    constructor = getproperty(learner_dict[lname], lname)
    skl.model[:sklearner] = constructor(; objargs...)
    return skl
end
"""
function sklearners()
List the available scikitlearn machine learners.
"""
function sklearners()
learners = keys(learner_dict) |> collect |> x-> sort(x,lt=(x,y)->lowercase(x)<lowercase(y))
println("syntax: SKLearner(name::String, args::Dict=Dict())")
println("where 'name' can be one of:")
println()
[print(learner," ") for learner in learners]
println()
println()
println("and 'args' are the corresponding learner's initial parameters.")
println("Note: Consult Scikitlearn's online help for more details about the learner's arguments.")
end
"""
    fit!(skl::SKLearner, xx::DataFrame, yy::Vector)::Nothing

Train the wrapped scikit-learn model on features `xx` and target `yy`,
storing the fitted Python estimator in `skl.model[:sklearner]`.
Non-numeric targets are treated as string class labels.
"""
function fit!(skl::SKLearner, xx::DataFrame, yy::Vector)::Nothing
    # normalize inputs: scikit-learn needs a plain matrix
    x = xx |> Array
    y = yy
    skl.model[:predtype] = :numeric
    if !(eltype(yy) <: Real)
        # non-numeric target → classification on string labels
        y = yy |> Vector{String}
        skl.model[:predtype] = :alpha
    end
    impl_args = copy(skl.model[:impl_args])
    learner = skl.model[:learner]
    py_learner = getproperty(learner_dict[learner], learner)
    # RadiusNeighborsClassifier requires an :outlier_label; default to a
    # randomly picked training label when the caller did not supply one.
    # (Bug fix: the original wrote to the undefined `impl_options` using
    # the undefined `labels`, raising UndefVarError at runtime.)
    if learner == "RadiusNeighborsClassifier"
        if get(impl_args, :outlier_label, nothing) === nothing
            impl_args[:outlier_label] = y[rand(1:length(y))]
        end
    end
    # Train
    modelobj = py_learner(; impl_args...)
    modelobj.fit(x, y)
    skl.model[:sklearner] = modelobj
    skl.model[:impl_args] = impl_args
    return nothing
end
# Train `skl` in place, then hand back an independent deep copy of it.
fit(skl::SKLearner, xx::DataFrame, y::Vector)::SKLearner =
    (fit!(skl, xx, y); deepcopy(skl))
"""
    transform!(skl::SKLearner, xx::DataFrame)::Vector

Predict with the fitted model; returns `Float64` values for regression
targets and `String` labels for classification targets.
"""
function transform!(skl::SKLearner, xx::DataFrame)::Vector
    feats = deepcopy(xx) |> Array
    estimator = skl.model[:sklearner]
    raw = estimator.predict(feats)
    # Convert the Python prediction array back to a native Julia vector.
    if skl.model[:predtype] == :numeric
        return PYC.pyconvert(Vector{Float64}, raw)
    end
    return PYC.pyconvert(Vector{String}, raw)
end

transform(skl::SKLearner, xx::DataFrame)::Vector = transform!(skl, xx)
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 7501 | module SKPreprocessors
import PythonCall
const PYC=PythonCall
# standard included modules
using DataFrames
using Random
using ..AbsTypes
using ..Utils
import ..AbsTypes: fit, fit!, transform, transform!
export fit, fit!, transform, transform!
export SKPreprocessor, skpreprocessors
const preprocessor_dict = Dict{String,PYC.Py}()
const DEC = PYC.pynew()
const FS = PYC.pynew()
const IMP = PYC.pynew()
const PREP = PYC.pynew()
# Bind the scikit-learn submodules at module load time (PythonCall
# objects cannot be captured at precompile time) and register every
# supported preprocessor name against the submodule that provides it.
function __init__()
PYC.pycopy!(DEC , PYC.pyimport("sklearn.decomposition"))
PYC.pycopy!(FS , PYC.pyimport("sklearn.feature_selection",))
PYC.pycopy!(IMP , PYC.pyimport("sklearn.impute"))
PYC.pycopy!(PREP, PYC.pyimport("sklearn.preprocessing"))
# Available scikit-learn preprocessors, keyed by class/function name.
# --- sklearn.decomposition ---
preprocessor_dict["DictionaryLearning"] = DEC
preprocessor_dict["FactorAnalysis"] = DEC
preprocessor_dict["FastICA"] = DEC
preprocessor_dict["IncrementalPCA"] = DEC
preprocessor_dict["KernelPCA"] = DEC
preprocessor_dict["LatentDirichletAllocation"] = DEC
preprocessor_dict["MiniBatchDictionaryLearning"] = DEC
preprocessor_dict["MiniBatchSparsePCA"] = DEC
preprocessor_dict["NMF"] = DEC
preprocessor_dict["PCA"] = DEC
preprocessor_dict["SparsePCA"] = DEC
preprocessor_dict["SparseCoder"] = DEC
preprocessor_dict["TruncatedSVD"] = DEC
preprocessor_dict["dict_learning"] = DEC
preprocessor_dict["dict_learning_online"] = DEC
preprocessor_dict["fastica"] = DEC
preprocessor_dict["non_negative_factorization"] = DEC
preprocessor_dict["sparse_encode"] = DEC
# --- sklearn.feature_selection ---
preprocessor_dict["GenericUnivariateSelect"] = FS
preprocessor_dict["SelectPercentile"] = FS
preprocessor_dict["SelectKBest"] = FS
preprocessor_dict["SelectFpr"] = FS
preprocessor_dict["SelectFdr"] = FS
preprocessor_dict["SelectFromModel"] = FS
preprocessor_dict["SelectFwe"] = FS
preprocessor_dict["RFE"] = FS
preprocessor_dict["RFECV"] = FS
preprocessor_dict["VarianceThreshold"] = FS
preprocessor_dict["chi2"] = FS
preprocessor_dict["f_classif"] = FS
preprocessor_dict["f_regression"] = FS
preprocessor_dict["mutual_info_classif"] = FS
preprocessor_dict["mutual_info_regression"] = FS
# --- sklearn.impute ---
preprocessor_dict["SimpleImputer"] = IMP
preprocessor_dict["MissingIndicator"] = IMP
# --- sklearn.preprocessing ---
preprocessor_dict["Binarizer"] = PREP
preprocessor_dict["FunctionTransformer"] = PREP
preprocessor_dict["KBinsDiscretizer"] = PREP
preprocessor_dict["KernelCenterer"] = PREP
preprocessor_dict["LabelBinarizer"] = PREP
preprocessor_dict["LabelEncoder"] = PREP
preprocessor_dict["MultiLabelBinarizer"] = PREP
preprocessor_dict["MaxAbsScaler"] = PREP
preprocessor_dict["MinMaxScaler"] = PREP
preprocessor_dict["Normalizer"] = PREP
preprocessor_dict["OneHotEncoder"] = PREP
preprocessor_dict["OrdinalEncoder"] = PREP
preprocessor_dict["PolynomialFeatures"] = PREP
preprocessor_dict["PowerTransformer"] = PREP
preprocessor_dict["QuantileTransformer"] = PREP
preprocessor_dict["RobustScaler"] = PREP
preprocessor_dict["StandardScaler"] = PREP
# Entries below are not (yet) wrapped/supported:
#"IterativeImputer" => IMP.IterativeImputer,
#"KNNImputer" => IMP.KNNImputer,
#"add_dummy_feature" => PREP.add_dummy_feature,
#"binarize" => PREP.binarize,
#"label_binarize" => PREP.label_binarize,
#"maxabs_scale" => PREP.maxabs_scale,
#"minmax_scale" => PREP.minmax_scale,
#"normalize" => PREP.normalize,
#"quantile_transform" => PREP.quantile_transform,
#"robust_scale" => PREP.robust_scale,
#"scale" => PREP.scale,
#"power_transform" => PREP.power_transform
end
"""
SKPreprocessor(preprocessor::String,args::Dict=Dict())
A wrapper for Scikitlearn preprocessor functions.
Invoking `skpreprocessors()` will list the acceptable
and supported functions. Please check Scikitlearn
documentation for arguments to pass.
Implements `fit!` and `transform!`.
"""
mutable struct SKPreprocessor <: Transformer
name::String
model::Dict{Symbol,Any}
function SKPreprocessor(args=Dict())
default_args=Dict(
:name => "skprep",
:preprocessor => "PCA",
:autocomponent=>false,
:impl_args => Dict()
)
cargs = nested_dict_merge(default_args, args)
cargs[:name] = cargs[:name]*"_"*randstring(3)
prep = cargs[:preprocessor]
if !(prep in keys(preprocessor_dict))
println("$prep is not supported.")
println()
skpreprocessors()
error("Argument keyword error")
end
new(cargs[:name],cargs)
end
end
# Convenience constructor: positional preprocessor name plus an args Dict.
SKPreprocessor(prep::String, args::Dict) =
    SKPreprocessor(Dict(:preprocessor => prep, :name => prep, args...))

# Convenience constructor: keyword arguments become :impl_args.
SKPreprocessor(prep::String; args...) =
    SKPreprocessor(Dict(:preprocessor => prep, :name => prep,
                        :impl_args => Dict(pairs(args))))
# Functor form: reconfigure an existing SKPreprocessor with new keyword
# arguments and rebuild the underlying Python transformer.
function (skp::SKPreprocessor)(; objargs...)
    skp.model[:impl_args] = Dict(pairs(objargs))
    prepname = skp.model[:preprocessor]
    constructor = getproperty(preprocessor_dict[prepname], prepname)
    skp.model[:skpreprocessor] = constructor(; objargs...)
    return skp
end
"""
    skpreprocessors()

List the available scikit-learn preprocessors, sorted case-insensitively.
"""
function skpreprocessors()
    prepnames = sort(collect(keys(preprocessor_dict)); by=lowercase)
    println("syntax: SKPreprocessor(name::String, args::Dict=Dict())")
    println("where *name* can be one of:")
    println()
    foreach(name -> print(name, " "), prepnames)
    println()
    println()
    println("and *args* are the corresponding preprocessor's initial parameters.")
    println("Note: Please consult Scikitlearn's online help for more details about the preprocessor's arguments.")
end
"""
    fit!(skp::SKPreprocessor, x::DataFrame, yc::Vector=[])::Nothing

Fit the wrapped scikit-learn preprocessor on the features in `x`, storing
the fitted Python object in `skp.model[:skpreprocessor]`. The target `yc`
is accepted for interface uniformity but is unused by the Python fit call.
(Cleanup: removed the dead local `y = yc` and the surrounding
commented-out conversion code.)
"""
function fit!(skp::SKPreprocessor, x::DataFrame, yc::Vector=[])::Nothing
    features = x |> Array
    impl_args = copy(skp.model[:impl_args])
    # When :autocomponent is set, default n_components to round(sqrt(#cols)).
    if skp.model[:autocomponent] == true
        cols = ncol(x)
        if cols > 0
            impl_args[:n_components] = round(Int, sqrt(cols))
        end
    end
    preprocessor = skp.model[:preprocessor]
    py_preprocessor = getproperty(preprocessor_dict[preprocessor], preprocessor)
    # Instantiate and fit the Python preprocessor.
    preproc = py_preprocessor(; impl_args...)
    preproc.fit(features)
    skp.model[:skpreprocessor] = preproc
    skp.model[:impl_args] = impl_args
    return nothing
end
# Fit `skp` in place, then hand back an independent deep copy of it.
fit(skp::SKPreprocessor, x::DataFrame, y::Vector=[])::SKPreprocessor =
    (fit!(skp, x, y); deepcopy(skp))
"""
    transform!(skp::SKPreprocessor, x::DataFrame)::DataFrame

Apply the fitted preprocessor to `x` and return the transformed features
as a DataFrame with auto-generated column names.
"""
function transform!(skp::SKPreprocessor, x::DataFrame)::DataFrame
    feats = deepcopy(x) |> Array
    preproc = skp.model[:skpreprocessor]
    transformed = preproc.transform(feats)
    return DataFrame(PYC.pyconvert(Matrix, transformed), :auto)
end

transform(skp::SKPreprocessor, x::DataFrame)::DataFrame = transform!(skp, x)
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 291 | module TestAutoMLPipeline
using Test
# Silence Python-side warnings emitted by the wrapped scikit-learn calls
# so test output stays readable.
import PythonCall
const PYC=PythonCall
warnings = PYC.pyimport("warnings")
warnings.filterwarnings("ignore")
# Run the individual test suites.
include("test_skpreprocessing.jl")
include("test_sklearner.jl")
include("test_skcrossvalidator.jl")
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 1353 | module TestBaseFilter
using Random
using Test
using AutoMLPipeline
using AutoMLPipeline.BaseFilters
using DataFrames: nrow
# Exercise the base filters (OneHotEncoder, Imputer, Wrapper) on the iris
# data. The numeric constants are checksums (rounded sums) of the expected
# transformed outputs; the RNG is reseeded before each `rand` so the
# injected missing-value column is reproducible.
function test_basefilter()
data = getiris()
ohe = OneHotEncoder()
mptr = Imputer()
@test fit_transform!(ohe,data) |> Matrix |> sum |> round == 2229.0
@test fit_transform(ohe,data) |> Matrix |> sum |> round == 2229.0
@test fit_transform!(mptr,data) |> Matrix |> x->x[:,1:4] |> sum |> round == 2079.0
@test fit_transform(mptr,data) |> Matrix |> x->x[:,1:4] |> sum |> round == 2079.0
# Add a column containing missing values for the imputer to fill.
Random.seed!(1)
data.mss=rand([missing,(1:100)...],nrow(data))
@test fit_transform!(mptr,data) |> Matrix |> x->x[:,[(1:4)...,6]] |> sum |> round == 9054.0
@test fit_transform(mptr,data) |> Matrix |> x->x[:,[(1:4)...,6]] |> sum |> round == 9054.0
# Wrapper should delegate to its wrapped transformer transparently.
wrp = Wrapper(Dict(:transformer => OneHotEncoder()))
@test fit_transform!(wrp,data) |> Matrix |> sum |> round == 9204.0
@test fit_transform(wrp,data) |> Matrix |> sum |> round == 9204.0
Random.seed!(1)
data.mss=rand([missing,(1:100)...],nrow(data))
wrp = Wrapper(Dict(:transformer => Imputer()))
@test fit_transform!(wrp,data) |> Matrix |> x->x[:,[(1:4)...,6]] |> sum |> round == 9054.0
@test fit_transform(wrp,data) |> Matrix |> x->x[:,[(1:4)...,6]] |> sum |> round == 9054.0
end
@testset "BaseFilter" begin
Random.seed!(123)
test_basefilter()
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 3858 | module TestSKCrossValidator
using Test
using Random
using AutoMLPipeline
# Cross-validate `ppl` against the supported classification metrics; the
# thresholds are loose sanity bounds rather than exact expectations.
function crossval_class(ppl,X,Y,nfolds,verbose)
@test crossvalidate(ppl,X,Y,"accuracy_score",nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"balanced_accuracy_score",nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"cohen_kappa_score",nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"matthews_corrcoef",nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"hamming_loss",nfolds,verbose).mean < 0.1
@test crossvalidate(ppl,X,Y,"zero_one_loss",nfolds,verbose).mean < 0.1
# Metrics that take an extra averaging argument ("weighted").
@test crossvalidate(ppl,X,Y,"jaccard_score","weighted";nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"f1_score","weighted";nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"precision_score","weighted";nfolds,verbose).mean > 0.80
@test crossvalidate(ppl,X,Y,"recall_score","weighted";nfolds,verbose).mean > 0.80
end
# Cross-validate `ppl` against the supported regression metrics.
function crossval_reg(ppl,X,Y,folds,verbose)
@test crossvalidate(ppl,X,Y,"mean_squared_error",folds,verbose).mean < 0.5
@test crossvalidate(ppl,X,Y,"mean_squared_log_error",folds,verbose).mean < 0.5
@test crossvalidate(ppl,X,Y,"mean_absolute_error",folds,verbose).mean < 0.5
@test crossvalidate(ppl,X,Y,"median_absolute_error",folds,verbose).mean < 0.5
@test crossvalidate(ppl,X,Y,"r2_score",folds,verbose).mean > 0.50
@test crossvalidate(ppl,X,Y,"max_error",folds,verbose).mean < 0.7
@test crossvalidate(ppl,X,Y,"mean_poisson_deviance",folds,verbose).mean < 0.7
@test crossvalidate(ppl,X,Y,"mean_gamma_deviance",folds,verbose).mean < 0.7
@test crossvalidate(ppl,X,Y,"mean_tweedie_deviance",folds,verbose).mean < 0.7
@test crossvalidate(ppl,X,Y,"explained_variance_score",folds,verbose).mean > 0.50
end
# Regression cross-validation: iris petal width predicted from the
# remaining numeric columns, with three pipeline variants.
function test_skcross_reg()
data=getiris()
X=data[:,1:3]
Y=data[:,4]
ppl1 = Pipeline(Dict(:machines=>[RandomForest()]))
crossval_reg(ppl1,X,Y,10,false)
ppl2 = Pipeline(Dict(:machines=>[VoteEnsemble()]))
crossval_reg(ppl2,X,Y,10,false)
cat = CatFeatureSelector()
num = NumFeatureSelector()
pca = SKPreprocessor("PCA")
ptf = SKPreprocessor("PowerTransformer")
rbc = SKPreprocessor("RobustScaler")
ppl3=@pipeline ((cat + num) + (num |> ptf) + (num |> rbc) + (num |> pca)) |> VoteEnsemble()
crossval_reg(ppl3,X,Y,10,false)
end
@testset "CrossValidator Regression" begin
Random.seed!(123)
test_skcross_reg()
end
# Classification cross-validation: iris species predicted from the four
# numeric features, with three pipeline variants.
function test_skcross_class()
data=getiris()
X=data[:,1:4]
Y=data[:,5] |> Vector{String}
ppl1 = Pipeline(Dict(:machines=>[RandomForest()]))
crossval_class(ppl1,X,Y,10,false)
ppl2 = Pipeline(Dict(:machines=>[VoteEnsemble()]))
crossval_class(ppl2,X,Y,10,false)
cat = CatFeatureSelector()
num = NumFeatureSelector()
pca = SKPreprocessor("PCA")
ptf = SKPreprocessor("PowerTransformer")
rbc = SKPreprocessor("RobustScaler")
ppl3=@pipeline ((cat + num) + (num |> ptf) + (num |> rbc) + (num |> pca)) |> VoteEnsemble()
crossval_class(ppl3,X,Y,10,false)
end
@testset "CrossValidator Classification" begin
Random.seed!(123)
test_skcross_class()
end
# Exercise the different positional/keyword calling conventions that
# `crossvalidate` accepts, including a user-supplied metric function.
function test_crossval_options()
data=getiris()
X=data[:,1:4]
Y=data[:,5] |> Vector{String}
acc(x,y)=score(:accuracy,x,y)
ppl1 = Pipeline(RandomForest())
@test crossvalidate(ppl1,X,Y,"accuracy_score",10,false).mean > 0.90
@test crossvalidate(ppl1,X,Y,"accuracy_score").mean > 0.90
@test crossvalidate(ppl1,X,Y,"accuracy_score",10).mean > 0.90
@test crossvalidate(ppl1,X,Y,"accuracy_score",false).mean > 0.90
@test crossvalidate(ppl1,X,Y,"accuracy_score",verbose=false).mean > 0.90
@test crossvalidate(ppl1,X,Y,metric=acc,verbose=false).mean > 0.90
@test crossvalidate(ppl1,X,Y,metric=acc,nfolds=5).mean > 0.90
@test crossvalidate(ppl1,X,Y,acc,5,true).mean > 0.90
end
@testset "CrossValidator Argument Options" begin
Random.seed!(123)
test_crossval_options()
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 3151 | module TestSKL
using Random
using Test
using AutoMLPipeline
using Statistics
using DataFrames: DataFrame
# Shared fixtures: iris features/targets in regression (X/Y) and
# classification (XC/YC) form.
const IRIS = getiris()
const X = IRIS[:,1:3] |> DataFrame
const XC = IRIS[:,1:4] |> DataFrame
const YC = IRIS[:,5] |> Vector{String}
const Y = IRIS[:,4] |> Vector{Float64}
# Names of the scikit-learn classifiers covered by the smoke tests.
const classifiers = [
"LinearSVC","QuadraticDiscriminantAnalysis","MLPClassifier","BernoulliNB",
"RandomForestClassifier","LinearDiscriminantAnalysis",
"NearestCentroid","SVC","LinearSVC","NuSVC","MLPClassifier",
"RidgeClassifierCV","SGDClassifier","KNeighborsClassifier",
"GaussianProcessClassifier","DecisionTreeClassifier",
"PassiveAggressiveClassifier","RidgeClassifier",
"ExtraTreesClassifier","GradientBoostingClassifier",
"BaggingClassifier","AdaBoostClassifier","GaussianNB","MultinomialNB",
"ComplementNB","BernoulliNB"
]
# Names of the scikit-learn regressors covered by the smoke tests.
const regressors = [
"SVR",
"Ridge",
"RidgeCV",
"Lasso",
"ElasticNet",
"Lars",
"LassoLars",
"OrthogonalMatchingPursuit",
"BayesianRidge",
"ARDRegression",
"SGDRegressor",
"PassiveAggressiveRegressor",
"KernelRidge",
"KNeighborsRegressor",
"RadiusNeighborsRegressor",
"GaussianProcessRegressor",
"DecisionTreeRegressor",
"RandomForestRegressor",
"ExtraTreesRegressor",
"GradientBoostingRegressor",
"MLPRegressor",
"AdaBoostRegressor"
]
# Construct the named learner and check both fit!/fit populate its model.
function fit_test(learner::String,in::DataFrame,out::Vector)
_learner=SKLearner(Dict(:learner=>learner))
fit!(_learner,in,out)
lr = fit(_learner,in,out)
@test _learner.model != Dict()
@test lr.model != Dict()
return _learner
end
# Check that the fitted regressor's mean squared error on the training
# data stays below a loose bound, for both transform!/transform.
function fit_transform_reg(model::Learner,in::DataFrame,out::Vector)
@test sum((transform!(model,in) .- out).^2)/length(out) < 2.0
@test sum((transform(model,in) .- out).^2)/length(out) < 2.0
end
@testset "scikit classifiers" begin
Random.seed!(123)
for cl in classifiers
fit_test(cl,XC,YC)
end
end
@testset "scikit regressors" begin
Random.seed!(123)
for rg in regressors
model=fit_test(rg,X,Y)
fit_transform_reg(model,X,Y)
end
end
# Build combined pipelines via the @pipeline macro and the raw operator
# forms (|> and >>) and cross-validate them against loose bounds.
function pipeline_test()
pca = SKPreprocessor("PCA")
catf = CatFeatureSelector()
numf = NumFeatureSelector()
rb = SKPreprocessor("RobustScaler")
ohe=OneHotEncoder()
regressor = SKLearner("RandomForestRegressor")
classifier = SKLearner("RandomForestClassifier",n_estimators=100)
classifier1 = SKLearner("RandomForestClassifier")(n_estimators=200)
plr = @pipeline (catf |> ohe) + (numf |> rb |> pca) |> regressor
plr1 = (catf |> ohe) + (numf |> rb |> pca) |> regressor
plc = @pipeline (catf >> ohe) + (numf >> rb >> pca) |> classifier
plc1 = (catf >> ohe) + (numf >> rb >> pca) |> classifier
plc2 = (catf >> ohe) + (numf >> rb >> pca) |> classifier1
@test crossvalidate(plr,X,Y,"mean_absolute_error",3,false).mean < 0.3
@test crossvalidate(plr1,X,Y,"mean_absolute_error",3,false).mean < 0.3
@test crossvalidate(plc,XC,YC,"accuracy_score",3,false).mean > 0.8
@test crossvalidate(plc1,XC,YC,"accuracy_score",3,false).mean > 0.8
end
@testset "scikit pipeline" begin
pipeline_test()
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | code | 3582 | module TestSKPreprocessing
using Random
using Test
using AutoMLPipeline
using Statistics
using DataFrames: DataFrame, nrow
# Fixtures: iris numeric features augmented with three random columns
# (seeded for reproducibility) and the species labels as target.
Random.seed!(1)
const IRIS = getiris()
extra = rand(150,3) |> x->DataFrame(x,:auto)
const X = hcat(IRIS[:,1:4],extra)
const Y = IRIS[:,5] |> Vector{String}
# Preprocessors covered by the smoke tests; the commented names require
# inputs this fixture does not provide:
# "KernelCenterer","MissingIndicator","KBinsDiscretizer","OneHotEncoder",
const preprocessors = [
"DictionaryLearning", "FactorAnalysis", "FastICA", "IncrementalPCA",
"KernelPCA", "LatentDirichletAllocation", "MiniBatchDictionaryLearning",
"MiniBatchSparsePCA", "NMF", "PCA",
"TruncatedSVD",
"VarianceThreshold",
"SimpleImputer",
"Binarizer", "FunctionTransformer",
"MaxAbsScaler", "MinMaxScaler", "Normalizer",
"OrdinalEncoder", "PolynomialFeatures", "PowerTransformer",
"QuantileTransformer", "RobustScaler", "StandardScaler",
#"MultiLabelBinarizer",
]
# Construct the named preprocessor and check both fit!/fit populate its model.
function fit_test(preproc::String,in::DataFrame,out::Vector)
_preproc=SKPreprocessor(Dict(:preprocessor=>preproc))
fit!(_preproc,in,out)
prep = fit(_preproc,in,out)
@test _preproc.model != Dict()
@test prep.model != Dict()
return _preproc
end
# Check that fit_transform!/fit_transform preserve the number of rows.
function transform_test(preproc::String,in::DataFrame,out::Vector)
_preproc=SKPreprocessor(Dict(:preprocessor=>preproc))
res = fit_transform!(_preproc,in)
res1 = fit_transform(_preproc,in)
@test size(res)[1] == size(out)[1]
@test size(res1)[1] == size(out)[1]
end
@testset "scikit preprocessors fit test" begin
Random.seed!(123)
for cl in preprocessors
fit_test(cl,X,Y)
end
end
@testset "scikit preprocessors transform test" begin
Random.seed!(123)
for cl in preprocessors
transform_test(cl,X,Y)
end
end
# End-to-end checks: explicit n_components, functor reconfiguration,
# :autocomponent (sqrt of 7 columns rounds to 3), scalers, and full
# pipelines ending in ensemble learners.
function skptest()
features = X
labels = Y
pca = SKPreprocessor(Dict(:preprocessor=>"PCA",:impl_args=>Dict(:n_components=>3)))
@test fit_transform!(pca,features) |> x->size(x,2) == 3
pca(;n_components = 5)
@test fit_transform!(pca,features) |> x->size(x,2) == 5
pca = SKPreprocessor("PCA",Dict(:autocomponent=>true))
@test fit_transform!(pca,features) |> x->size(x,2) == 3
pca = SKPreprocessor("PCA",Dict(:impl_args=> Dict(:n_components=>3)))
@test fit_transform!(pca,features) |> x->size(x,2) == 3
svd = SKPreprocessor(Dict(:preprocessor=>"TruncatedSVD",:impl_args=>Dict(:n_components=>2)))
@test fit_transform!(svd,features) |> x->size(x,2) == 2
ica = SKPreprocessor(Dict(:preprocessor=>"FastICA",:impl_args=>Dict(:n_components=>2)))
@test fit_transform!(ica,features) |> x->size(x,2) == 2
stdsc = SKPreprocessor("StandardScaler")
@test abs(mean(fit_transform!(stdsc,features) |> Matrix)) < 0.00001
minmax = SKPreprocessor("MinMaxScaler")
@test mean(fit_transform!(minmax,features) |> Matrix) > 0.30
vote = VoteEnsemble()
stack = StackEnsemble()
best = BestLearner()
cat = CatFeatureSelector()
num = NumFeatureSelector()
disc = CatNumDiscriminator()
ohe = OneHotEncoder()
mpipeline = Pipeline(Dict(
:machines => [stdsc,pca,best]
))
pred = fit_transform!(mpipeline,features,labels)
@test score(:accuracy,pred,labels) > 50.0
fpipe = @pipeline ((cat + num) + (num + pca)) |> stack
@test ((fit_transform!(fpipe,features,labels) .== labels) |> sum ) / nrow(features) > 0.50
fpipe1 = ((cat + num) + (num + pca)) >> stack
@test ((fit_transform!(fpipe1,features,labels) .== labels) |> sum ) / nrow(features) > 0.50
end
@testset "scikit preprocessor fit/transform test with real data" begin
Random.seed!(123)
skptest()
end
end
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 1099 | Copyright for portions of project AutoMLPipeline.jl are held by Samuel Jenkins, 2014 as part of project Orchestra.jl.
All other copyright for project AutoMLPipeline.jl are held by IBM.
This is an open source project sponsored and copyrighted by IBM.
Everyone is welcome to contribute and help.
Please submit a PR (pull request) to contribute code, documentation, tests, and features.
Follow the guidelines in [MAINTAINERS](MAINTAINERS.md) when you submit a PR.
We use the GitHub issue tracker for issue/bug reporting and discussion.
## Contributors
- [Paulito Palmes, IBM Research](https://github.com/ppalmes): data structures/types, algorithms, pipeline, and general workflow
## Collaborators
- [Akihiro Kishimoto, IBM Research](https://researcher.watson.ibm.com/researcher/view.php?person=ie-AKIHIROK)
- [Radu Marinescu, IBM Research](https://researcher.watson.ibm.com/researcher/view.php?person=ie-radu.marinescu)
- [Parikshit Ram, IBM Research](https://rithram.github.io/)
- [Elizabeth Daly, IBM Research](https://researcher.watson.ibm.com/researcher/view.php?person=ie-elizabeth.daly)
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 688 | # Main author
- [Paulito P. Palmes, IBM Research](https://github.com/ppalmes)
# Contributors
This is an open-source project sponsored by IBM. We highly encourage contribution by making a PR.
# Pull Requests
We recommend contributors to submit a PR directly in Github to have public discussion of issues
for transparency. When providing feedback, please remain civil and avoid personal
attacks and abusive comments.
# Merge Approval
Make sure the PR clearly documents which issues are being addressed or which features are added, and why.
# Vulnerability Report
For security and vulnerability issues, please send email to [Paulito P. Palmes](https://github.com/ppalmes).
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 25828 | ## AutoMLPipeline
<div align="center">

[](https://bestpractices.coreinfrastructure.org/projects/7093)

------------------
| **Documentation** | **Build Status** | **Help** |
|:---:|:---:|:---:|
| [![][docs-dev-img]][docs-dev-url] [![][docs-stable-img]][docs-stable-url] | [![][gha-img]][gha-url] [![][codecov-img]][codecov-url] | [![][slack-img]][slack-url] [![][gitter-img]][gitter-url] |
</div>
### Star History
[](https://star-history.com/#IBM/AutoMLPipeline.jl&Date)
------------------
**AutoMLPipeline** (AMLP) is a package
that makes it trivial to create
complex ML pipeline structures
using simple expressions. It leverages on
the built-in macro programming features of
Julia to symbolically process, manipulate
pipeline expressions, and makes it easy to
discover optimal structures for machine
learning regression and classification.
To illustrate, here is a pipeline expression
and evaluation of a typical machine learning
workflow that extracts numerical features (`numf`)
for `ica` (Independent Component Analysis)
and `pca` (Principal Component Analysis)
transformations, respectively, concatenated with
the hot-bit encoding (`ohe`) of categorical
features (`catf`) of a given data for `rf` (Random Forest) modeling:
```julia
model = (catf |> ohe) + (numf |> pca) + (numf |> ica) |> rf
fit!(model,Xtrain,Ytrain)
prediction = transform!(model,Xtest)
score(:accuracy,prediction,Ytest)
crossvalidate(model,X,Y,"balanced_accuracy_score")
```
Just take note that `+` has higher precedence than `|>`, so if you
are not sure, enclose the operations inside parentheses.
```julia
### these two expressions are the same
a |> b + c; a |> (b + c)
### these two expressions are the same
a + b |> c; (a + b) |> c
```
#### Please read this [AutoMLPipeline Paper](https://doi.org/10.21105/jcon.00129) for benchmark comparisons.
- [JuliaCon Proceedings](https://doi.org/10.21105/jcon.00129): [](https://doi.org/10.21105/jcon.00129)
### Recorded Video/Conference Presentations:
- [2024 JuliaCon (**End-to-End AI with Julia, K0s, and Argo Workflow**)](https://www.youtube.com/live/ZKt0tiG5ajw?t=24008s)
- [2023 JuliaCon (**Wrapping Up Offline RL as Part of AutoMLPipeline Workflow**)](https://www.youtube.com/watch?v=7a2MqqvyoEc)
- [2022 JuliaCon (**Distributed AutoML Pipeline Search in PC/RasPi K8s Cluster**)](https://www.youtube.com/watch?v=gpmmHt6d0pw)
- [2021 JuliaCon (**Finding an Effective Strategy for AutoML Pipeline Optimization**)](https://www.youtube.com/watch?v=ZRFIMGW88Co)
- [2021 PyData Ireland Meetup (**Symbolic ML Pipeline Expression and Benchmarking**)](https://www.youtube.com/watch?v=EQm5fj-4Hrw)
- [2020 JuliaCon (**AutoMLPipeline: A ToolBox for Building ML Pipelines**)](https://www.youtube.com/watch?v=6-hJnMO0oDs)
### Related Video/Conference Presentations:
- [2021 JuliaCon (**Lale in Julia: A package for semi-automated data science**)](https://www.youtube.com/watch?v=4ayA_EWWlqk)
- [2019 JuliaCon (**TSML: Time Series Machine Learning Pipeline**)](https://www.youtube.com/watch?v=RRY0OXc52Ns)
- [2021 OpenSource Guild in IBM (**Overview of HPC and Data Science in Julia Programming with AutoML**)](https://www.youtube.com/watch?v=zkks1_SrUx0)
More examples can be found in the
[examples](https://github.com/IBM/AutoMLPipeline.jl/tree/master/examples)
folder including optimizing pipelines by multi-threading or distributed computing.
### Motivations
The typical workflow in machine learning
classification or prediction requires
some or combination of the following
preprocessing steps together with modeling:
- feature extraction (e.g. ica, pca, svd)
- feature transformation (e.g. normalization, scaling, ohe)
- feature selection (anova, correlation)
- modeling (rf, adaboost, xgboost, lm, svm, mlp)
Each step has several choices of functions
to use together with their corresponding
parameters. Optimizing the performance of the
entire pipeline is a combinatorial search
of the proper order and combination of preprocessing
steps, optimization of their corresponding
parameters, together with searching for
the optimal model and its hyper-parameters.
Because of close dependencies among various
steps, we can consider the entire process
to be a pipeline optimization problem (POP).
POP requires simultaneous optimization of pipeline
structure and parameter adaptation of its elements.
As a consequence, having an elegant way to
express pipeline structure can help lessen
the complexity in the management and analysis
of the wide-array of choices of optimization routines.
The target of future work will be the
implementations of different pipeline
optimization algorithms ranging from
evolutionary approaches, integer
programming (discrete choices of POP elements),
tree/graph search, and hyper-parameter search.
### Package Features
- Symbolic pipeline API for easy expression and high-level description of complex pipeline structures and processing workflow
- Common API wrappers for ML libs including Scikitlearn, DecisionTree, etc
- Easily extensible architecture by overloading just two main interfaces: fit! and transform!
- Meta-ensembles that allow composition of ensembles of ensembles (recursively if needed) for robust prediction routines
- Categorical and numerical feature selectors for specialized preprocessing routines based on types
### Installation
AutoMLPipeline is in the Julia Official package registry.
The latest release can be installed at the Julia
prompt using Julia's package management which is triggered
by pressing `]` at the julia prompt:
```julia
julia> ]
pkg> update
pkg> add AutoMLPipeline
```
### Sample Usage
Below outlines some typical way to preprocess and model any dataset.
##### 1. Load Data, Extract Input (X) and Target (Y)
```julia
# Make sure that the input feature is a dataframe and the target output is a 1-D vector.
using AutoMLPipeline
profbdata = getprofb()
X = profbdata[:,2:end]
Y = profbdata[:,1] |> Vector;
head(x)=first(x,5)
head(profbdata)
```
```julia
5×7 DataFrame. Omitted printing of 1 columns
│ Row │ Home.Away │ Favorite_Points │ Underdog_Points │ Pointspread │ Favorite_Name │ Underdog_name │
│ │ String │ Int64 │ Int64 │ Float64 │ String │ String │
├─────┼───────────┼─────────────────┼─────────────────┼─────────────┼───────────────┼───────────────┤
│ 1 │ away │ 27 │ 24 │ 4.0 │ BUF │ MIA │
│ 2 │ at_home │ 17 │ 14 │ 3.0 │ CHI │ CIN │
│ 3 │ away │ 51 │ 0 │ 2.5 │ CLE │ PIT │
│ 4 │ at_home │ 28 │ 0 │ 5.5 │ NO │ DAL │
│ 5 │ at_home │ 38 │ 7 │ 5.5 │ MIN │ HOU │
```
#### 2. Load Filters, Transformers, and Learners
```julia
using AutoMLPipeline
#### Decomposition
pca = skoperator("PCA")
fa = skoperator("FactorAnalysis")
ica = skoperator("FastICA")
#### Scaler
rb = skoperator("RobustScaler")
pt = skoperator("PowerTransformer")
norm = skoperator("Normalizer")
mx = skoperator("MinMaxScaler")
std = skoperator("StandardScaler")
#### categorical preprocessing
ohe = OneHotEncoder()
#### Column selector
catf = CatFeatureSelector()
numf = NumFeatureSelector()
disc = CatNumDiscriminator()
#### Learners
rf = skoperator("RandomForestClassifier")
gb = skoperator("GradientBoostingClassifier")
lsvc = skoperator("LinearSVC")
svc = skoperator("SVC")
mlp = skoperator("MLPClassifier")
ada = skoperator("AdaBoostClassifier")
sgd = skoperator("SGDClassifier")
skrf_reg = skoperator("RandomForestRegressor")
skgb_reg = skoperator("GradientBoostingRegressor")
jrf = RandomForest()
tree = PrunedTree()
vote = VoteEnsemble()
stack = StackEnsemble()
best = BestLearner()
```
Note: You can get a listing of available `Preprocessors` and `Learners` by invoking the function:
- `skoperator()`
#### 3. Filter categories and hot-encode them
```julia
pohe = catf |> ohe
tr = fit_transform!(pohe,X,Y)
head(tr)
```
```julia
5×56 DataFrame. Omitted printing of 47 columns
│ Row │ x1 │ x2 │ x3 │ x4 │ x5 │ x6 │ x7 │ x8 │ x9 │
│ │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │
├─────┼─────────┼─────────┼─────────┼─────────┼─────────┼─────────┼─────────┼─────────┼─────────┤
│ 1 │ 1.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │
│ 2 │ 0.0 │ 1.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │
│ 3 │ 0.0 │ 0.0 │ 1.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │
│ 4 │ 0.0 │ 0.0 │ 0.0 │ 1.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │
│ 5 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │ 1.0 │ 0.0 │ 0.0 │ 0.0 │ 0.0 │
```
#### 4. Numerical Feature Extraction Example
##### 4.1 Filter numeric features, compute ica and pca features, and combine both features
```julia
pdec = (numf |> pca) + (numf |> ica)
tr = fit_transform!(pdec,X,Y)
head(tr)
```
```julia
5×8 DataFrame
│ Row │ x1 │ x2 │ x3 │ x4 │ x1_1 │ x2_1 │ x3_1 │ x4_1 │
│ │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │
├─────┼──────────┼──────────┼──────────┼──────────┼────────────┼────────────┼────────────┼────────────┤
│ 1 │ 2.47477 │ 7.87074 │ -1.10495 │ 0.902431 │ 0.0168432 │ 0.00319873 │ -0.0467633 │ 0.026742 │
│ 2 │ -5.47113 │ -3.82946 │ -2.08342 │ 1.00524 │ -0.0327947 │ -0.0217808 │ -0.0451314 │ 0.00702006 │
│ 3 │ 30.4068 │ -10.8073 │ -6.12339 │ 0.883938 │ -0.0734292 │ 0.115776 │ -0.0425357 │ 0.0497831 │
│ 4 │ 8.18372 │ -15.507 │ -1.43203 │ 1.08255 │ -0.0656664 │ 0.0368666 │ -0.0457154 │ -0.0192752 │
│ 5 │ 16.6176 │ -6.68636 │ -1.66597 │ 0.978243 │ -0.0338749 │ 0.0643065 │ -0.0461703 │ 0.00671696 │
```
##### 4.2 Filter numeric features, transform to robust and power transform scaling, perform ica and pca, respectively, and combine both
```julia
ppt = (numf |> rb |> ica) + (numf |> pt |> pca)
tr = fit_transform!(ppt,X,Y)
head(tr)
```
```julia
5×8 DataFrame
│ Row │ x1 │ x2 │ x3 │ x4 │ x1_1 │ x2_1 │ x3_1 │ x4_1 │
│ │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │ Float64 │
├─────┼─────────────┼─────────────┼────────────┼───────────┼───────────┼──────────┼────────────┼───────────┤
│ 1 │ -0.00308891 │ -0.0269009 │ -0.0166298 │ 0.0467559 │ -0.64552 │ 1.40289 │ -0.0284468 │ 0.111773 │
│ 2 │ 0.0217799 │ -0.00699717 │ 0.0329868 │ 0.0449952 │ -0.832404 │ 0.475629 │ -1.14881 │ -0.01702 │
│ 3 │ -0.115577 │ -0.0503802 │ 0.0736173 │ 0.0420466 │ 1.54491 │ 1.65258 │ -1.35967 │ -2.57866 │
│ 4 │ -0.0370057 │ 0.0190459 │ 0.065814 │ 0.0454864 │ 1.32065 │ 0.563565 │ -2.05839 │ -0.74898 │
│ 5 │ -0.0643088 │ -0.00711682 │ 0.0340452 │ 0.0459816 │ 1.1223 │ 1.45555 │ -0.88864 │ -0.776195 │
```
#### 5. A Pipeline for the Voting Ensemble Classification
```julia
# take all categorical columns and hot-bit encode each,
# concatenate them to the numerical features,
# and feed them to the voting ensemble
using AutoMLPipeline.Utils
pvote = (catf |> ohe) + (numf) |> vote
pred = fit_transform!(pvote,X,Y)
sc=score(:accuracy,pred,Y)
println(sc)
crossvalidate(pvote,X,Y,"accuracy_score")
```
```julia
fold: 1, 0.5373134328358209
fold: 2, 0.7014925373134329
fold: 3, 0.5294117647058824
fold: 4, 0.6716417910447762
fold: 5, 0.6716417910447762
fold: 6, 0.6119402985074627
fold: 7, 0.5074626865671642
fold: 8, 0.6323529411764706
fold: 9, 0.6268656716417911
fold: 10, 0.5671641791044776
errors: 0
(mean = 0.6057287093942055, std = 0.06724940684190235, folds = 10, errors = 0)
```
Note: `crossvalidate()` supports the following sklearn performance metrics:
#### classification:
- `accuracy_score`, `balanced_accuracy_score`, `cohen_kappa_score`
- `jaccard_score`, `matthews_corrcoef`, `hamming_loss`, `zero_one_loss`
- `f1_score`, `precision_score`, `recall_score`,
#### regression:
- `mean_squared_error`, `mean_squared_log_error`
- `mean_absolute_error`, `median_absolute_error`
- `r2_score`, `max_error`, `mean_poisson_deviance`
- `mean_gamma_deviance`, `mean_tweedie_deviance`,
- `explained_variance_score`
#### 6. Use `@pipelinex` instead of `@pipeline` to print the corresponding function calls in 5
```julia
julia> @pipelinex (catf |> ohe) + (numf) |> vote
:(Pipeline(ComboPipeline(Pipeline(catf, ohe), numf), vote))
# another way is to use @macroexpand with @pipeline
julia> @macroexpand @pipeline (catf |> ohe) + (numf) |> vote
:(Pipeline(ComboPipeline(Pipeline(catf, ohe), numf), vote))
```
#### 7. A Pipeline for the Random Forest (RF) Classification
```julia
# compute the pca, ica, fa of the numerical columns,
# combine them with the hot-bit encoded categorical features
# and feed all to the random forest classifier
prf = (numf |> rb |> pca) + (numf |> rb |> ica) + (numf |> rb |> fa) + (catf |> ohe) |> rf
pred = fit_transform!(prf,X,Y)
score(:accuracy,pred,Y) |> println
crossvalidate(prf,X,Y,"accuracy_score")
```
```julia
fold: 1, 0.6119402985074627
fold: 2, 0.7611940298507462
fold: 3, 0.6764705882352942
fold: 4, 0.6716417910447762
fold: 5, 0.6716417910447762
fold: 6, 0.6567164179104478
fold: 7, 0.6268656716417911
fold: 8, 0.7058823529411765
fold: 9, 0.6417910447761194
fold: 10, 0.6865671641791045
errors: 0
(mean = 0.6710711150131694, std = 0.04231869797446545, folds = 10, errors = 0)
```
#### 8. A Pipeline for the Linear Support Vector for Classification (LSVC)
```julia
plsvc = ((numf |> rb |> pca)+(numf |> rb |> fa)+(numf |> rb |> ica)+(catf |> ohe )) |> lsvc
pred = fit_transform!(plsvc,X,Y)
score(:accuracy,pred,Y) |> println
crossvalidate(plsvc,X,Y,"accuracy_score")
```
```julia
fold: 1, 0.6567164179104478
fold: 2, 0.7164179104477612
fold: 3, 0.8235294117647058
fold: 4, 0.7164179104477612
fold: 5, 0.7313432835820896
fold: 6, 0.6567164179104478
fold: 7, 0.7164179104477612
fold: 8, 0.7352941176470589
fold: 9, 0.746268656716418
fold: 10, 0.6865671641791045
errors: 0
(mean = 0.7185689201053556, std = 0.04820829087095355, folds = 10, errors = 0)
```
#### 9. A Pipeline for Random Forest Regression
```julia
iris = getiris()
Xreg = iris[:,1:3]
Yreg = iris[:,4] |> Vector
pskrfreg = (catf |> ohe) + (numf) |> skrf_reg
res=crossvalidate(pskrfreg,Xreg,Yreg,"mean_absolute_error",10)
```
```julia
fold: 1, 0.1827433333333334
fold: 2, 0.18350888888888886
fold: 3, 0.11627222222222248
fold: 4, 0.1254152380952376
fold: 5, 0.16502333333333377
fold: 6, 0.10900222222222226
fold: 7, 0.12561111111111076
fold: 8, 0.14243000000000025
fold: 9, 0.12130555555555576
fold: 10, 0.18811111111111098
errors: 0
(mean = 0.1459423015873016, std = 0.030924217263958102, folds = 10, errors = 0)
```
Note: More examples can be found in the *test* directory of the package. Since
the code is written in Julia, you are highly encouraged to read the source
code and feel free to extend or adapt the package to your problem. Please
feel free to submit PRs to improve the package features.
#### 10. Performance Comparison of Several Learners
##### 10.1 Sequential Processing
```julia
using Random
using DataFrames
Random.seed!(1)
jrf = RandomForest()
tree = PrunedTree()
disc = CatNumDiscriminator()
ada = skoperator("AdaBoostClassifier")
sgd = skoperator("SGDClassifier")
std = skoperator("StandardScaler")
lsvc = skoperator("LinearSVC")
learners = DataFrame()
for learner in [jrf,ada,sgd,tree,lsvc]
pcmc = @pipeline disc |> ((catf |> ohe) + (numf |> std)) |> learner
println(learner.name[1:end-4])
mean,sd,_ = crossvalidate(pcmc,X,Y,"accuracy_score",10)
global learners = vcat(learners,DataFrame(name=learner.name[1:end-4],mean=mean,sd=sd))
end;
@show learners;
```
```julia
learners = 5×3 DataFrame
│ Row │ name │ mean │ sd │
│ │ String │ Float64 │ Float64 │
├─────┼────────────────────────┼──────────┼───────────┤
│ 1 │ rf │ 0.653424 │ 0.0754433 │
│ 2 │ AdaBoostClassifier │ 0.69504 │ 0.0514792 │
│ 3 │ SGDClassifier │ 0.694908 │ 0.0641564 │
│ 4 │ prunetree │ 0.621927 │ 0.0578242 │
│ 5 │ LinearSVC │ 0.726097 │ 0.0498317 │
```
##### 10.2 Parallel Processing
```julia
using Random
using DataFrames
using Distributed
nprocs() == 1 && addprocs()
@everywhere using DataFrames
@everywhere using AutoMLPipeline
@everywhere profbdata = getprofb()
@everywhere X = profbdata[:,2:end]
@everywhere Y = profbdata[:,1] |> Vector;
@everywhere jrf = RandomForest()
@everywhere ohe = OneHotEncoder()
@everywhere catf = CatFeatureSelector()
@everywhere numf = NumFeatureSelector()
@everywhere tree = PrunedTree()
@everywhere disc = CatNumDiscriminator()
@everywhere ada = skoperator("AdaBoostClassifier")
@everywhere sgd = skoperator("SGDClassifier")
@everywhere std = skoperator("StandardScaler")
@everywhere lsvc = skoperator("LinearSVC")
learners = @sync @distributed (vcat) for learner in [jrf,ada,sgd,tree,lsvc]
pcmc = disc |> ((catf |> ohe) + (numf |> std)) |> learner
println(learner.name[1:end-4])
mean,sd,_ = crossvalidate(pcmc,X,Y,"accuracy_score",10)
DataFrame(name=learner.name[1:end-4],mean=mean,sd=sd)
end
@show learners;
```
```julia
From worker 3: AdaBoostClassifier
From worker 4: SGDClassifier
From worker 5: prunetree
From worker 2: rf
From worker 6: LinearSVC
From worker 4: fold: 1, 0.6716417910447762
From worker 5: fold: 1, 0.6567164179104478
From worker 6: fold: 1, 0.6865671641791045
From worker 2: fold: 1, 0.7164179104477612
From worker 4: fold: 2, 0.7164179104477612
From worker 5: fold: 2, 0.6119402985074627
From worker 6: fold: 2, 0.8059701492537313
From worker 2: fold: 2, 0.6716417910447762
From worker 4: fold: 3, 0.6764705882352942
....
learners = 5×3 DataFrame
│ Row │ name │ mean │ sd │
│ │ String │ Float64 │ Float64 │
├─────┼────────────────────────┼──────────┼───────────┤
│ 1 │ rf │ 0.647388 │ 0.0764844 │
│ 2 │ AdaBoostClassifier │ 0.712862 │ 0.0471003 │
│ 3 │ SGDClassifier │ 0.710009 │ 0.05173 │
│ 4 │ prunetree │ 0.60428 │ 0.0403121 │
│ 5 │ LinearSVC │ 0.726383 │ 0.0467506 │
```
#### 11. Automatic Selection of Best Learner
You can use the `*` operation as a selector function which outputs the result of the best learner.
If we use the same pre-processing pipeline in 10, we expect that the average performance of
the best learner, which is `lsvc`, will be around 73%.
```julia
Random.seed!(1)
pcmc = disc |> ((catf |> ohe) + (numf |> std)) |> (jrf * ada * sgd * tree * lsvc)
crossvalidate(pcmc,X,Y,"accuracy_score",10)
```
```julia
fold: 1, 0.7164179104477612
fold: 2, 0.7910447761194029
fold: 3, 0.6911764705882353
fold: 4, 0.7761194029850746
fold: 5, 0.6567164179104478
fold: 6, 0.7014925373134329
fold: 7, 0.6417910447761194
fold: 8, 0.7058823529411765
fold: 9, 0.746268656716418
fold: 10, 0.835820895522388
errors: 0
(mean = 0.7262730465320456, std = 0.060932268798867976, folds = 10, errors = 0)
```
#### 12. Learners as Transformers
It is also possible to use learners in the middle of expression to serve
as transformers and their outputs become inputs to the final learner as illustrated
below.
```julia
expr = (
((numf |> rb)+(catf |> ohe) |> gb) +
((numf |> rb)+(catf |> ohe) |> rf)
) |> ohe |> ada;
crossvalidate(expr,X,Y,"accuracy_score")
```
```julia
fold: 1, 0.6567164179104478
fold: 2, 0.5522388059701493
fold: 3, 0.7205882352941176
fold: 4, 0.7313432835820896
fold: 5, 0.6567164179104478
fold: 6, 0.6119402985074627
fold: 7, 0.6119402985074627
fold: 8, 0.6470588235294118
fold: 9, 0.6716417910447762
fold: 10, 0.6119402985074627
errors: 0
(mean = 0.6472124670763829, std = 0.053739947087648336, folds = 10, errors = 0)
```
One can even include a selector function as part of the transformer preprocessing routine:
```julia
pjrf = disc |> ((catf |> ohe) + (numf |> std)) |>
((jrf * ada ) + (sgd * tree * lsvc)) |> ohe |> ada
crossvalidate(pjrf,X,Y,"accuracy_score")
```
```julia
fold: 1, 0.7164179104477612
fold: 2, 0.7164179104477612
fold: 3, 0.7941176470588235
fold: 4, 0.7761194029850746
fold: 5, 0.6268656716417911
fold: 6, 0.6716417910447762
fold: 7, 0.7611940298507462
fold: 8, 0.7352941176470589
fold: 9, 0.7761194029850746
fold: 10, 0.6865671641791045
errors: 0
(mean = 0.7260755048287972, std = 0.0532393731318768, folds = 10, errors = 0)
```
Note: The `ohe` is necessary in both examples
because the outputs of the learners and selector function are categorical
values that need to be hot-bit encoded before feeding to the final `ada` learner.
#### 13. Tree Visualization of the Pipeline Structure
You can visualize the pipeline by using the AbstractTrees Julia package.
```julia
# package installation
using Pkg
Pkg.update()
Pkg.add("AbstractTrees")
# load the packages
using AbstractTrees
using AutoMLPipeline
expr = @pipelinex (catf |> ohe) + (numf |> pca) + (numf |> ica) |> rf
:(Pipeline(ComboPipeline(Pipeline(catf, ohe), Pipeline(numf, pca), Pipeline(numf, ica)), rf))
print_tree(stdout, expr)
```
```julia
:(Pipeline(ComboPipeline(Pipeline(catf, ohe), Pipeline(numf, pca), Pipeline(numf, ica)), rf))
├─ :Pipeline
├─ :(ComboPipeline(Pipeline(catf, ohe), Pipeline(numf, pca), Pipeline(numf, ica)))
│ ├─ :ComboPipeline
│ ├─ :(Pipeline(catf, ohe))
│ │ ├─ :Pipeline
│ │ ├─ :catf
│ │ └─ :ohe
│ ├─ :(Pipeline(numf, pca))
│ │ ├─ :Pipeline
│ │ ├─ :numf
│ │ └─ :pca
│ └─ :(Pipeline(numf, ica))
│ ├─ :Pipeline
│ ├─ :numf
│ └─ :ica
└─ :rf
```
### Extending AutoMLPipeline
If you want to add your own filter or transformer or learner,
take note that filters and transformers process the
input features but ignores the output argument. On the other hand,
learners process both their input and output arguments during `fit!`
while `transform!` expects one input argument in all cases.
First step is to import the abstract types and define your own mutable structure
as subtype of either Learner or Transformer. Next is to import the `fit!` and
`transform!` functions so that you can overload them. Also, you must
load the DataFrames package because it is the main format for data processing.
Finally, implement your own `fit` and `transform` and export them.
```julia
using DataFrames
using AutoMLPipeline.AbsTypes
# import functions for overloading
import AutoMLPipeline.AbsTypes: fit!, transform!
# export the new definitions for dynamic dispatch
export fit!, transform!, MyFilter
# define your filter structure
mutable struct MyFilter <: Transformer
name::String
model::Dict
args::Dict
    function MyFilter(args::Dict)
....
end
end
# define your fit! function.
function fit!(fl::MyFilter, inputfeatures::DataFrame, target::Vector=Vector())
....
end
#define your transform! function
function transform!(fl::MyFilter, inputfeatures::DataFrame)::DataFrame
....
end
```
Note that the main format to exchange data is the dataframe, which requires `transform!`
to return a dataframe. The features as input for `fit!` and `transform!` shall
be in dataframe format too. This is necessary so that
the pipeline passes the dataframe format consistently to
its corresponding filters/transformers/learners. Once you have
this transformer, you can use it as part of the pipeline
together with the other learners and transformers.
### Feature Requests and Contributions
We welcome contributions, feature requests, and suggestions. Here is the link to open an [issue][issues-url] for any problems you encounter. If you want to contribute, please follow the guidelines in [contributors page][contrib-url].
### Help usage
Usage questions can be posted in:
- [Julia Community](https://julialang.org/community/)
- [Gitter AutoMLPipeline Community][gitter-url]
- [Julia Discourse forum][discourse-tag-url]
[contrib-url]: https://github.com/IBM/AutoMLPipeline.jl/blob/master/CONTRIBUTORS.md
[issues-url]: https://github.com/IBM/AutoMLPipeline.jl/issues
[discourse-tag-url]: https://discourse.julialang.org/
[gitter-url]: https://gitter.im/AutoMLPipelineLearning/community
[gitter-img]: https://badges.gitter.im/ppalmes/TSML.jl.svg
[slack-img]: https://img.shields.io/badge/chat-on%20slack-yellow.svg
[slack-url]: https://julialang.slack.com/
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://ibm.github.io/AutoMLPipeline.jl/stable/
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://ibm.github.io/AutoMLPipeline.jl/dev/
[gha-img]: https://github.com/IBM/AutoMLPipeline.jl/actions/workflows/ci.yml/badge.svg
[gha-url]: https://github.com/IBM/AutoMLPipeline.jl/actions/workflows/ci.yml
[codecov-img]: https://codecov.io/gh/IBM/AutoMLPipeline.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/IBM/AutoMLPipeline.jl
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 5413 | ## AutoMLPipeline (AMLP)
is a package that makes it trivial to create
complex ML pipeline structures using simple
expressions. AMLP leverages on the built-in
macro programming features of Julia
to symbolically process, manipulate
pipeline expressions, and
automatically discover optimal structures
for machine learning prediction and classification.
To illustrate, a typical machine learning workflow that extracts
numerical features (numf) for ICA (independent component analysis) and
PCA (principal component analysis) transformations, respectively,
concatenated with the hot-bit encoding (ohe) of categorical
features (catf) of a given data for RF modeling can be expressed
in AMLP as:
```julia
julia> model = @pipeline (catf |> ohe) + (numf |> pca) + (numf |> ica) |> rf
julia> fit!(model,Xtrain,Ytrain)
julia> prediction = transform!(model,Xtest)
julia> score(:accuracy,prediction,Ytest)
julia> crossvalidate(model,X,Y,"accuracy_score")
julia> crossvalidate(model,X,Y,"balanced_accuracy_score")
```
You can visualize the pipeline by using AbstractTrees Julia package.
```julia
# package installation
julia> using Pkg
julia> Pkg.add("AbstractTrees")
julia> Pkg.add("AutoMLPipeline")
# load the packages
julia> using AbstractTrees
julia> using AutoMLPipeline
julia> expr = @pipelinex (catf |> ohe) + (numf |> pca) + (numf |> ica) |> rf
:(Pipeline(ComboPipeline(Pipeline(catf, ohe), Pipeline(numf, pca), Pipeline(numf, ica)), rf))
julia> print_tree(stdout, expr)
:(Pipeline(ComboPipeline(Pipeline(catf, ohe), Pipeline(numf, pca), Pipeline(numf, ica)), rf))
├─ :Pipeline
├─ :(ComboPipeline(Pipeline(catf, ohe), Pipeline(numf, pca), Pipeline(numf, ica)))
│ ├─ :ComboPipeline
│ ├─ :(Pipeline(catf, ohe))
│ │ ├─ :Pipeline
│ │ ├─ :catf
│ │ └─ :ohe
│ ├─ :(Pipeline(numf, pca))
│ │ ├─ :Pipeline
│ │ ├─ :numf
│ │ └─ :pca
│ └─ :(Pipeline(numf, ica))
│ ├─ :Pipeline
│ ├─ :numf
│ └─ :ica
└─ :rf
```
### Motivations
The typical workflow in machine learning
classification or prediction requires
some combination of the following
preprocessing steps together with modeling:
- feature extraction (e.g. ica, pca, svd)
- feature transformation (e.g. normalization, scaling, ohe)
- feature selection (anova, correlation)
- modeling (rf, adaboost, xgboost, lm, svm, mlp)
Each step has several choices of functions
to use together with their corresponding
parameters. Optimizing the performance of the
entire pipeline is a combinatorial search
of the proper order and combination of preprocessing
steps, optimization of their corresponding
parameters, together with searching for
the optimal model and its hyper-parameters.
Because of close dependencies among various
steps, we can consider the entire process
to be a pipeline optimization problem (POP).
POP requires simultaneous optimization of pipeline
structure and parameter adaptation of its elements.
As a consequence, having an elegant way to
express pipeline structure helps in the analysis
and implementation of the optimization routines.
The target of future work will be the
implementations of different pipeline
optimization algorithms ranging from
evolutionary approaches, integer
programming (discrete choices of POP elements),
tree/graph search, and hyper-parameter search.
### Package Features
- Pipeline API that allows high-level description of processing workflow
- Common API wrappers for ML libs including Scikitlearn, DecisionTree, etc
- Symbolic pipeline parsing for easy expression
of complex pipeline structures
- Easily extensible architecture by overloading just two main interfaces: fit! and transform!
- Meta-ensembles that allows composition of
ensembles of ensembles (recursively if needed)
for robust prediction routines
- Categorical and numerical feature selectors for
specialized preprocessing routines based on types
### Installation
AutoMLPipeline is in the Julia Official package registry.
The latest release can be installed at the Julia
prompt using Julia's package management which is triggered
by pressing `]` at the julia prompt:
```julia
julia> ]
(v1.0) pkg> add AutoMLPipeline
```
or
```julia
julia> using Pkg
julia> pkg"add AutoMLPipeline"
```
or
```julia
julia> using Pkg
julia> Pkg.add("AutoMLPipeline")
```
Once AutoMLPipeline is installed, you can
load it by:
```julia
julia> using AutoMLPipeline
```
or
```julia
julia> import AutoMLPipeline
```
Generally, you will need the different learners/transformers and utils in AMLP
to carry out the processing and modeling routines.
```julia
using AutoMLPipeline
using AutoMLPipeline.FeatureSelectors
using AutoMLPipeline.EnsembleMethods
using AutoMLPipeline.CrossValidators
using AutoMLPipeline.DecisionTreeLearners
using AutoMLPipeline.Pipelines
using AutoMLPipeline.BaseFilters
using AutoMLPipeline.SKPreprocessors
using AutoMLPipeline.Utils
```
CSV and DataFrames will be needed in the succeeding examples
and should be installed:
```julia
using Pkg
Pkg.add("CSV")
Pkg.add("DataFrames")
```
## Tutorial Outline
```@contents
Pages = [
"tutorial/pipeline.md",
"tutorial/preprocessing.md",
"tutorial/learning.md",
"tutorial/extending.md"
]
Depth = 3
```
## Manual Outline
```@contents
Pages = [
"man/pipeline.md",
"man/preprocessors.md",
"man/learners.md",
"man/metaensembles.md"
]
Depth = 3
```
## ML Library
```@contents
Pages = [
"lib/typesfunctions.md"
]
```
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 989 | ## Index
```@index
Order = [:module,:type,:function]
Modules = [
AutoMLPipeline.AbsTypes,
AutoMLPipeline.BaselineModels,
AutoMLPipeline.BaseFilters,
AutoMLPipeline.Pipelines,
AutoMLPipeline.NARemovers,
AutoMLPipeline.CrossValidators,
AutoMLPipeline.DecisionTreeLearners,
AutoMLPipeline.EnsembleMethods,
AutoMLPipeline.FeatureSelectors,
AutoMLPipeline.SKLearners,
AutoMLPipeline.SKPreprocessors,
AutoMLPipeline.SKCrossValidators,
AutoMLPipeline.Utils
]
```
## Descriptions
```@autodocs
Order = [:type,:function]
Modules = [
AutoMLPipeline.AbsTypes,
AutoMLPipeline.BaselineModels,
AutoMLPipeline.BaseFilters,
AutoMLPipeline.Pipelines,
AutoMLPipeline.NARemovers,
AutoMLPipeline.CrossValidators,
AutoMLPipeline.DecisionTreeLearners,
AutoMLPipeline.EnsembleMethods,
AutoMLPipeline.FeatureSelectors,
AutoMLPipeline.SKLearners,
AutoMLPipeline.SKPreprocessors,
AutoMLPipeline.SKCrossValidators,
AutoMLPipeline.Utils
]
```
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 1436 | # Learners
```@setup learner
ENV["COLUMNS"]=1000
#using PythonCall
#warnings = pyimport("warnings")
#warnings.filterwarnings("ignore")
```
Similar to `SKPreprocessor`, most of the `Learners` in AMLP
for its initial release are based on Scikitlearn libraries.
!!! note
For more information and specific details of arguments to pass
and learner's behaviour, please consult the Scikitlearn
documentation.
### SKLearner Structure
```
SKLearner(Dict(
:name => "sklearner",
:output => :class,
:learner => "LinearSVC",
:impl_args => Dict()
)
)
Helper Function:
SKLearner(learner::String,args::Dict=Dict())
```
SKLearner maintains a dictionary of learners which can
be listed by invoking the function: `sklearners()`
The `:impl_args` is a dictionary of paramters to be
passed as arguments to the Scikitlearn learner.
Let's try loading some learners with some arguments based on Scikitlearn
documentation:
```@repl learner
using AutoMLPipeline
iris = getiris();
X = iris[:,1:4];
Y = iris[:,end] |> Vector;
rf = SKLearner("RandomForestClassifier",Dict(:n_estimators=>30,:random_state=>0));
crossvalidate(rf,X,Y,"accuracy_score",3)
ada = SKLearner("AdaBoostClassifier",Dict(:n_estimators=>20,:random_state=>0));
crossvalidate(ada,X,Y,"accuracy_score",3)
svc = SKLearner("SVC",Dict(:kernel=>"rbf",:random_state=>0,:gamma=>"auto"));
crossvalidate(svc,X,Y,"accuracy_score",3)
```
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 5916 | # Ensemble Methods
AMLP supports three types of meta-ensembles, namely:
StackEnsemble, VoteEnsemble, and BestLearner. They
are considered as meta-ensembles because they can contain
other learners including other ensembles as well as
meta-ensembles. They support complex level of hierarchy
depending on the requirements. The most effective way to
show their flexibility is to provide some real examples.
### StackEnsemble
Stack ensemble uses the idea of stacking to train
learners into two stages. The first stage trains
bottom-level learners for the mapping
between the input and output. The default is to use
70% of the data. Once the bottom-level learners finish the training,
the algorithm proceeds to stage 2 which treats the
trained learners as transformers. The output from
these transformers is used to train the Meta-Learner
(RandomForest, PrunedTree, or Adaboost) using the
remaining 30% of the data.
The StackEnsemble accepts the following arguments
wrapped in a `Dictionary` type argument:
- `:name` -> alias name of ensemble
- `:learners` -> a vector of learners
- `:stacker` -> the meta-learner (RandomForest, or Adaboost, or PrunedTree)
- `:stacker_training_portion` -> percentage of data for the meta-learner
- `:keep_original_features` -> boolean (whether the original data is included together with the transformed data by the bottom-level learners)
While the init function of StackEnsemble expects an argument of
Dictionary type, it supports the following convenient function signatures:
- `StackEnsemble(Dict(:learners=>...,:stacker=>...))`
- `StackEnsemble([learner1,learner2,...],Dict(:stacker=>...))`
- `StackEnsemble([learner1,learner2,...],stacker=...)`
- `StackEnsemble([learner1,learner2,...])`
To illustrate, let's create some bottom-level learners from Scikitlearn and Julia:
```@example ensemble
using AutoMLPipeline
using DataFrames
gauss = SKLearner("GaussianProcessClassifier")
svc = SKLearner("LinearSVC")
ridge = SKLearner("RidgeClassifier")
jrf = RandomForest() # julia's rf
rfstacker = RandomForest()
stackens = StackEnsemble([gauss,svc,ridge,jrf],stacker=rfstacker)
nothing #hide
```
Let's load some dataset and create a pipeline with the `stackens`
as the learner at the end of the pipeline.
```@example ensemble
using CSV
using Random
Random.seed!(123);
profbdata = CSV.File(joinpath(dirname(pathof(AutoMLPipeline)),"../data/profb.csv")) |> DataFrame
X = profbdata[:,2:end]
Y = profbdata[:,1] |> Vector;
ohe = OneHotEncoder()
catf = CatFeatureSelector();
numf = NumFeatureSelector()
rb = SKPreprocessor("RobustScaler");
pt = SKPreprocessor("PowerTransformer");
pca = SKPreprocessor("PCA");
fa = SKPreprocessor("FactorAnalysis");
ica = SKPreprocessor("FastICA")
pplstacks = @pipeline (numf |> rb |> pca) + (numf |> rb |> ica) +
(catf |> ohe) + (numf |> rb |> fa) |> stackens
nothing #hide
```
```@repl ensemble
crossvalidate(pplstacks,X,Y)
```
It is worth noting that stack ensemble is dealing with mixture of libraries consisting of Julia's
Random Forest and Scikitlearn learners.
### VoteEnsemble
Vote ensemble uses similar idea with the Stack Ensemble
but instead of stacking, it uses voting to get the final
prediction. The first stage involves the collection of
bottom-level learners being trained to learn
the mapping between input and output. Once they are trained
in a classification problem, they are treated as transformers
wherein the final output of the ensemble is based on the
output with the greatest count. It's equivalent to majority
voting where each learner has one vote based on its prediction
output class.
The VoteEnsemble accepts the following arguments
wrapped inside a `Dictionary` type of argument:
- `:name` -> alias name of ensemble
- `:learners` -> a vector of learners
While the init function of VoteEnsemble expects a Dictionary
type of argument, it also supports the following convenient
helper functions:
- `VoteEnsemble(Dict(:learners=>...,:name=>...))`
- `VoteEnsemble([learner1,learner2,...],Dict(:name=>...))`
- `VoteEnsemble([learner1,learner2,...],name=...)`
- `VoteEnsemble([learner1,learner2,...])`
Let's use the same pipeline but substitute the stack ensemble
with the vote ensemble:
```@example ensemble
Random.seed!(123);
votingens = VoteEnsemble([gauss,svc,ridge,jrf]);
pplvote = @pipeline (numf |> rb |> pca) + (numf |> rb |> ica) +
(catf |> ohe) + (numf |> rb |> fa) |> votingens;
nothing #hide
```
```@repl ensemble
crossvalidate(pplvote,X,Y)
```
### [BestLearner](@id bestlearner)
The BestLearner ensemble does not perform any 2-stage mapping. What it does is
to cross-validate each learner performance and use the most optimal learner
as the final model. This ensemble can be used to automatically pick the
most optimal learner in a group of learners included in each argument
based on certain selection criteria.
The BestLearner accepts the following arguments
wrapped in `Dictionary` type argument:
- `:selection_function` -> Function
- `:score_type` -> Real
- `:partition_generator` -> Function
- `:learners` -> Vector of learners
- `:name` -> alias name of learner
- `:learner_options_grid` -> for hyper-parameter search
The BestLearner supports the following function signatures
aside from Dictionary type argument:
- `BestLearner(Dict(:learners=>...,:name=>...))`
- `BestLearner([learner1,learner2,...],Dict(:name=>...))`
- `BestLearner([learner1,learner2,...],name=...)`
- `BestLearner([learner1,learner2,...])`
Let's use the same pipeline as above but substitute the vote ensemble
with the BestLearner ensemble:
```@example ensemble
Random.seed!(123);
bestens = BestLearner([gauss,svc,ridge,jrf]);
pplbest = @pipeline (numf |> rb |> pca) + (numf |> rb |> ica) +
(catf |> ohe) + (numf |> rb |> fa) |> bestens;
nothing #hide
```
```@repl ensemble
crossvalidate(pplbest,X,Y)
```
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 2751 | # Pipeline
There are three types of Pipelines: `Pipeline`, `ComboPipeline`, and `Selector Pipeline`.
The `Pipeline` (linear pipeline) performs
sequential evaluation of `fit_transform!` operation
to each of its elements passing the output of previous element as
input to the next element iteratively. `ComboPipeline` (feature union pipeline)
performs dataframe concatenation of the final
outputs of its elements. `Selector Pipeline` acts as
a selector function which outputs the results of the best
learner using its internal cross-validation process.
The `Pipeline` uses `|>` symbolic expression while `ComboPipeline` uses `+`.
The expression, `a |> b`, is equivalent to `Pipeline(a,b)` function call while
the expression, `a + b`, is equivalent to `ComboPipeline(a,b)`. The
elements `a` and `b` can be transformers, filters, learners or
pipeline themselves.
### Pipeline Structure
The linear pipeline accepts the following variables wrapped in a
`Dictionary` type argument:
- `:name` -> alias name for the pipeline
- `:machines` -> a Vector learners/transformers/pipelines
- `:machine_args` -> arguments to elements of the pipeline
For ease of usage, the following function calls are supported:
- `Pipeline(args::Dict)` -> default init function
- `Pipeline(Vector{<:Machine},args::Dict=Dict())` -> for passing vectors of learners/transformers
- `Pipeline(machs::Varargs{Machine})` -> for passing learners/transformers as arguments
### ComboPipeline Structure
`ComboPipeline` or feature union pipeline accepts similar init variables
with the the linear pipeline and follows similar helper functions:
- `ComboPipeline(args::Dict)` -> default init function
- `ComboPipeline(Vector{<:Machine},args::Dict=Dict())` -> for passing vectors of learners/transformers
- `ComboPipeline(machs::Varargs{Machine})` -> for passing learners/transformers as arguments
### Selector Pipeline Structure
`Selector Pipeline` is based on the `BestLearner` ensemble. Detailed explanation can be found in:
[BestLearner](@ref bestlearner)
### Macro Functions
There are two macro functions available: `@pipeline` and `@pipelinex`. The `@pipeline` macro
is used to process pipeline expression and returns the evaluation of the transformed expression.
During its recursive parsing, any occurrence of `|>` is converted to a `Pipeline`
call and `+` to `ComboPipeline` calls. To aid in the understanding of the workflow, `@pipelinex`
shows the transformed expression during the `@pipeline` parsing but just before
the expression is evaluated. The macro `@pipelinex` has similar output to that of `@macroexpand`,
i.e., `pipelinex expr` is equivalent to `@macroexpand @pipeline expr`.
Note: Please refer to the [Pipeline Tutorial](@ref PipelineUsage) for their usage.
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 2114 | # Preprocessors
```@setup preprocessor
ENV["COLUMNS"]=1000
```
The design of AMLP is to allow easy extensibility of its processing elements.
The choice of Scikitlearn preprocessors in this initial release
is more for demonstration purposes to get a good
narrative of how the various parts of AMLP
fits together to solve a particular problem. AMLP has been tested
to run with a mixture of transformers and filters from Julia, Scikitlearn,
and R's caret in the same pipeline without issues as long as the interfaces
are properly implemented for each wrapped functions.
As there are loads of preprocessing techniques available, the user is encouraged
to create their own wrappers of their favorite implementations
to allow them interoperability with the existing AMLP implementations.
### SKPreprocessor Structure
```
SKPreprocessor(args=Dict(
:name => "skprep",
:preprocessor => "PCA",
:impl_args => Dict()
)
)
Helper Function:
SKPreprocessor(preprocessor::String,args::Dict=Dict())
```
SKPreprocessor maintains a dictionary of pre-processors
and dynamically load them based on the `:preprocessor`
name passed during its initialization. The
`:impl_args` is a dictionary of parameters to be passed
as arguments to the Scikitlearn preprocessor.
!!! note
Please consult the documentation in Scikitlearn
for what arguments to pass relative to the chosen preprocessor.
Let's try PCA with 2 components decomposition and random state initialized at 0.
```@example preprocessor
using AutoMLPipeline
iris = getiris()
X=iris[:,1:4]
pca = SKPreprocessor("PCA",Dict(:n_components=>2,:random_state=>0))
respca = fit_transform!(pca,X)
nothing #hide
```
```@repl preprocessor
first(respca,5)
```
Let's try ICA with 3 components decomposition and whitening:
```@example preprocessor
ica = SKPreprocessor("FastICA",Dict(:n_components=>3,:whiten=>true))
resica = fit_transform!(ica,X)
nothing #hide
```
```@repl preprocessor
first(resica,5)
```
To get a listing of available preprocessors, use the `skpreprocessors()` function:
```@repl preprocessor
skpreprocessors()
```
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 5833 | # Extending AutoMLPipeline
```@setup csvreader
ENV["COLUMNS"]=1000
```
Having a meta-ML package sounds ideal but not practical
in terms of maintainability and flexibility.
The metapackage becomes a central point of failure
and bottleneck. It doesn't subscribe to the KISS philosophy of
Unix which encourages decentralization of implementation. As long
as the input and output behavior of transformers and learners
follow a standard format, they should work without
dependency or communication. By using a consistent input/output
interfaces, the passing of information
among the elements in the pipeline will not bring any
surprises to the receivers and transmitters of information
down the line.
Because AMPL's symbolic pipeline is based on the idea of Linux
pipeline and filters, there is a deliberate effort to follow
as much as possible the KISS philosophy by just using two
interfaces to be overloaded (`fit!` and `transform!`):
input features should be a DataFrame type while
the target output should be a Vector type. Transformers `fit!`
function expects only one input argument and ignores the target
argument. On the other hand, the `fit!` function of any learner
requires both input and target arguments to carry out the
supervised learning phase. For the `transform!` function, both
learners and transformers expect one input argument that both
use to apply their learned parameters in transforming the input
into either prediction, decomposition, normalization, scaling, etc.
#### AMLP Abstract Types
The AMLP abstract types are composed of the following:
```
abstract type Machine end
abstract type Workflow <: Machine end
abstract type Computer <: Machine end
abstract type Learner <: Computer end
abstract type Transformer <: Computer end
```
At the top of the hierarchy is the `Machine` abstraction that supports
two major interfaces: `fit!` and `transform!`.
The abstract `Machine` has two major types: `Computer` and `Workflow`.
The `Computer` types perform computations such as filters, transformers, and learners while
the `Workflow` controls the flow of information. A `Workflow` can be a
sequential flow of information or a combination of information from two
or more workflows. A `Workflow` that provides sequential flow is called
`Pipeline` (or linear pipeline) while the one that combines information
from different workflows is called `ComboPipeline`.
The `Computer` type has two subtypes: `Learner` and `Transformer`. Their main
difference is in the behavior of their `fit!` function. The `Learner`
type learns its parameters by finding a mapping function between its
`input` and `output` arguments while the
`Transformer` does not require this mapping function to perform its operation.
The `Transformer` learns all its parameters by just processing its `input` features.
Both `Transformer` and `Learner` have similar behavior in the `transform!` function. Both
apply their learned parameters to transform their `input` into `output`.
#### Extending AMLP by Adding a CSVReader Transformer
Let's extend AMLP by adding CSV reading support embedded in the pipeline.
Instead of passing the data in the pipeline argument, we create
a csv transformer that passes the data to succeeding elements in the pipeline
from a csv file.
```@example csvreader
module FileReaders
using CSV
using DataFrames: DataFrame, nrow,ncol
using AutoMLPipeline
using AutoMLPipeline.AbsTypes # abstract types (Learners and Transformers)
import AutoMLPipeline.fit!
import AutoMLPipeline.transform!
export fit!, transform!
export CSVReader
# define a user-defined structure for type dispatch
mutable struct CSVReader <: Transformer
name::String
model::Dict
function CSVReader(args = Dict(:fname=>""))
fname = args[:fname]
fname != "" || throw(ArgumentError("missing filename."))
isfile(fname) || throw(ArgumentError("file does not exist."))
new(fname,args)
end
end
CSVReader(fname::String) = CSVReader(Dict(:fname=>fname))
# Define fit! which does error checking. You can also make
# it do nothing and let the transform! function do
# the checking and loading. The fit! function is only defined
# here to make sure there is a fit! dispatch for CSVReader
# type which is needed in the pipeline call iteration.
function fit!(csvreader::CSVReader, df::DataFrame=DataFrame(), target::Vector=Vector())
fname = csvreader.name
isfile(fname) || throw(ArgumentError("file does not exist."))
end
# define transform which opens the file and returns a dataframe
function transform!(csvreader::CSVReader, df::DataFrame=DataFrame())
fname = csvreader.name
df = CSV.File(fname) |> DataFrame
df != DataFrame() || throw(ArgumentError("empty dataframe."))
return df
end
end
nothing #hide
```
Let's now load the FileReaders module together with the other AutoMLPipeline
modules and create a pipeline that includes the csv reader we just created.
```@example csvreader
using DataFrames: DataFrame, nrow,ncol
using AutoMLPipeline
using .FileReaders # load from the Main module
#### Column selector
catf = CatFeatureSelector()
numf = NumFeatureSelector()
pca = SKPreprocessor("PCA")
ohe = OneHotEncoder()
fname = joinpath(dirname(pathof(AutoMLPipeline)),"../data/profb.csv")
csvrdr = CSVReader(fname)
p1 = @pipeline csvrdr |> (catf + numf)
df1 = fit_transform!(p1) # empty argument because input coming from csvreader
nothing #hide
```
```@repl csvreader
first(df1,5)
```
```@example csvreader
p2 = @pipeline csvrdr |> (numf |> pca) + (catf |> ohe)
df2 = fit_transform!(p2) # empty argument because input coming from csvreader
nothing #hide
```
```@repl csvreader
first(df2,5)
```
With the CSVReader extension, csv files can now be directly processed or loaded inside the pipeline
and can be used with other existing filters and transformers.
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 3044 | # Training and Validation
```@setup learning
using Random
ENV["COLUMNS"]=1000
Random.seed!(123)
```
Let us continue our discussion by using another dataset. This time,
let's use CMC dataset that are mostly categorical.
[CMC](https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice)
is about asking women of their contraceptive choice. The dataset is composed
of the following features:
```@example learning
using AutoMLPipeline
using CSV
using DataFrames
cmcdata = CSV.File(joinpath(dirname(pathof(AutoMLPipeline)),"../data/cmc.csv")) |> DataFrame;
X = cmcdata[:,1:end-1]
Y = cmcdata[:,end] .|> string
show5(df) = first(df,5)
nothing #hide
```
```@repl learning
show5(cmcdata)
```
Let's examine the number of unique instances for each column:
```@repl learning
DataFrame(hcat([length(unique(n)) for n in eachcol(cmcdata)],names(cmcdata)),:auto)
```
Except for Wife's age and Number of children, the other columns
have less than five unique instances. Let's create a pipeline
to filter those columns and convert them to hot-bits and
concatenate them with the standardized scale of the numeric columns.
```@example learning
std = SKPreprocessor("StandardScaler")
ohe = OneHotEncoder()
kohe = SKPreprocessor("OneHotEncoder")
catf = CatFeatureSelector()
numf = NumFeatureSelector()
disc = CatNumDiscriminator(5) # unique instances <= 5 are categories
pcmc = @pipeline disc |> ((catf |> ohe) + (numf |> std))
dfcmc = fit_transform!(pcmc,X)
nothing #hide
```
```@repl learning
show5(dfcmc)
```
### Evaluate Learners with Same Pipeline
You can get a list of sklearners and skpreprocessors by using the following
function calls:
```@repl learning
sklearners()
skpreprocessors()
```
Let us evaluate 4 learners using the same preprocessing pipeline:
```@example learning
jrf = RandomForest()
ada = SKLearner("AdaBoostClassifier")
sgd = SKLearner("SGDClassifier")
tree = PrunedTree()
nothing #hide
```
```@example learning
using DataFrames: DataFrame, nrow,ncol
learners = DataFrame()
for learner in [jrf,ada,sgd,tree]
pcmc = @pipeline disc |> ((catf |> ohe) + (numf |> std)) |> learner
println(learner.name)
mean,sd,folds = crossvalidate(pcmc,X,Y,"accuracy_score",5)
global learners = vcat(learners,DataFrame(name=learner.name,mean=mean,sd=sd,kfold=folds))
end;
nothing #hide
```
```@repl learning
@show learners;
```
For this particular pipeline, Adaboost has the best performance followed
by RandomForest.
Let's extend the pipeline adding Gradient Boost learner and Robust Scaler.
```@example learning
rbs = SKPreprocessor("RobustScaler")
gb = SKLearner("GradientBoostingClassifier")
learners = DataFrame()
for learner in [jrf,ada,sgd,tree,gb]
pcmc = @pipeline disc |> ((catf |> ohe) + (numf |> rbs) + (numf |> std)) |> learner
println(learner.name)
mean,sd,folds = crossvalidate(pcmc,X,Y,"accuracy_score",5)
global learners = vcat(learners,DataFrame(name=learner.name,mean=mean,sd=sd,kfold=folds))
end;
nothing #hide
```
```@repl learning
@show learners;
```
This time, Gradient boost has the best performance.
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 7983 | # [Pipeline](@id PipelineUsage)
*A tutorial for using the `@pipeline` expression*
### Dataset
Let us start the tutorial by loading the dataset.
```@setup pipeline
using Random
ENV["COLUMNS"]=1000
Random.seed!(123)
```
```@example pipeline
using AutoMLPipeline
using CSV
using DataFrames
profbdata = getprofb()
X = profbdata[:,2:end]
Y = profbdata[:,1] |> Vector
nothing #hide
```
We can check the data by showing the first 5 rows:
```@repl pipeline
show5(df)=first(df,5); # show first 5 rows
show5(profbdata)
```
This dataset is a collection of pro football scores with the
following variables and their descriptions:
- Home/Away = Favored team is at home or away
- Favorite Points = Points scored by the favored team
- Underdog Points = Points scored by the underdog team
- Pointspread = Oddsmaker's points to handicap the favored team
- Favorite Name = Code for favored team's name
- Underdog name = Code for underdog's name
- Year = 89, 90, or 91
!!! note
For the purpose of this tutorial, we will use the first column,
Home vs Away, as the target variable to be predicted using the
other columns as input features. For this target output, we are
trying to ask whether the model can learn the patterns from its
input features to predict whether the game was played at home or
away. Since the input features have both categorical and numerical
features, the dataset is a good basis to describe
how to extract these two types of features, preprocessed them, and
learn the mapping using a one-liner pipeline expression.
### AutoMLPipeline Modules and Instances
Before continuing further with the tutorial, let us load the
necessary AutoMLPipeline package:
```@example pipeline
using AutoMLPipeline
nothing #hide
```
Let us also create some instances of filters, transformers, and
models that we can use to preprocess and model the dataset.
```@example pipeline
#### Decomposition
pca = SKPreprocessor("PCA"); fa = SKPreprocessor("FactorAnalysis");
ica = SKPreprocessor("FastICA")
#### Scaler
rb = SKPreprocessor("RobustScaler"); pt = SKPreprocessor("PowerTransformer")
norm = SKPreprocessor("Normalizer"); mx = SKPreprocessor("MinMaxScaler")
#### categorical preprocessing
ohe = OneHotEncoder()
#### Column selector
disc = CatNumDiscriminator()
catf = CatFeatureSelector(); numf = NumFeatureSelector()
#### Learners
rf = SKLearner("RandomForestClassifier"); gb = SKLearner("GradientBoostingClassifier")
lsvc = SKLearner("LinearSVC"); svc = SKLearner("SVC")
mlp = SKLearner("MLPClassifier"); ada = SKLearner("AdaBoostClassifier")
jrf = RandomForest(); vote = VoteEnsemble(); stack = StackEnsemble()
best = BestLearner()
nothing #hide
```
### Processing Categorical Features
For the first illustration, let us extract categorical features of
the data and output some of them using the pipeline expression
and its interface:
```@example pipeline
pop_cat = @pipeline catf
tr_cat = fit_transform!(pop_cat,X,Y)
nothing #hide
```
```@repl pipeline
show5(tr_cat)
```
One may notice that instead of using `fit!` and `transform!`,
the example uses `fit_transform!` instead. The latter is equivalent
to calling `fit!` and `transform!` in sequence which is handy
for examining the final output of the transformation prior to
feeding it to the model.
Let us now transform the categorical features into one-hot-bit-encoding (ohe)
and examine the results:
```@example pipeline
pop_ohe = @pipeline catf |> ohe
tr_ohe = fit_transform!(pop_ohe,X,Y)
nothing #hide
```
```@repl pipeline
show5(tr_ohe)
```
### Processing Numerical Features
Let us have an example of extracting the numerical features
of the data using different combinations of filters/transformers:
```@example pipeline
pop_rb = @pipeline (numf |> rb)
tr_rb = fit_transform!(pop_rb,X,Y)
nothing #hide
```
```@repl pipeline
show5(tr_rb)
```
### Concatenating Extracted Categorical and Numerical Features
For a typical modeling workflow, input features are combinations
of categorical features transformed to one-hot-bit encoding together
with numerical features normalized or scaled or transformed by
decomposition.
Here is an example of a typical input feature:
```@example pipeline
pop_com = @pipeline (numf |> norm) + (catf |> ohe)
tr_com = fit_transform!(pop_com,X,Y)
nothing #hide
```
```@repl pipeline
show5(tr_com)
```
The column size from 6 grew to 60 after the hot-bit encoding was applied
because of the large number of unique instances for the categorical columns.
### Performance Evaluation of the Pipeline
We can add a model at the end of the pipeline and evaluate
the performance of the entire pipeline by cross-validation.
Let us use a linear SVC model and evaluate using 5-fold cross-validation.
```@repl pipeline
Random.seed!(12345);
pop_lsvc = @pipeline ( (numf |> rb) + (catf |> ohe) + (numf |> pt)) |> lsvc;
tr_lsvc = crossvalidate(pop_lsvc,X,Y,"balanced_accuracy_score",5)
```
What about using Gradient Boosting model?
```@repl pipeline
Random.seed!(12345);
pop_gb = @pipeline ( (numf |> rb) + (catf |> ohe) + (numf |> pt)) |> gb;
tr_gb = crossvalidate(pop_gb,X,Y,"balanced_accuracy_score",5)
```
What about using Random Forest model?
```@repl pipeline
Random.seed!(12345);
pop_rf = @pipeline ( (numf |> rb) + (catf |> ohe) + (numf |> pt)) |> jrf;
tr_rf = crossvalidate(pop_rf,X,Y,"balanced_accuracy_score",5)
```
Let's evaluate several learners which is a typical workflow
in searching for the optimal model.
```@example pipeline
using Random
using DataFrames: DataFrame, nrow,ncol
using AutoMLPipeline
Random.seed!(1)
jrf = RandomForest()
ada = SKLearner("AdaBoostClassifier")
sgd = SKLearner("SGDClassifier")
tree = PrunedTree()
std = SKPreprocessor("StandardScaler")
disc = CatNumDiscriminator()
lsvc = SKLearner("LinearSVC")
learners = DataFrame()
for learner in [jrf,ada,sgd,tree,lsvc]
pcmc = @pipeline disc |> ((catf |> ohe) + (numf |> std)) |> learner
println(learner.name)
mean,sd,_ = crossvalidate(pcmc,X,Y,"accuracy_score",10)
global learners = vcat(learners,DataFrame(name=learner.name,mean=mean,sd=sd))
end;
nothing #hide
```
```@repl pipeline
@show learners;
```
!!! note
It can be inferred from the results that linear SVC has the best performance
with respect to the different pipelines evaluated.
The compact expression supported by the
pipeline makes testing of the different combination of features
and models trivial. It makes performance evaluation
of the pipeline easily manageable in a systematic way.
### Learners as Filters
It is also possible to use learners in the middle of
expression to serve as filters and their outputs become
input to the final learner as illustrated below.
```@repl pipeline
Random.seed!(1);
expr = @pipeline (
((numf |> pca) |> gb) + ((numf |> pca) |> jrf)
) |> ohe |> ada;
crossvalidate(expr,X,Y,"accuracy_score",5)
```
It is important to take note that `ohe`
is necessary because the outputs of the two learners (`gb` and `jrf`)
are categorical values that need to be hot-bit encoded before
feeding them to the final `ada` learner.
### Advanced Expressions using Selector Pipeline
You can use `*` operation as a selector
function which outputs the result of the best learner.
Instead of looping over the different learners to identify
the best learner, you can use the selector function
to automatically determine the best learner and output its
prediction.
```@repl pipeline
Random.seed!(1);
pcmc = @pipeline disc |> ((catf |> ohe) + (numf |> std)) |>
(jrf * ada * sgd * tree * lsvc);
crossvalidate(pcmc,X,Y,"accuracy_score",10)
```
Here is another example using the Selector Pipeline as a preprocessor
in the feature extraction stage of the pipeline:
```@repl pipeline
Random.seed!(1);
pjrf = @pipeline disc |> ((catf |> ohe) + (numf |> std)) |>
((jrf * ada ) + (sgd * tree * lsvc)) |> ohe |> ada;
crossvalidate(pjrf,X,Y,"accuracy_score")
```
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT"
] | 0.4.6 | 02a92d379ef775b7e3bf07414a4459ec72cca73d | docs | 6098 | # Preprocessing
Let us start by loading the `diabetes` dataset:
```@setup preprocessing
using Random
ENV["COLUMNS"]=1000
Random.seed!(123)
```
```@example preprocessing
using AutoMLPipeline
using CSV
using DataFrames
diabetesdf = CSV.File(joinpath(dirname(pathof(AutoMLPipeline)),"../data/diabetes.csv")) |> DataFrame
X = diabetesdf[:,1:end-1]
Y = diabetesdf[:,end] |> Vector
nothing #hide
```
We can check the data by showing the first 5 rows:
```@repl preprocessing
show5(df)=first(df,5); # show first 5 rows
show5(diabetesdf)
```
This [UCI dataset](https://archive.ics.uci.edu/ml/datasets/diabetes)
is a collection of diagnostic tests among the Pima Indians
to investigate whether the patient shows
sign of diabetes or not based on certain features:
- Number of times pregnant
- Plasma glucose concentration a 2 hours in an oral glucose tolerance test
- Diastolic blood pressure (mm Hg)
- Triceps skin fold thickness (mm)
- 2-Hour serum insulin (mu U/ml)
- Body mass index (weight in kg/(height in m)^2)
- Diabetes pedigree function
- Age (years)
- Class variable (0 or 1) indicating diabetic or not
What is interesting with this dataset is that one or more numeric columns
can be categorical and should be hot-bit encoded. One way to verify is
to compute the number of unique instances for each column and look for
columns with relatively smaller count:
```@repl preprocessing
[n=>length(unique(x)) for (n,x) in pairs(eachcol(diabetesdf))] |> collect
```
Among the input columns, `preg` has only 17 unique instances and it can
be treated as a categorical variable. However, its description indicates
that the feature refers to the number of times the patient is pregnant
and can be considered numerical. With this dilemma, we need to figure
out which representation provides better performance to our classifier.
In order to test the two options, we can use the Feature Discriminator
module to filter and transform the `preg` column to either numeric
or categorical and choose the pipeline with the optimal performance.
### CatNumDiscriminator for Detecting Categorical Numeric Features
*Transform numeric columns with small unique instances to categories.*
Let us use `CatNumDiscriminator` which expects one argument to indicate
the maximum number of unique instances in order to consider a particular
column as categorical. For the sake of this discussion, let us use its
default value which is 24.
```@example preprocessing
using AutoMLPipeline
disc = CatNumDiscriminator(24)
@pipeline disc
tr_disc = fit_transform!(disc,X,Y)
nothing #hide
```
```@repl preprocessing
show5(tr_disc)
```
You may notice that the `preg` column is converted by the `CatNumDiscriminator`
into `String` type which can be fed to hot-bit encoder to preprocess
categorical data:
```@example preprocessing
disc = CatNumDiscriminator(24)
catf = CatFeatureSelector()
ohe = OneHotEncoder()
pohe = @pipeline disc |> catf |> ohe
tr_pohe = fit_transform!(pohe,X,Y)
nothing #hide
```
```@repl preprocessing
show5(tr_pohe)
```
We have now converted all categorical data into hot-bit encoded values.
For a typical scenario, one can consider columns with around 3-10
unique numeric instances to be categorical.
Using `CatNumDiscriminator`, it is trivial
to convert columns of features with small unique instances into categorical
and hot-bit encode them as shown below. Let us use 5 as the cut-off and any
columns with less than 5 unique instances is converted to hot-bits.
```@repl preprocessing
using DataFrames: DataFrame, nrow,ncol
df = rand(1:3,100,3) |> DataFrame;
show5(df)
disc = CatNumDiscriminator(5);
pohe = @pipeline disc |> catf |> ohe;
tr_pohe = fit_transform!(pohe,df);
show5(tr_pohe)
```
### Concatenating Hot-Bits with PCA of Numeric Columns
Going back to the original `diabetes` dataset, we can now use the
`CatNumDiscriminator` to differentiate between categorical
columns and numerical columns and preprocess them based on their
types (String vs Number). Below is the pipeline to convert `preg`
column to hot-bits and use PCA for the numerical features:
```@example preprocessing
pca = SKPreprocessor("PCA")
disc = CatNumDiscriminator(24)
ohe = OneHotEncoder()
catf = CatFeatureSelector()
numf = NumFeatureSelector()
pl = @pipeline disc |> ((numf |> pca) + (catf |> ohe))
res_pl = fit_transform!(pl,X,Y)
nothing #hide
```
```@repl preprocessing
show5(res_pl)
```
### Performance Evaluation
Let us compare the RF cross-validation result between two options:
- `preg` column should be categorical vs
- `preg` column is numerical
in predicting diabetes where numerical values are scaled by robust scaler and
decomposed by PCA.
##### Option 1: Assume All Numeric Columns as not Categorical and Evaluate
```@example preprocessing
pca = SKPreprocessor("PCA")
dt = SKLearner("DecisionTreeClassifier")
rf = SKLearner("RandomForestClassifier")
rbs = SKPreprocessor("RobustScaler")
jrf = RandomForest()
lsvc = SKLearner("LinearSVC")
ohe = OneHotEncoder()
catf = CatFeatureSelector()
numf = NumFeatureSelector()
disc = CatNumDiscriminator(0) # disable turning numeric to categorical features
pl = @pipeline disc |> ((numf |> pca) + (catf |> ohe)) |> jrf
nothing #hide
```
```@repl preprocessing
crossvalidate(pl,X,Y,"accuracy_score",30)
```
##### Option 2: Assume as Categorical Numeric Columns <= 24 and Evaluate
```@example preprocessing
disc = CatNumDiscriminator(24) # turning numeric to categorical if unique instances <= 24
pl = @pipeline disc |> ((numf |> pca) + (catf |> ohe)) |> jrf
nothing #hide
```
```@repl preprocessing
crossvalidate(pl,X,Y,"accuracy_score",30)
```
From this evaluation, `preg` column should be treated as numerical
because the corresponding pipeline got better performance. One
thing to note is the presence of errors in the cross-validation
performance for the pipeline that treats `preg` as categorical
data. The subset of training data during the
kfold validation may contain singularities and evaluation causes
some errors due to hot-bit encoding that increases data sparsity.
The error, however, may be a bug which needs to be addressed in
the future.
| AutoMLPipeline | https://github.com/IBM/AutoMLPipeline.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 752 | using GraphPlayground
using Documenter
# Attach a doctest setup block so `jldoctest` examples in docstrings run with
# the package already loaded.
DocMeta.setdocmeta!(GraphPlayground, :DocTestSetup, :(using GraphPlayground); recursive=true)

# Build the documentation site.
makedocs(;
    modules=[GraphPlayground],
    authors="David Gleich <[email protected]> and contributors",
    sitename="GraphPlayground.jl",
    format=Documenter.HTML(;
        canonical="https://dgleich.github.io/GraphPlayground.jl",
        edit_link="main",
        assets=String[],
    ),
    checkdocs=:exports,   # fail the build if any exported name lacks a docstring
    pages=[
        "Home" => "index.md",
        "Forces" => "forces.md",
        "Library" => "library.md",
        "Example: Mouse Pointer Repulsion and Collision" => "mouse-pointer-repulsion.md",
    ],
)

# Push the built site to the gh-pages branch (no-op when run locally).
deploydocs(;
    repo="github.com/dgleich/GraphPlayground.jl",
    devbranch="main",
)
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 2321 | using TOML
using Printf
# this variable needs to be changed based on your own installation directory
# Path to a local clone of the General registry (unpacked form).
# NOTE(review): on recent Julia versions the registry may be a .tar.gz instead
# of a directory — confirm this path exists on the target machine.
path = joinpath(homedir(),".julia","registries","General")
# Registry master table: maps package UUID (String) => metadata Dict.
packages_dict = TOML.parsefile(joinpath(path,"Registry.toml"))["packages"]
# Get the root stdlib directory
# Locate the directory that holds Julia's standard libraries by walking up
# from the source file of the LinearAlgebra stdlib:
# <stdlib>/LinearAlgebra/src/LinearAlgebra.jl  ->  <stdlib>
function _get_stdlib_dir()
    srcfile = Base.find_package("LinearAlgebra")
    stdlibroot = srcfile |> dirname |> dirname |> dirname
    return stdlibroot
end
const STDLIB_DIR = _get_stdlib_dir()
const STDLIBS = readdir(STDLIB_DIR)
##
# Merge every stdlib's Project.toml into packages_dict (keyed by uuid) so that
# stdlib dependencies resolve in the package-id map built below.
for (i, stdlib) in enumerate(STDLIBS)
  if isfile(joinpath(STDLIB_DIR, stdlib, "Project.toml"))
    proj = TOML.parsefile(joinpath(STDLIB_DIR, stdlib, "Project.toml"))
    packages_dict[proj["uuid"]] = proj
  end
end
pkg_keys = collect(keys(packages_dict))
# Map each package UUID to a graph vertex id.  Graphs.jl vertices are 1-based,
# so use `i` directly: the original `i-1` produced a vertex 0 that add_edge!
# silently rejects, dropping every edge that touched the first package and
# shifting the id <-> pkg_keys correspondence by one.
pkg_ids = Dict(pkg_keys[i] => i for i = 1:length(pkg_keys))
G = DiGraph(length(pkg_keys))
for i in eachindex(pkg_keys)
  pkg_id = pkg_ids[pkg_keys[i]]
  if haskey(packages_dict[pkg_keys[i]],"path")
    # Registry package: its dependencies are listed in Deps.toml under the
    # package's registry path, grouped by version range.
    dep_path = joinpath(path,packages_dict[pkg_keys[i]]["path"],"Deps.toml")
    if isfile(dep_path)
      dep_dict = TOML.parsefile(dep_path)
      for key in keys(dep_dict)
        tmp_dict = dep_dict[key]
        for pkg_name in keys(tmp_dict)
          # Edge: package -> its dependency.
          add_edge!(G, pkg_id, pkg_ids[tmp_dict[pkg_name]])
        end
      end
    end
  else
    # Stdlib entry: dependencies are inline in its Project.toml "deps" table.
    if haskey(packages_dict[pkg_keys[i]],"deps")
      for key in packages_dict[pkg_keys[i]]["deps"]
        # NOTE(review): this edge direction (dependency -> package) is the
        # opposite of the registry branch above — confirm which is intended.
        add_edge!(G, pkg_ids[key[2]], pkg_id)
      end
    end
  end
end
##
# Human-readable package names, in the same order as pkg_keys.
pkg_names = [packages_dict[pkg_keys[i]]["name"] for i = 1:length(pkg_keys)]
##
#playground(G)
# Interactive force-directed view of the dependency graph; node size, charge,
# and repulsion all scale with out-degree so hub packages stand out.
# NOTE(review): this script only does `using TOML, Printf` — `DiGraph`,
# `outdegree`, `playground`, and `colorant` require Graphs, GraphPlayground,
# and Colors to already be loaded in the session.
playground(G,
  graphplot_options = (; node_size=outdegree(G).+1,
      node_color = [colorant"red" for i in 1:nv(G)],
      edge_width = [1.0 for i in 1:ne(G)]),
  manybody_options = (; distance=-(outdegree(G) .+ 10)),
  link_options = (; distance=30, iterations=1),
  charge_options = (; strength=-30.0 .* outdegree(G) )
)
##
# Headless run of the force simulation plus a static plot snapshot.
# Fix: the original referred to an undefined lowercase `g`; the dependency
# graph built above is `G`.
sim = ForceSimulation(Point2f, vertices(G);
  link=LinkForce(edges=edges(G), iterations=10, distance=20, strength=1), charge=ManyBodyForce(),
  center=GraphPlayground.CenterForce(Point2f(320, 240)))
# Advance the layout 100 steps before drawing.
for _ in 1:100
  step!(sim)
end
fig = Figure()
ax = Axis(fig[1,1])
GraphPlayground.igraphplot!(ax, G, sim; node_size=[10 for _ in 1:nv(G)])
fig
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 1677 | using GraphPlayground, StableRNGs, GeometryBasics, GLMakie
# Demo: a box of balls that collide with each other and are repelled by the
# mouse pointer.  The pointer is modeled as one extra node (the last one) with
# zero radius and a strong negative charge.
## Setup the nodes
rng = StableRNG(1)
nballs = 200
nnodes = nballs + 1          # +1: the final node tracks the mouse pointer
width = 564                  # scene is width x width pixels
k = width/nnodes
radiusdist = k:4k            # each ball gets a radius sampled from this range
radius = rand(rng, radiusdist, nnodes )
radius[end] = 0              # the mouse-pointer node itself has no radius
## Create the positions
pos = [Point2f0(rand(rng, 0:width), rand(rng, 0:width)) for _ in 1:nnodes]
pos = pos .- sum(pos) / length(pos)   # recenter positions around the origin
## Setup the force simulation
sim = ForceSimulation(
  pos, # the starting list of positions
  eachindex(pos); # the list of nodes, it's just all the indices.
  position=PositionForce(;strength=0.01), # a centering force
  collide=CollisionForce(;radius=radius.+1,iterations=3), # the collision force
  charge=ManyBodyForce(strength=(i) -> i==nnodes ? -width*2/3 : 0.0, theta2=0.82),
  # this creates a strong repulsion from the mouse pointer (which is the
  # last node)
  alpha=GraphPlayground.CoolingStepper(alpha_target=0.3),
  velocity_decay=0.9,)
## Setup the scene and window
s = Scene(camera = campixel!, size = (width, width))
# Observable of on-screen positions; simulation coordinates are origin-centered,
# so shift by width/2 to map into pixel space.
pos = Observable(sim.positions .+ Point2f0(width/2, width/2))
scatter!(s,
  pos,
  # NOTE(review): 1.11 looks like an empirical radius->markersize fudge
  # factor — confirm against the marker's pixel sizing.
  markersize=pi*radius/1.11,
  markerspace=:pixel,
  color=:black,
  strokewidth=0.5,
  strokecolor=:white,
  )
# Per-frame callback: pin the pointer node to the mouse, advance one step,
# and push the new positions to the plot.
GraphPlayground.Window(s; title="Mouse Pointer Repulsion Demo") do _
  mp = mouseposition(s) # get the mouse position
  # NOTE(review): debug print left in — this logs on every frame.
  @show mp
  fixnode!(sim, nnodes, mp .- Point2f0(width/2, width/2)) # fix the last node to the mouse pointer
  step!(sim) # take a step in the simulation
  pos[] = sim.positions .+ Point2f0(width/2, width/2) # update the positions
end
## A few things for test cases
step!(sim) # directly take a step to exercise various portions
display(sim)
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 771 | using Graphs, GLMakie, GraphPlayground
"""
    sbm(blocks::Vector{Int}, p::Matrix{Float64})

Sample an undirected stochastic-block-model graph.  `blocks[i]` gives the
number of nodes in block `i`, and `p[i, j]` is the probability of an edge
between a node in block `i` and a node in block `j`.  Self-loops are never
created, and each unordered pair is considered exactly once.  Uses the
global RNG.
"""
function sbm(blocks::Vector{Int}, p::Matrix{Float64})
    nnodes = sum(blocks)
    g = SimpleGraph(nnodes)
    # offsets[i] is the first node id of block i.
    offsets = cumsum(vcat(1, blocks[1:end-1]))
    nblocks = length(blocks)
    for bi in 1:nblocks, bj in bi:nblocks
        urange = offsets[bi]:(offsets[bi] + blocks[bi] - 1)
        vrange = offsets[bj]:(offsets[bj] + blocks[bj] - 1)
        for u in urange, v in vrange
            # For the diagonal block require u < v so each pair is tried once
            # and self-loops are skipped.
            if (bi != bj || u < v) && rand() < p[bi, bj]
                add_edge!(g, u, v)
            end
        end
    end
    return g
end
##
# Number of nodes in each block
blocks = [50, 50, 50, 50]
# Probability matrix
# Sparse background probability everywhere, then boost the diagonal so that
# within-block edges are ~100x more likely than between-block edges.
p = ones(length(blocks), length(blocks)) * 0.001
foreach(i->p[i,i] += 0.1, 1:length(blocks))
# Generate the SBM graph
g = sbm(blocks, p)
# Launch the interactive force-directed layout.
playground(g)
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 2306 | ## Recording the examples in the README.md
using Graphs, GraphPlayground, GeometryBasics, GLMakie
##
# Record the karate-club playground demo to karate.gif, drawing a fake mouse
# cursor so the pointer is visible in the capture (screen recording does not
# include the OS cursor).
g = smallgraph(:karate)
p = playground(g)
# Need to show the mousepointer
mppos = Observable(Point2f(0,0))
# Hand-drawn arrow-cursor outline; the offsets nudge the hotspot toward the tip.
xoffset = 0.1
poffset = -0.1
arrow_path = BezierPath([
  MoveTo(Point(0+xoffset, 0+poffset)),
  LineTo(Point(0.3+xoffset, -0.3+poffset)),
  LineTo(Point(0.15+xoffset, -0.3+poffset)),
  LineTo(Point(0.3+xoffset, -1+poffset)),
  LineTo(Point(0+xoffset, -0.9+poffset)),
  LineTo(Point(-0.3+xoffset, -1+poffset)),
  LineTo(Point(-0.15+xoffset, -0.3+poffset)),
  LineTo(Point(-0.3+xoffset, -0.3+poffset)),
  ClosePath()
])
scatter!(p.window.root_scene, mppos, markersize=25,
  marker = arrow_path, rotation=pi/4, color=:grey,
  strokecolor=:white, strokewidth=2)
# Keep the drawn cursor glued to the real mouse position.
on(events(p.window.root_scene).mouseposition) do pos
  mppos[] = pos
end
##
# Capture frames until the user closes the window.
record(p.window.root_scene, "karate.gif") do io
  while isopen(p.window)
    recordframe!(io)
  end
end
##
# Second recording: a 100x100 grid mesh with custom forces, captured to
# mesh.gif.  Same fake-cursor trick as the karate demo above.
using Graphs, GraphPlayground, GeometryBasics
g = grid([100,100]) # make a 100x100 grid from Graphs
p = playground(g,
  ForceSimulation(Point2f, vertices(g);
    link=LinkForce(;edges=edges(g), iterations=10, distance=0.5, strength=1),
    charge=ManyBodyForce(;strength=-1),
    center=PositionForce(target=Point2f(300,300)));
  graphplot_options = (;node_size=[2 for _ in 1:nv(g)], edge_width=[1.0 for _ in 1:ne(g)]))
display(p)
p.sim.alpha.alpha_target = 0.5 # keep the simulation hot for a while
# Need to show the mousepointer
mppos = Observable(Point2f(0,0))
# NOTE(review): this arrow-path block duplicates the one in the karate demo —
# candidate for a shared helper.
xoffset = 0.1
poffset = -0.1
arrow_path = BezierPath([
  MoveTo(Point(0+xoffset, 0+poffset)),
  LineTo(Point(0.3+xoffset, -0.3+poffset)),
  LineTo(Point(0.15+xoffset, -0.3+poffset)),
  LineTo(Point(0.3+xoffset, -1+poffset)),
  LineTo(Point(0+xoffset, -0.9+poffset)),
  LineTo(Point(-0.3+xoffset, -1+poffset)),
  LineTo(Point(-0.15+xoffset, -0.3+poffset)),
  LineTo(Point(-0.3+xoffset, -0.3+poffset)),
  ClosePath()
])
scatter!(p.window.root_scene, mppos, markersize=25,
  marker = arrow_path, rotation=pi/4, color=:grey,
  strokecolor=:white, strokewidth=2)
# Keep the drawn cursor glued to the real mouse position.
on(events(p.window.root_scene).mouseposition) do pos
  mppos[] = pos
end
##
# Capture frames until the user closes the window.
record(p.window.root_scene, "mesh.gif") do io
  while isopen(p.window)
    recordframe!(io)
  end
end
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 611 | module GraphPlayground
using GraphMakie
using Random
using Colors
using Makie
using LinearAlgebra
using Graphs
using NearestNeighbors
using GLMakie
using GeometryBasics
## import setindex from Base
import Base.setindex, Base.eltype, Base.zero, Base.show
# Inclusion order matters: utilities.jl and simulation.jl define the helpers
# and the ForceSimulation type that the force files below extend with
# `initialize`/`force!` methods.
include("utilities.jl")
include("simulation.jl")
export ForceSimulation, step!, fixnode!, freenode!
include("simpleforces.jl")
export CenterForce, PositionForce
include("linkforce.jl")
export LinkForce
include("manybodyforce.jl")
export ManyBodyForce
include("collisionforce.jl")
export CollisionForce
include("playground.jl")
export playground
end # module GraphPlayground
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 6827 | struct CollisionForce{T}
args::T
end
"""
CollisionForce([radius,] [strength])
Create a collision force. This force is used to simulate collisions between nodes.
"""
CollisionForce(;kwargs...) = CollisionForce{typeof(kwargs)}(kwargs)
#CollisionForce() = CollisionForce(NamedTuple())
# Resolve a CollisionForce's options against a concrete node set: expand
# scalar/function specifications into per-node values and pick a default RNG.
function initialize(body::CollisionForce, nodes;
        radius = Float32(1.0),
        strength = Float32(1.0),
        iterations = 1,
        random = nothing)
    node_radii = _handle_node_values(nodes, radius)
    node_strengths = _handle_node_values(nodes, strength)
    rng = random === nothing ? Random.GLOBAL_RNG : random
    return InitializedCollisionForce(node_radii, node_strengths, iterations, rng)
end
# Runtime form of a CollisionForce after `initialize` resolved its options.
struct InitializedCollisionForce
    radius      # per-node radii (array-like, indexed by node)
    strength    # per-node strength multipliers
    iterations  # relaxation passes per simulation step
    rng         # RNG used by jiggle to split exactly coincident nodes
end
# Multi-line summary of the force configuration.
function Base.show(io::IO, z::InitializedCollisionForce)
    print(io, "CollisionForce with ", z.iterations, " iterations")
    println(io)
    print(io, " with radius ", z.radius)
    println(io) # bug fix: was `println()`, which wrote to stdout instead of `io`
    print(io, " and strength ", z.strength)
end
# Post-order traversal of the KDTree that fills `maxradius[idx]` with the
# largest node radius contained in the subtree rooted at internal node `idx`.
# `n` is the number of internal nodes; leaves are scanned directly and their
# maximum is returned up the stack (only internal nodes get table entries).
function _walk_maxradius(T::KDTree, n::Int, idx::Int, radii, maxradius)
    if NearestNeighbors.isleaf(n, idx)
        idxmap = T.indices
        maxrad = zero(eltype(maxradius))
        for ptsidx in NearestNeighbors.get_leaf_range(T.tree_data, idx)
            # we need to get the original index in the pts array, not the
            # index in the tree, which has likely been reordered.
            #@show ptsidx, T.data[ptsidx], radii[ptsidx], idxmap[ptsidx], radii[idxmap[ptsidx]]
            origidx = T.reordered == false ? ptsidx : idxmap[ptsidx]
            maxrad = max(maxrad, radii[origidx])
        end
        return eltype(maxradius)(maxrad)
    else
        left, right = NearestNeighbors.getleft(idx), NearestNeighbors.getright(idx)
        l_maxrad = _walk_maxradius(T, n, left, radii, maxradius)
        r_maxrad = _walk_maxradius(T, n, right, radii, maxradius)
        # record the subtree maximum for this internal node
        maxradius[idx] = max(l_maxrad, r_maxrad)
        return maxradius[idx]
    end
end
# Allocate and fill the per-internal-node maximum-radius table for tree `T`.
# `pts` is unused but kept for call-site compatibility.
function _build_tree_info_maxradius(T::KDTree, pts, radii)
    ninternal = T.tree_data.n_internal_nodes
    subtree_maxradius = Vector{eltype(radii)}(undef, ninternal)
    _walk_maxradius(T, ninternal, 1, radii, subtree_maxradius)
    return subtree_maxradius
end
"""
_check_if_possible_collision(region::HyperRectangle, maxradius::Float64, targetpt)
Check for a potential collision between an expanded `region` and `targetpt`.
`region` is a `HyperRectangle`, and `maxradius` is the amount by which the `region` is expanded.
Returns `true` if a collision is possible, `false` otherwise.
# Thanks ChatGPT!
"""
function _check_if_possible_collision(region, maxradius, targetpt)
# Expand the region by maxradius
expanded_region = NearestNeighbors.HyperRectangle(
region.mins .- maxradius, # Subtract maxradius from each dimension
region.maxes .+ maxradius # Add maxradius to each dimension
)
# Check if the target point is inside the expanded region
return all(expanded_region.mins .<= targetpt) && all(targetpt .<= expanded_region.maxes)
end
# Recursive traversal resolving collisions between node `velidx` (predicted
# position `targetpt` = position + velocity) and all points in the subtree
# rooted at `treenode`. Subtrees whose bounding box, expanded by the subtree's
# maximum radius, cannot touch the node are pruned.
function _collision_force_on_node(target, treenode, rect, targetpt, T, maxradii, radii, strengths, rng, vel, velidx)
    if NearestNeighbors.isleaf(T.tree_data.n_internal_nodes, treenode)
        idxmap = T.indices
        for ptsidx in NearestNeighbors.get_leaf_range(T.tree_data, treenode)
            origidx = T.reordered == false ? ptsidx : idxmap[ptsidx]
            # TODO, check if it's really better to use the symmetric approach.
            if origidx > velidx # we only handle "half" of the positions and apply forces symmetrically...
                ri = radii[velidx]
                rj = radii[origidx]
                r = ri + rj
                # difference of the *predicted* positions of the two nodes
                d = targetpt .- T.data[ptsidx] .- vel[origidx]
                d2 = dot(d,d)
                if d2 < r*r
                    #println("Collision between ", velidx, " and ", origidx, " with distance ", sqrt(d2), " and radii ", ri, " and ", rj)
                    # we have a collision.
                    d = jiggle(d, rng) # random perturbation for exactly coincident points
                    d2 = dot(d,d)
                    dval = sqrt(d2)
                    l = (r-dval) / dval # overlap relative to the separation
                    l *= strengths[velidx]
                    factor = (rj*rj)/(ri*ri + rj*rj) # the larger node moves less
                    vel[velidx] = vel[velidx] .+ d .* l .* factor
                    vel[origidx] = vel[origidx] .- d .* l .* (1-factor)
                    #vel[velidx] = vel[velidx] .+ d .* l .* (1-factor)
                    #vel[origidx] = vel[origidx] .- d .* l .* (factor)
                end
            end
        end
    else
        maxradius = maxradii[treenode]
        # prune subtrees that cannot contain a colliding point
        if _check_if_possible_collision(rect, maxradius+radii[velidx], targetpt)
            left, right = NearestNeighbors.getleft(treenode), NearestNeighbors.getright(treenode)
            split_val = T.split_vals[treenode]
            split_dim = T.split_dims[treenode]
            rect_right = NearestNeighbors.HyperRectangle(@inbounds(setindex(rect.mins, split_val, split_dim)), rect.maxes)
            rect_left = NearestNeighbors.HyperRectangle(rect.mins, @inbounds setindex(rect.maxes, split_val, split_dim))
            _collision_force_on_node(target, left, rect_left, targetpt, T, maxradii, radii, strengths, rng, vel, velidx)
            _collision_force_on_node(target, right, rect_right, targetpt, T, maxradii, radii, strengths, rng, vel, velidx)
        end
    end
end
# Apply the collision force once for every point in the tree.
# NOTE(review): the name has a typo ("collsion"); kept as-is because
# collisionforce! below calls it by this name.
function _apply_collsion_force(T, vel, maxradii, radii, strengths, rng)
    for i in eachindex(T.data)
        # Map the (possibly reordered) tree index back to the caller's node index.
        velidx = T.reordered == false ? i : T.indices[i] # this is the index of i in the real vel array
        _collision_force_on_node(i, 1, T.hyper_rec, T.data[i] .+ vel[velidx], T, maxradii, radii, strengths, rng, vel, velidx)
    end
end
# Resolve pairwise node overlaps using a KD-tree acceleration structure.
# Runs `niter` relaxation passes; each pass updates velocities in place.
function collisionforce!(niter::Int, alpha::Real, nodes, pos, vel, radii, strengths, rng)
    tree = KDTree(pos; reorder=true)
    # Precompute, per internal tree node, the largest radius in its subtree so
    # whole regions can be pruned during the traversal.
    subtree_maxradius = _build_tree_info_maxradius(tree, pos, radii)
    for _ in 1:niter
        _apply_collsion_force(tree, vel, subtree_maxradius, radii, strengths, rng)
    end
end
# Reference O(n^2) implementation of the collision force, used for checking
# the tree-accelerated collisionforce! above. Mirrors its pair handling.
function simplecollisionforce!(niter::Int, alpha::Real, nodes, pos, vel, radii, strengths, rng)
    for _ in 1:niter
        for i in eachindex(nodes)
            targetpt = pos[i] .+ vel[i] # predicted position of node i
            for j in eachindex(nodes)
                if i > j # visit each unordered pair once; apply the impulse symmetrically
                    ri = radii[i]
                    rj = radii[j]
                    r = ri + rj
                    d = targetpt .- pos[j] .- vel[j]
                    d2 = dot(d,d)
                    if d2 < r*r
                        d = jiggle(d, rng) # perturb exactly coincident points
                        d2 = dot(d,d)
                        dval = sqrt(d2)
                        l = (r-dval) / dval
                        # bug fix: strength was ignored here, unlike
                        # collisionforce!, which scales by strengths[velidx]
                        l *= strengths[i]
                        factor = (rj*rj)/(ri*ri + rj*rj) # the larger node moves less
                        vel[i] = vel[i] .+ d .* l .* (factor)
                        vel[j] = vel[j] .- d .* l .* (1-factor)
                    end
                end
            end
        end
    end
end
# Dispatch entry point: unpack the simulation state and run the collision solver.
function force!(alpha::Real, sim::ForceSimulation, cforce::InitializedCollisionForce)
    collisionforce!(cforce.iterations, alpha, sim.nodes, sim.positions,
        sim.velocities, cforce.radius, cforce.strength, cforce.rng)
    # O(n^2) reference implementation, kept for debugging:
    #simplecollisionforce!(cforce.iterations, alpha, sim.nodes, sim.positions, sim.velocities, cforce.radius, cforce.strength, cforce.rng)
end
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 6854 | function _count_edges!(counter, edges)
for e in edges
src, dst = _srcdst(e)
counter[src] += 1
counter[dst] += 1
end
end
"""
LinkForce(;edges)
LinkForce(;edges, [strength], [distance], [bias], [iterations], [random])
A link force computes forces between nodes that emulate a strong connection.
This is useful for graphs where the edges represent strong connections between nodes.
The force applied between two nodes is based on the strength of the link, the distance,
the strength of the edge. The bias of the edge is used to determine how much the nodes
should move.
For an edge between `src`, `dst`, let ``d`` be the difference vector
between the position of `dst` and `src` _with their velocity corrections included_.
The total force is ``f = \\alpha \\cdot s \\cdot (||d|| - l) / ||d||`` where ``l`` is the ideal distance
and ``s`` is the strength of the link. The force is applied to the velocity of the nodes
proportional to the bias of the edge ``\\beta``
`vel[dst] -=` ``\\beta f \\cdot d``
`vel[src] +=` ``(1-\\beta) f \\cdot d``
The bias is used to determine how much the nodes should move. If the bias is 0, then the
update is exclusively provided to the `src` node. If the bias is 1, then the update is
exclusively provided to the `dst` node.
## Arguments
- `edges`: An array of edge structures, where each edge structure contains `src` and `dst` fields
or can be indexed like a tuple with e[1], e[2] as the source and destination nodes.
## Optional Arguments
- `strength`: A function or array of values that determine the strength of the link between two nodes.
By default, this is based on the number of edges between the nodes: 1/(min(degree(src), degree(dst))).
- `distance`: A function or array of values that determine the ideal distance between two nodes.
By default, this is 30.0. But this can be a function that takes the edge index and returns a distance.
- `bias`: A function or array of values that determine the bias of the link between two nodes.
This is designed to weight how much the nodes should move. It's designed to make it harder to
move high degree nodes.
- `iterations`: The number of iterations to run the link force. The default is 1.
Each iteration updates the velocity but not the positions. However, the velocity updates
are included in the force calculations. So by running multiple iterations, the forces
are more accurate. This is because we update the velocities in-place.
Using large values here are most important for grids or graphs with a lot of structure.
- `random`: A random number generator. This is used for the random perturbations.
The default is to use a deterministic generator so that the results are reproducible.
I can't imagine why you would need to use this, but it's here in case someone needs
to reproduce something strange.
## Function inputs
An example of using it with a function is the following:
```julia
val = randn(10)
f = LinkForce(;edges=edges, strength=(i,e,src,dst)->val[src]*val[dst])
```
This is called with
- `i`: The index of the edge in the edges array.
- `e`: The edge structure.
- `src`: The source node.
- `dst`: The destination node.
This same structure is used for all strength, bias, and distance.
## Usage
LinkForce is usually used as part of a ForceSimulation.
Here, we setup something simple with two nodes at distance 1.
But that want to be at distance 10 given the edge between them.
```julia
nodes = [1,2]
edgelist = [(1, 2)]
positions = [Point2f(0.0, 0.0), Point2f(1.0, 0.0)]
sim = ForceSimulation(positions, nodes;
link=LinkForce(edges=edgelist, strength=10, distance=10.0, bias=0.25))
iforce = sim.forces.link
# iforce is an `InitializedLinkForce` that's been linked to the simulation
GraphPlayground.force!(0.1, sim, iforce) # Assume alpha=0.1
sim.velocities
```
This example shows how the [`LinkForce`](@ref) computes the velocities of the nodes
to move them away from each other. The reason the update is nonsymmetric
is because of the bias. This says that we want to move node 1 more than node 2.
## See also
[`ForceSimulation`](@ref)
"""
# A link (spring) force specification. The keyword arguments are stored
# untouched and resolved later by `initialize`.
struct LinkForce{T}
    args::T  # NamedTuple of the constructor's keyword arguments
end
#LinkForce(edges) = LinkForce(edges=edges)
LinkForce(;kwargs...) = LinkForce{typeof(kwargs)}(kwargs)
# A LinkForce whose per-edge parameters have been resolved against a concrete
# node/edge set by `initialize`.
struct InitializedLinkForce
    edges       # edge list; each edge yields (src, dst) via _srcdst
    biases      # per-edge split of the impulse between src and dst
    strengths   # per-edge spring strength
    distances   # per-edge ideal length
    rng         # RNG used to jiggle coincident endpoints (may be nothing)
    iterations::Int  # velocity-relaxation passes per step
end
# Compact one-line display, e.g. "42-edge LinkForce".
function Base.show(io::IO, lf::InitializedLinkForce)
    nedges = length(lf.edges)
    print(io, nedges, "-edge LinkForce")
end
# Resolve a LinkForce's keyword arguments against a concrete node set.
# Strength and bias default to degree-based heuristics (see the LinkForce
# docstring); distance defaults to 30.0.
function initialize(link::LinkForce, nodes;
        edges,
        strength = nothing,
        distance = 30.0,
        bias = nothing,
        iterations = 1,
        random = nothing)
    if strength === nothing || bias === nothing
        # count degrees to initialize strength and bias
        count = _get_node_array(Int, nodes)
        fill!(count, 0)
        _count_edges!(count, edges)
        if strength === nothing
            # weaker links between high-degree endpoints
            strength = _handle_link_values(edges, (i,e,src,dst) -> 1.0 / min(count[src], count[dst]))
        else
            strength = _handle_link_values(edges, strength)
        end
        if bias === nothing
            # bias moves the lower-degree endpoint more
            bias = _handle_link_values(edges, (i,e,src,dst) -> count[src] / (count[src] + count[dst]))
        else
            bias = _handle_link_values(edges, bias)
        end
    else
        strength = _handle_link_values(edges, strength)
        bias = _handle_link_values(edges, bias)
    end
    distance = _handle_link_values(edges, distance)
    # NOTE(review): `random` is passed through unchanged; if it is `nothing`,
    # jiggle must cope with a `nothing` rng — confirm in utilities.jl.
    return InitializedLinkForce(edges, bias, strength, distance, random, iterations)
end
# Resolve a LinkForce against a simulation's node set.
function initialize(link::LinkForce, sim::ForceSimulation)
    # bug fix: previously forwarded `sim` itself as the nodes argument, which
    # re-dispatched to this same (more specific) method and then failed on the
    # unsupported keyword arguments. Forward the node set instead.
    return initialize(link, sim.nodes; link.args...)
end
# Core link-force kernel: one pass per iteration over all edges, updating
# velocities in place so later edges see earlier corrections.
function linkforce!(alpha::Real, pos, vel, edges, biases, strengths, distances, rng, niter)
    for k in 1:niter
        for (i,e) in enumerate(edges)
            src, dst = _srcdst(e)
            #=
            link = links[i], source = link.source, target = link.target;
            x = target.x + target.vx - source.x - source.vx || jiggle(random);
            y = target.y + target.vy - source.y - source.vy || jiggle(random);
            l = Math.sqrt(x * x + y * y);
            l = (l - distances[i]) / l * alpha * strengths[i];
            x *= l, y *= l;
            target.vx -= x * (b = bias[i]);
            target.vy -= y * b;
            source.vx += x * (b = 1 - b);
            source.vy += y * b;
            =#
            # difference of the *predicted* endpoint positions (position + velocity)
            diff = pos[dst] .+ vel[dst] .- pos[src] .- vel[src]
            diff = jiggle(diff, rng) # avoid a zero-length vector for coincident endpoints
            l = norm(diff)
            # signed correction: positive when longer than the ideal distance
            l = (l - distances[i]) / l * alpha * strengths[i]
            diff *= l
            # split the correction between the endpoints according to the edge bias
            vel[dst] -= diff .* biases[i]
            vel[src] += diff .* (1 - biases[i])
        end
    end
end
# Dispatch entry point for the link force: unpack the simulation and the
# initialized force, then run the kernel.
function force!(alpha::Real, sim::ForceSimulation, link::InitializedLinkForce)
    linkforce!(alpha, sim.positions, sim.velocities, link.edges, link.biases,
        link.strengths, link.distances, link.rng, link.iterations)
end
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 8683 | """
ManyBodyForce()
ManyBodyForce(; [strength], [min_distance2], [max_distance2], [theta2], [random])
Create a force defined by multiple bodies.
This force is used to simulate the repulsion or attraction
between nodes of the simulation. If you wish to apply to only
a subset of nodes, you can set the `strength` to zero for the
nodes you wish to ignore.
This computation is implemented with a space partitioning data structure
(currently a KDTree) to approximate the impact of distance forces
using a far-field approximation (this is often called a
Barnes-Hut approximation, but that doesn't help understand what
is going on). Setting `theta2` to zero will cause it to
discard the approximation and compute the exact force.
Reasonable values for `theta2` are between 0.5 (better approximation)
and 1.5 (poor approximation). (This is the square of the ``\\theta``
value commonly used in Barnes-Hut approximations.)
## Arguments
- `strength`: A constant, a function or array of values.
The repulsive strength to use, defaults to -30,
which is a repulsive force between nodes.
- `min_distance2`: A constant, that defines a minimum distance
between nodes. If the distance between two nodes is less than
this value, the force is increased a bit. The default is 1.0.
- `max_distance2`: A constant, that defines a maximum distance
between nodes. If the distance between two nodes is greater than
this value, the force is ignored. The default is Inf.
- `theta2`: A constant, that defines the accuracy of the approximation.
The default is 0.81, which is a reasonable value for most simulations.
- `random`: A random number generator. This is used for the random perturbations.
"""
# A many-body (n-body) force specification; the keyword arguments are stored
# untouched and resolved later by `initialize`.
struct ManyBodyForce{T}
    args::T  # NamedTuple of the constructor's keyword arguments
end
ManyBodyForce(;kwargs...) = ManyBodyForce{typeof(kwargs)}(kwargs)
# A ManyBodyForce with per-node strengths resolved by `initialize`.
struct InitializedManyBodyForce
    strengths      # per-node charge (negative repels, positive attracts)
    min_distance2  # squared distance below which the force is softened
    max_distance2  # squared distance beyond which the force is ignored
    theta2         # squared Barnes-Hut accuracy parameter
    rng            # RNG for jiggling coincident points (may be nothing)
end
# Resolve per-node strengths and record the approximation parameters.
function initialize(body::ManyBodyForce, nodes;
        strength = Float32(-30.0),
        min_distance2 = Float32(1.0),
        max_distance2 = Float32(Inf),
        theta2 = Float32(0.81),
        random = nothing)
    node_strengths = _handle_node_values(nodes, strength)
    return InitializedManyBodyForce(node_strengths, min_distance2,
        max_distance2, theta2, random)
end
#=
function manybodyforce!(alpha::Real, nodes, pos, vel, strengths, min_distance2, max_distance2, theta2, rng)
for u in eachindex(nodes)
for v in eachindex(nodes)
if u == v
continue
end
d = pos[v] .- pos[u]
d2 = dot(d, d)
if d2 < max_distance2
d = jiggle(d, rng)
d2 = dot(d, d)
if d2 < min_distance2
d2 = sqrt(min_distance2*d2)
end
w = strengths[v]*alpha / d2
vel[u] += d .* w
end
end
end
end
=#
# Post-order traversal that computes, for every internal KDTree node, the
# strength-weighted center of mass (`centers`), total strength (`weights`),
# and cell width (`widths`) of its subtree. Returns (center, weight) for the
# subtree rooted at `idx`; leaves aggregate their points directly.
function _walk(T::KDTree, n::Int, idx::Int, centers, weights, widths, strengths, rect)
    center = _zero(eltype(centers)) # handles Tuple types...
    weight = zero(eltype(weights))
    CType = typeof(center)
    WType = typeof(weight)
    FType = _eltype(eltype(centers))
    if NearestNeighbors.isleaf(n, idx)
        idxmap = T.indices
        treepts = T.data
        totalmass = zero(FType)
        for ptsidx in NearestNeighbors.get_leaf_range(T.tree_data, idx)
            Tidx = T.reordered ? ptsidx : idxmap[ptsidx]
            origidx = T.reordered == false ? ptsidx : idxmap[ptsidx]
            # the center is weighted by |strength| so mixed-sign charges stay finite
            q = FType(abs(strengths[origidx]))
            totalmass += q
            center = center .+ q*treepts[Tidx]
            weight += strengths[origidx]
        end
        center = center ./ totalmass
        return (center::CType, weight::WType)
    else
        left, right = NearestNeighbors.getleft(idx), NearestNeighbors.getright(idx)
        split_val = T.split_vals[idx]
        split_dim = T.split_dims[idx]
        # split the bounding rectangle along this node's split plane
        rect_right = NearestNeighbors.HyperRectangle(@inbounds(setindex(rect.mins, split_val, split_dim)), rect.maxes)
        rect_left = NearestNeighbors.HyperRectangle(rect.mins, @inbounds setindex(rect.maxes, split_val, split_dim))
        lcenter, lweight = _walk(T, n, left, centers, weights, widths, strengths, rect_left)
        rcenter, rweight = _walk(T, n, right, centers, weights, widths, strengths, rect_right)
        centers[idx] = (abs(lweight) .* lcenter .+ abs(rweight) .* rcenter) ./ (abs(lweight) .+ abs(rweight))
        weights[idx] = lweight + rweight
        widths[idx] = maximum(rect.maxes .- rect.mins)
        return (centers[idx]::CType, weights[idx]::WType)
    end
end
# Allocate and fill the per-internal-node aggregate tables (centers of mass,
# total strengths, cell widths) used by the Barnes-Hut style traversal.
function _build_tree_info(T::KDTree, pts, strengths)
    n = T.tree_data.n_internal_nodes
    centers = Vector{eltype(pts)}(undef, n)
    weights = Vector{eltype(strengths)}(undef, n)
    widths = Vector{_eltype(eltype(pts))}(undef, n)
    # we need to do a post-order traversal
    _walk(T, n, 1, centers, weights, widths, strengths, T.hyper_rec)
    return centers, weights, widths
end
# Pairwise force kernel: force of pt2 on pt1 scaled by strength and alpha,
# with a softened near field (below min_distance2) and an ignored far field
# (beyond max_distance2). Returns a zero vector when out of range.
@inline function _compute_force(rng, pt1, pt2, strength, max_distance2, min_distance2, alpha)
    d = pt2 .- pt1
    d = jiggle(d, rng) # perturb coincident points to avoid division by zero
    d2 = dot(d, d)
    if d2 < max_distance2
        #d = jiggle(d, rng)
        if d2 < min_distance2
            # soften very close interactions instead of letting 1/d2 blow up
            @fastmath d2 = sqrt(min_distance2*d2)
        end
        w = strength*alpha / d2
        return d .* w
    else
        return 0 .* pt1
    end
end
# Recursive traversal applying the many-body force of the subtree rooted at
# `treeindex` to the point `targetpt` (tree index `target`, velocity slot
# `velidx`). Distant cells are approximated by their center of mass when
# width^2/theta2 < distance^2; otherwise the traversal recurses.
# Returns the number of force evaluations performed.
function _compute_force_on_node(target, treeindex, targetpt, T, forcefunc, centers, weights, widths, strengths, theta2, vel, velidx)
    #f = 0 .* targetpt
    ncomp = 0
    if NearestNeighbors.isleaf(T.tree_data.n_internal_nodes, treeindex)
        idxmap = T.indices
        treepts = T.data
        f = 0 .* targetpt
        @simd for Tidx in NearestNeighbors.get_leaf_range(T.tree_data, treeindex)
            #ptsidx = idxmap[Tidx]
            #Tidx = T.reordered ? Tidx : ptsidx
            if Tidx != target # skip self-interaction
                ncomp += 1
                @inbounds pt = treepts[Tidx]
                origidx = T.reordered == false ? Tidx : idxmap[Tidx]
                f = f .+ forcefunc(targetpt, pt, strengths[origidx])
            end
        end
        @inbounds vel[velidx] = vel[velidx] .+ f
    else
        @inbounds center = centers[treeindex]
        @inbounds w = weights[treeindex]
        @inbounds width = widths[treeindex]
        d = center .- targetpt
        d2 = dot(d,d)
        if (width*width / theta2) < d2
            # far enough away: treat the whole cell as one body at its center of mass
            @inbounds vel[velidx] = vel[velidx] .+ forcefunc(targetpt, center, w)
            ncomp += 1
            # and then don't recurse...
        else
            # otherwise, recurse...
            left, right = NearestNeighbors.getleft(treeindex), NearestNeighbors.getright(treeindex)
            ncomp += _compute_force_on_node(target, left, targetpt, T, forcefunc, centers, weights, widths, strengths, theta2, vel, velidx)
            ncomp += _compute_force_on_node(target, right, targetpt, T, forcefunc, centers, weights, widths, strengths, theta2, vel, velidx)
        end
    end
    return ncomp
end
# Apply the approximate many-body force to every point in the tree.
# Returns the total number of force evaluations (useful for diagnostics).
function _applyforces!(T, vel, centers, weights, widths, strengths, forcefunc, theta2)
    ncomp = 0
    for i in eachindex(T.data)
        velidx = T.reordered == false ? i : T.indices[i] # this is the index of i in the real vel array
        ncomp += _compute_force_on_node(i, 1, T.data[i], T, forcefunc, centers, weights, widths, strengths, theta2, vel, velidx)
    end
    return ncomp # bug fix: the count was accumulated but never returned
end
# Tree-accelerated (Barnes-Hut style) many-body force application.
function manybodyforce!(alpha::Real, nodes, pos, vel, strengths, min_distance2, max_distance2, theta2, rng)
    T = KDTree(pos)
    centers, weights, widths = _build_tree_info(T, pos, strengths)
    # close over the fixed parameters so the traversal only passes points/strengths
    forcefunc = @inline (u, v, strength) -> _compute_force(rng, u, v, strength, max_distance2, min_distance2, Float32(alpha))
    _applyforces!(T, vel, centers, weights, widths, strengths, forcefunc, theta2)
end
# Exact O(n^2) pairwise force computation; reference implementation for the
# tree-based manybodyforce! above. `theta2` is accepted for signature parity.
function simpleforces!(alpha::Real, nodes, pts, vel, strengths, min_distance2, max_distance2, theta2, rng)
    pairforce = @inline (u, v, strength) -> _compute_force(rng, u, v, strength, max_distance2, min_distance2, Float32(alpha))
    for i in eachindex(pts)
        srcpt = pts[i]
        acc = 0.0 .* srcpt
        for j in eachindex(pts)
            j == i && continue # no self-interaction
            acc = acc .+ pairforce(srcpt, pts[j], strengths[j])
        end
        vel[i] = vel[i] .+ acc
    end
end
# Dispatch entry point for the many-body force: unpack and run the kernel.
function force!(alpha::Real, sim::ForceSimulation, many::InitializedManyBodyForce)
    manybodyforce!(alpha, sim.nodes, sim.positions, sim.velocities,
        many.strengths, many.min_distance2, many.max_distance2,
        many.theta2, many.rng)
    # Exact O(n^2) fallback, kept for debugging:
    #simpleforces!(alpha, sim.nodes, sim.positions, sim.velocities, many.strengths, many.min_distance2, many.max_distance2, many.theta2, many.rng)
end
# Multi-line summary listing each configuration parameter on its own line.
function Base.show(io::IO, f::InitializedManyBodyForce)
    print(io, "ManyBodyForce")
    for (label, value) in (("strength", f.strengths),
                           ("min_distance2", f.min_distance2),
                           ("max_distance2", f.max_distance2),
                           ("theta2", f.theta2))
        println(io)
        print(io, " with ", label, " ", value)
    end
end
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 6155 |
# Create an interactive graph plot in `ax` driven by the force simulation
# `sim`. Wires up hover highlighting for nodes/edges and drag-to-pin behavior.
function igraphplot!(ax, g, sim; kwargs...)
    p = graphplot!(ax, g;
        edge_width = [2.0 for i in 1:ne(g)],
        edge_color = [colorant"gray" for i in 1:ne(g)],
        node_size = [10 for i in 1:nv(g)],
        node_color = [colorant"black" for i in 1:nv(g)],
        layout = sim.positions,
        kwargs...)
    hidedecorations!(ax); hidespines!(ax)
    ax.aspect = DataAspect()
    # rectangle zoom conflicts with node dragging
    deregister_interaction!(ax, :rectanglezoom)
    # double the node size while hovered; restore it on leave
    function node_hover_action(state, idx, event, axis)
        p.node_size[][idx] = state ? p.node_size[][idx]*2 : p.node_size[][idx]/2
        p.node_size[] = p.node_size[] # trigger observable
    end
    nhover = NodeHoverHandler(node_hover_action)
    register_interaction!(ax, :nhover, nhover)
    # widen the edge while hovered; restore it on leave
    function edge_hover_action(state, idx, event, axis)
        p.edge_width[][idx]= state ? p.edge_width[][idx]*4 : p.edge_width[][idx]/4
        p.edge_width[] = p.edge_width[] # trigger observable
    end
    ehover = EdgeHoverHandler(edge_hover_action)
    register_interaction!(ax, :ehover, ehover)
    # dragging pins the node to the cursor and reheats the simulation;
    # releasing frees it again unless left-shift is held
    function node_drag_action(state, idx, event, axis)
        if state == false
            # this means it's the end of the drag
            if Keyboard.left_shift in events(axis).keyboardstate
                # then we want to leave the node as fixed...
            else
                freenode!(sim, idx)
            end
            sim.alpha.alpha_target = 0.001
        else
            fixnode!(sim, idx, event.data)
            sim.alpha.alpha_target = 0.3
            #p[:node_pos][][idx] = event.data
            p[:node_pos][] = p[:node_pos][]
        end
    end
    ndrag = NodeDragHandler(node_drag_action)
    register_interaction!(ax, :ndrag, ndrag)
    return p
end
# Build the interactive playground window for graph `g` driven by `sim`:
# Animate/Reheat/Help buttons, a status line for hover labels, and a render
# loop that steps the simulation while updates are enabled.
function playground(g, sim::ForceSimulation;
        initial_iterations = 10,
        graphplot_options = NamedTuple(),
        labels = map(i->string(i), 1:nv(g)),
        verbose = false)
    f = Figure()
    button_startstop = Button(f[1, 1], label="Animate", tellwidth=false)
    button_reheat = Button(f[1, 3], label="Reheat", tellwidth=false)
    button_help = Button(f[1, 4], label="Show Help", tellwidth=false)
    ax = Axis(f[2, :])
    ax.limits = (0, 800, 0, 600)
    txt = text!(ax, (0, 0), text="") # verbose alpha readout
    status = Label(f[3,:], text="", tellwidth=false,
        tellheight=true, height=1)
    # pre-roll the layout so the first frame is not a random scatter
    for _ in 1:initial_iterations
        step!(sim)
    end
    p = igraphplot!(ax, g, sim; graphplot_options...)
    # show node/edge labels in the status bar on hover
    function node_hover_action_label(state, idx, event, axis)
        status.text[] = state ? "Node $(labels[idx])" : ""
    end
    register_interaction!(ax, :nhover_label, NodeHoverHandler(node_hover_action_label))
    edgelist = collect(edges(g))
    function edge_hover_action_label(state, idx, event, axis)
        edge = edgelist[idx]
        status.text[] = state ? "Edge $(labels[edge.src]) -- $(labels[edge.dst])" : ""
    end
    register_interaction!(ax, :ehover_label, EdgeHoverHandler(edge_hover_action_label))
    p[:node_pos][] = sim.positions
    run_updates = Ref(false)
    # bump alpha back up (capped at 1) to restart the animation
    on(button_reheat.clicks) do _
        sim.alpha.alpha = min(sim.alpha.alpha * 10, 1.0)
    end
    # toggle the per-frame simulation stepping
    on(button_startstop.clicks) do _
        if button_startstop.label[] == "Animate"
            run_updates[] = true
            button_startstop.label[] = "Stop Updates"
        else
            run_updates[] = false
            button_startstop.label[] = "Animate"
        end
        Consume(true)
    end
    on(button_help.clicks) do _
        #println("Help")
        status.text[] = "Drag to move nodes, reheat to restart the animation, hold Shift while dragging to fix a node"
        Consume(true)
    end
    # render callback: advance the simulation each frame while enabled
    w = Window(f.scene, focus_on_show=true, framerate=60.0,
        vsync=true, render_on_demand=false, title="GLMakie Graph Playground") do _
        if run_updates[] == true
            step!(sim)
            p[:node_pos][] = sim.positions
            if verbose
                txt.text[] = "alpha $(sim.alpha.alpha)"
            end
        end
    end
    return Playground(w, sim, ax)
end
# Handle for an interactive playground: the GLMakie window, the running
# force simulation, and the axis the graph is drawn into.
struct Playground{WindowType,SimType,AxisType}
    window::WindowType
    sim::SimType
    axis::AxisType
end
import Base.display
# Displaying a Playground shows its window.
display(p::Playground) = display(p.window)
import Base.show
show(io::IO, p::Playground) = println(io, "Playground with window ", p.window, " and simulation ", p.sim)
"""
playground(g;
[link_options = (;iterations=1,distance=30),]
[center_options = NamedTuple(),]
[charge_options = NamedTuple(),]
[graphplot_options = NamedTuple(),]
[initial_iterations = 10,]
[labels = map(i->string(i), 1:nv(g)),]
[verbose = false,]
[kwargs...] )
playground(g, sim; kwargs...)
Create a graph playground window from the graph `g`.
The value that is returned is a `Playground` object, which is a thin
wrapper around the window, the simulation, and the Makie axis.
## Optional parameters
- `link_options`: These options are passed to the `LinkForce`` constructor.
The default is `(;iterations=1,distance=30)`. This is generally good.
For grid graphs, set iterations higher to get a better layout.
- `center_options`: These options are passed to the `PositionForce` constructor.
- `charge_options`: These options are passed to the `ManyBodyForce` constructor.
- `graphplot_options`: These options are passed to the `graphplot!` function.
- `initial_iterations`: The number of layout iteration to run before the first display.
The default is 10.
- `labels`: A list of strings to display for the node identifiers. By default these
are the numeric node ids
- `verbose`: If true, additional information is shown in the lower left corner of the plot.
## Examples
```julia
g = wheel_graph(10)
playground(g)
```
For a grid, this often looks much better
```julia
g = grid([10,10])
playground(g;
link_options=(;iterations=10, strength=1, distance=20))
```
## See also
[`ForceSimulation`](@ref), [`LinkForce`](@ref), [`ManyBodyForce`](@ref), [`PositionForce`](@ref)
"""
function playground(g;
link_options = (;iterations=1,distance=30),
center_options = NamedTuple(),
charge_options = NamedTuple(),
kwargs...
)
sim = ForceSimulation(Point2f, vertices(g);
link=LinkForce(;edges=edges(g), link_options...),
charge=ManyBodyForce(;charge_options...),
center=PositionForce(;target=Point2f(400, 300), center_options),
)
playground(g, sim; kwargs...)
end
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 4064 |
struct CenterForce{T, V <: Real}
    center::T    # target point the mean of all positions is pulled toward
    strength::V  # fraction of the mean offset removed each step
    args::@NamedTuple{}  # empty; kept for interface parity with the other forces
end
"""
`CenterForce` represents a centering adjustment in a force simulation.
It has two parameters:
* `center`: The center of the force, which can be anything resembling a point
* `strength`: The strength of the force, which is a real number
Note that CenterForce directly applies the force to the
positions of the nodes in the simulation instead of updating their velocities.
Use PositionForce to apply a force to the velocities of the nodes instead.
(Also, please don't combine PositionForce and CenterForce.)
## Example
```julia
rad = 10*rand(100)
sim = ForceSimulation(Point2f, eachindex(rad);
  center=CenterForce(center, strength=1.0),
  collide=CollisionForce(radius=rad)
)
p = scatter(sim.positions, markersize=rad)
for i in 1:100
  step!(sim)
  p[:node_pos][] = sim.positions
  sleep(0.5)
end
```
## See also
[`ForceSimulation`](@ref)
"""
CenterForce(;center=(0.0f0,0.0f0), strength=1.0f0) = CenterForce(center, strength, NamedTuple())
#CenterForce(center) = CenterForce(center, 1.0, NamedTuple())
#CenterForce(center, strength) = CenterForce(center, strength, NamedTuple())
# CenterForce needs no per-node resolution; it is its own initialized form.
function initialize(center::CenterForce, nodes; kwargs...)
    return center
end
# Shift every position so the mean of the first `n` positions moves toward
# `centertarget` by `strength`. Note this edits positions directly rather
# than velocities, unlike the other forces.
function centerforce!(n::Integer, pos, centertarget, strength)
    meanpos = 0 .* first(pos) # zero point of the same type as the positions
    invn = one(_eltype(_eltype(pos)))/n
    for idx in 1:n
        # accumulate the pre-scaled mean; point types are immutable, so rebuild
        meanpos = meanpos .+ (invn .* pos[idx])
    end
    shift = (meanpos .- centertarget)*strength
    for idx in eachindex(pos)
        pos[idx] = pos[idx] .- shift
    end
end
# Dispatch entry point for CenterForce. Recentering ignores alpha; it is
# applied at full strength on every step.
function force!(alpha::Real, sim::ForceSimulation, center::CenterForce)
    centerforce!(length(sim.nodes), sim.positions, center.center, center.strength)
end
# One-line display of the center point and strength.
function Base.show(io::IO, f::CenterForce)
    print(io, "CenterForce with center ", f.center)
    print(io, " and strength ", f.strength)
end
"""
PositionForce(;[target] [, strength])
`PositionForce` represents a force that directions nodes of the
simulation towards specific target positions.
## Arguments
- `target`: The target position of each node.
This can be a single value or an array of values.
The default is (0,0), which tries to center the positions.
- `strength`: The strength of the force, which is a real number. The default is 0.1.
## See also
[`ForceSimulation`](@ref)
"""
struct PositionForce{T}
args::T
end
PositionForce(;kwargs...) = PositionForce{typeof(kwargs)}(kwargs)
# A PositionForce with per-node targets and strengths resolved by `initialize`.
struct InitializedPositionForce
    targets    # per-node target positions (array-like, indexed by node)
    strengths  # per-node pull strengths
end
# Resolve PositionForce options against the node set. A single target point
# is broadcast to every node via a ConstArray.
function initialize(pforce::PositionForce, nodes;
        strength=0.1,
        target=(0,0), kwargs...)
    strengths = _handle_node_values(nodes, strength)
    # need to be careful with the center, because it could be a single value or a tuple
    targets = _handle_node_values(nodes, target)
    if targets === target
        # NOTE(review): this length test assumes a single target point never has
        # length equal to the node count — confirm for very small simulations.
        if length(targets) != length(nodes)
            targets = ConstArray(target, (length(nodes),))
        end
    end
    return InitializedPositionForce(targets, strengths)
end
# Pull each node's velocity toward its target position, scaled by the node's
# strength and the simulation's current alpha.
# TODO: to force only one component toward a value, an NaN mask on the target
# points could be used (skip NaN components when forming the displacement).
function positionforce!(alpha, nodes, pos, vel, strengths, targets)
    for idx in nodes
        displacement = targets[idx] .- pos[idx]
        vel[idx] = vel[idx] .+ displacement .* strengths[idx] .* alpha
    end
end
# Dispatch entry point for the position force: unpack and run the kernel.
function force!(alpha::Real, sim::ForceSimulation, pforce::InitializedPositionForce)
    positionforce!(alpha, sim.nodes, sim.positions, sim.velocities,
        pforce.strengths, pforce.targets)
end
function Base.show(io::IO, z::InitializedPositionForce)
print(io, "PositionForce with targets ", z.targets, " and strength ", z.strengths)
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 7411 | struct ForceSimulation
nodes
forces
positions
velocities
rng::AbstractRNG
alpha::CoolingStepper
velocity_decay
fixed
end
"""
ForceSimulation([PositionType,] nodes; [...])
ForceSimulation(positions, nodes; [rng,] [alpha,] [velocity_decay,] forcelist...)
Create a force simulator for a set of positions. This evaluates and evolves the positions
based on the forces applied. It is designed to be used with evaluating a dynamic force
directed graph layout including attractive forces for edges and repulsive forces for nodes.
But it may have other uses as well. For instance, collision forces can be used to simulate
packed bubble charts with various radii.
## Arguments
- `nodes` is any array of nodes. This can be very simple, i.e. 1:n, or
a list of objects. The objects must be used consistent with other forces involved.
- `PositionType` is the type of the positions. Using `Point2f` is recommended and the default
- `positions` is an array of initial positions. The position type is determined by the elements of
the array.
- `forcelist` is a trailing list of forces. The names of these forces do not matter.
The order is the order in which they are applied. While forcelist is not syntactically
required, it is semantically required as otherwise the simulation will not do anything.
## Optional Arguments
- `rng` is a random number generator. This is used for the initial positions and for
any random perturbations if there are degeneracies. The default is to use
a deterministic generator so that the results are reproducible.
- `alpha` is the cooling stepper. This is used to control the rate of convergence.
See [`GraphPlayground.CoolingStepper`](@ref) for more information.
- `velocity_decay` is the factor by which the velocities are decayed each step.
Setting this to 1 will not decay the velocities. Setting it to 0 will stop all motion.
The default is 0.6.
## Usage
Here is an example that packs balls of different sizes
into a region around the point (0,0).
```julia
radii = 1:10
sim = ForceSimulation(1:10;
collide=CollisionForce(radius=radii, iterations=3),
center=PositionForce(target=(0,0)))
initial_positions = copy(sim.positions)
step!(sim, 100) # run 100 steps
plot(sim.positions; markersize=(radii .- 0.5).*pi/1.11,
markerspace=:data, strokewidth=0.25, strokecolor=:white) # weird 1.11 to get the right size, add 0.05
```
## Naming forces
The list of forces can have silly names if you wish. The names are not used other than
for display. For example, this is entirely valid:
```julia
sim = ForceSimulation(1:10;
collide=CollisionForce(radius=radii, iterations=3),
push_nodes_to_middle=PositionForce(target=(0,0)),
push_nodes_to_offset=PositionForce(target=(10,10)))
```
Of course, that generates a very useless simulator.
## Forces
- [`LinkForce`](@ref): This force applies a spring force to all edges in the graph.
The force is proportional to the distance between the nodes.
- [`ManyBodyForce`](@ref): This force applies a repulsive force between all nodes.
The force is proportional to the inverse square of the distance between the nodes.
- [`PositionForce`](@ref): This force applies a force to all nodes to move them to a target position.
This is useful for centering the graph or pushing nodes to the edge.
- [`CollisionForce`](@ref): This force applies a repulsive force between all positions.
The force is proportional to the sum of the radii of the nodes.
- [`CenterForce`](@ref): This force directly centers all the positions.
## Data
The simulator maintains the following data that are useful:
- `positions`: The current positions of the nodes.
- `velocities`: The current velocities of the nodes.
You can access these directly.
## Methods
To fix a node in place, use `fixnode!(sim, i, pos)`. To free a node, use `freenode!(sim, i)`.
To take a step, use `step!(sim)`. To take multiple steps, use `step!(sim, n)`.
## See also
[`step!`](@ref), [`fixnode!`](@ref), [`freenode!`](@ref), [`LinkForce`](@ref),
[`ManyBodyForce`](@ref), [`PositionForce`](@ref), [`CollisionForce`](@ref), [`CenterForce`](@ref)
"""
# Assemble a ForceSimulation from explicit initial positions. Every trailing
# keyword argument is treated as a force description and is initialized
# against `nodes`; the keyword's name is kept only for display.
function ForceSimulation(positions, nodes;
    rng=Random.MersenneTwister(0xd3ce), # 0xd34ce -> "d3-force" ? d3 - 4 - ce?
    alpha = CoolingStepper(),
    velocity_decay = 0.6,
    kwargs...)
    nnodes = length(nodes)
    PT = eltype(positions)
    # Start every node at rest; the zero tuple adapts to the point dimension.
    velocities = Vector{PT}(undef, nnodes)
    fill!(velocities, ntuple(_ -> 0, length(first(positions))))
    # Each keyword becomes a named, initialized force (declaration order kept).
    forces = NamedTuple(map(name -> name => initialize(kwargs[name], nodes; random=rng, kwargs[name].args...),
        keys(kwargs)))
    fixed = falses(nnodes)
    return ForceSimulation(nodes, forces, positions, velocities, rng,
        alpha, velocity_decay, fixed)
end
# Convenience constructor: draw random initial positions with point type `T`,
# then defer to the positions-based constructor.
# Fixes: removed the unused local `n` and the duplicated seed comment.
function ForceSimulation(T::Type, nodes;
    rng=Random.MersenneTwister(0xd3ce), # 0xd34ce -> "d3-force" ? d3 - 4 - ce?
    kwargs...)
    positions = _initial_positions(T, nodes, rng)
    return ForceSimulation(positions, nodes; rng, kwargs...)
end
ForceSimulation(nodes; kwargs...) = ForceSimulation(Point2f, nodes; kwargs...) # default point type is 2-d Float32 (Point2f — presumably from GeometryBasics; confirm)
# Random starting layout: one point of type `T` per node, with uniform
# coordinates scaled by sqrt(n) so density stays roughly constant.
function _initial_positions(T, nodes, rng)
    count = length(nodes)
    pos = Vector{T}(undef, count)
    scale = sqrt(count)
    # NOTE(review): reads an element of the undef vector to learn the point
    # dimension, so this relies on `T` being an isbits point/tuple type.
    dims = length(first(pos))
    for k in 1:count
        pos[k] = scale .* ntuple(_ -> rand(rng), dims)
    end
    return pos
end
# Integrate one step: damp each free node's velocity by `decay` and move the
# node by it; fixed nodes get their velocity zeroed so they cannot drift or
# drag neighbors. (`alpha` and `forces` are unused here but kept so the call
# signature mirrors the rest of the stepping pipeline.)
function simstep!(alpha, positions, velocities, forces, decay, fixed)
    for idx in eachindex(positions)
        if fixed[idx]
            velocities[idx] = ntuple(_ -> 0, length(velocities[idx]))
        else
            velocities[idx] = decay .* velocities[idx]
            positions[idx] = positions[idx] .+ velocities[idx]
        end
    end
end
# Apply every registered force to the simulation, in registration order.
function apply_forces!(alpha, sim, forces)
    foreach(f -> force!(alpha, sim, f), forces)
end
"""
step!(sim) # take one step
step!(sim, n) # take n steps
Take a step of the force simulator. This will apply all forces in the order they were
added to the simulator. The forces are applied to the positions and velocities.
The velocities are then decayed by the `velocity_decay` factor.
See [`ForceSimulation`](@ref) for more information and an example.
"""
function step!(sim::ForceSimulation)
alpha = step!(sim.alpha)
apply_forces!(alpha, sim, sim.forces)
simstep!(alpha, sim.positions,
sim.velocities, sim.forces, sim.velocity_decay, sim.fixed
)
return sim
end
# Run `n` consecutive simulation steps and return the simulation.
function step!(sim::ForceSimulation, n)
    foreach(_ -> step!(sim), 1:n)
    return sim
end
"""
fixnode!(sim::ForceSimulation, i, pos)
Fix the position of a node in the simulation. This will prevent the node from moving.
This importantly keeps the velocity of the node set to 0, which will prevent the node
from updating other implicit positions.
"""
function fixnode!(sim::ForceSimulation, i, pos)
sim.fixed[i] = true
sim.positions[i] = pos
end
"""
freenode!(sim::ForceSimulation, i)
Remove the fixed position of a node in the simulation. This will allow the node to move.
"""
function freenode!(sim::ForceSimulation, i)
sim.fixed[i] = false
end
# Multi-line summary: node count and point type, then one line per force.
function Base.show(io::IO, z::ForceSimulation)
    println(io, length(z.nodes), "-node ForceSimulation ", pointtype(z), " with forces: ")
    foreach(f -> println(io, " ", f), z.forces)
end
# Element type of the simulation's position array (e.g. Point2f).
pointtype(z::ForceSimulation) = eltype(z.positions)
# Allocate an uninitialized per-node scratch buffer with element type T.
_get_node_array(T::DataType, sim::ForceSimulation) = Vector{T}(undef, length(sim.nodes))
_get_node_array(T::DataType, nodes) = Vector{T}(undef, length(nodes))
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 8391 |
"""
_eltype(x)
Create an _eltype function that also handles NTuple types. This is useful to avoid
a dependency on explicit point types of static arrays. Since everything we can
do can be done with NTuple types. This forwards to Base.eltype for all other types.
"""
_eltype(x) = Base.eltype(x)
# Aqua detects this as having an unbound type
#_eltype(::NTuple{N, T}) where {N, T} = T
# so we use this ugly hack from StaticArrays instead ...
_TupleOf{T} = Tuple{T,Vararg{T}}
_eltype(::Union{_TupleOf{T}, Type{<:_TupleOf{T}}}) where {T} = T
_zero(x) = Base.zero(x)
# We need that ugly hack again.
# The issue is that NTuple{N,T} can have N = 0, which is zero parameters. So then
# we have a method ambiguity/etc. problem, which is what we're trying to avoid.
# So the _TupleOf type forces it to have _at least one_ Tuple parameter.
# and a consistent type for the rest.
_TupleOfLen{T,N} = Tuple{T,Vararg{T,N}}
#_zero(::Union{NTuple{N, T}, Type{NTuple{N, T}}}) where {N, T} = ntuple(i -> zero(T), Val(N))
# The
_zero(x::Union{_TupleOfLen{T,N}, Type{<:_TupleOfLen{T,N}}}) where {T,N} = ntuple(i -> zero(T), Val(N+1))
"""
jiggle(rng::AbstractRNG)
Generate a small random perturbation using the provided random number generator (`rng`).
The perturbation is uniformly distributed between -0.5e-6 and 0.5e-6. This function is
commonly used in simulations to avoid issues like division by zero when two objects
have the exact same position.
# Examples
```julia
rng = MersenneTwister(123)
jiggle(rng)
```
"""
function jiggle(rng)
return (rand(rng) - 0.5) * 1e-6
end
"""
jiggle(x, rng::AbstractRNG)
Apply a small random perturbation to each element of the array `x` that equals zero,
using the provided random number generator (`rng`). Non-zero elements of `x` are left
unaltered. This is particularly useful in numerical simulations where exact zeroes may
lead to singularities or undefined behaviors.
# Arguments
- `x`: An array of numeric values.
- `rng`: A random number generator instance.
# Examples
```julia
x = [0, 1, 0, 2]
rng = MersenneTwister(123)
jiggle(x, rng)
```
"""
function jiggle(x, rng::AbstractRNG)
return map(c -> c == 0 ? _eltype(c)(jiggle(rng)) : c, x)
end
"""
_srcdst(e)
Extract the source and destination identifiers from an edge structure `e`. This function
is designed to be used internally within graph-related algorithms where edges need to
be decomposed into their constituent nodes.
# Arguments
- `e`: An edge data structure containing `src` and `dst` fields.
# Examples
```julia
e = (src=1, dst=2)
_srcdst(e)
```
"""
function _srcdst(e)
return e.src, e.dst
end
"""
_srcdst(e::Tuple)
A variant of `_srcdst` that directly returns the tuple `e`, assuming it represents an edge
with source and destination values. This overload is useful when edges are represented
simply as tuples, without any encapsulating structure.
# Arguments
- `e`: A tuple representing an edge, where the first element is the source and the second
element is the destination.
# Examples
```julia
e = (1, 2)
_srcdst(e)
```
"""
function _srcdst(e::Tuple)
return e
end
"""
    CoolingStepper(; alpha=1.0, alpha_min=0.001, alpha_decay=1 - alpha_min^(1/300), alpha_target=0.0)

A model of the cooling step in d3-force. Each `step!` moves `alpha` toward
`alpha_target` by the fraction `alpha_decay`. The target can be retargeted at
any time, which is useful to "heat up" a simulation for user interaction or
incoming data. Once `alpha` has fallen to `alpha_min` (with the target below
the current value), `step!` returns zero for all subsequent steps.

Usage:
```julia
alpha = CoolingStepper()
for i=1:10
  println(step!(alpha))
end
alpha.alpha_target = 0.5
for i=1:10
  println(step!(alpha))
end
alpha.alpha_target = 0.0
```
"""
mutable struct CoolingStepper{T <: Real}
    alpha::T        # current cooling value
    alpha_min::T    # below this (with target below alpha) the stepper reports 0
    alpha_decay::T  # fraction of the gap to the target closed per step
    alpha_target::T # value alpha relaxes toward
end

function CoolingStepper(; alpha=1.0, alpha_min=0.001, alpha_decay=1 - alpha_min^(1/300), alpha_target=0.0)
    # Promote so mixed-type inputs (e.g. `alpha=1` alongside float defaults)
    # construct successfully instead of missing the single-T inner constructor.
    return CoolingStepper(promote(alpha, alpha_min, alpha_decay, alpha_target)...)
end

# Advance the schedule one step; a port of d3-force's
# `alpha += (alphaTarget - alpha) * alphaDecay`.
function step!(stepper::CoolingStepper)
    if (stepper.alpha <= stepper.alpha_min) && stepper.alpha_target < stepper.alpha
        # Fully cooled: report zero so callers can treat the simulation as settled.
        return zero(typeof(stepper.alpha))
    end
    stepper.alpha += (stepper.alpha_target - stepper.alpha) * stepper.alpha_decay
    return stepper.alpha
end
# A per-node value given as a function is evaluated at every node.
_handle_node_values(nodes, x::Function) = map(x, nodes)
# A scalar applies uniformly: wrap it in a lazy constant array, one per node.
function _handle_node_values(nodes, x::Real)
return ConstArray(x, (length(nodes),))
end
# A tuple (e.g. a 2-d point) is likewise one shared value for every node.
function _handle_node_values(nodes, x::Tuple)
return ConstArray(x, (length(nodes),))
end
# Arrays and Dicts are assumed to already map node -> value; pass through.
function _handle_node_values(nodes, x::Union{AbstractArray,Dict})
return x
end
# A per-edge function is called as f(index, edge, src, dst) for every edge.
function _handle_link_values(edges, f::Function)
    return [f(i, e, _srcdst(e)...) for (i, e) in enumerate(edges)]
end
# A scalar link value becomes a constant-valued range with one entry per edge.
_handle_link_values(edges, x::Real) = range(x, x, length=length(edges))
# Arrays and Dicts already map edge -> value; return them untouched.
_handle_link_values(edges, x::Union{AbstractArray,Dict}) = x
import Base.getindex, Base.size

# Lazy array whose entries are all the same value; only the value and the
# shape are stored. Used to broadcast a single node/edge parameter cheaply.
struct ConstArray{T,N} <: AbstractArray{T,N}
    val::T
    shape::NTuple{N,Int}
end

Base.getindex(c::ConstArray, ::Int...) = c.val # every index yields the value
Base.size(c::ConstArray) = c.shape

# Compact display instead of the dense AbstractArray element dump.
function Base.show(io::IO, c::ConstArray)
    print(io, "ConstArray of shape ", c.shape, " with value ", c.val)
end
# Drive `loop` once per frame for the screen that is displaying `scene`, and
# stop the loop task when the window closes. Returns the `(taskref, close)`
# pair produced by `taskloop`.
function updateloop(loop, scene)
screen = Makie.getscreen(scene)
task, close = taskloop(loop; delay = 1/screen.config.framerate)
#= waittask = @async begin
wait(screen)
close[] = true
wait(task[])
end =#
# When the window reports closed, signal the loop task and wait for it to end.
on(screen.window_open) do x
if x == false
println("Got false from window_open")
close[] = true
wait(task[])
end
end
return task, close
end
# Run `loop(elapsed_seconds)` repeatedly on an async task, sleeping `delay`
# seconds between iterations. The loop stops when `loop` returns `false` or
# when the returned `should_close` Ref is set to true by the caller.
# Returns `(taskref, should_close)`; successful tasks are registered in the
# module-level `_tasklist` so `cleanup_tasks` can stop them.
function taskloop(loop::Function; delay=1/60)
    t0 = time()
    taskref = Ref{Union{Nothing,Task}}(nothing)
    should_close = Ref(false)
    taskref[] = @async begin
        while true
            sleep(delay)
            if loop(time() - t0) == false
                should_close[] = true
            end
            should_close[] && break
        end
        # Reset the flag so later waits observe a consistent state.
        should_close[] = false
    end
    #schedule(taskref[])
    yield()
    t = taskref[]
    if !(t === nothing)
        if istaskfailed(t)
            # BUGFIX: the original called `rethrow(t)`, which is invalid outside
            # a catch block (and takes an exception, not a Task). Surface the
            # task's failure properly instead.
            throw(TaskFailedException(t))
        else
            push!(_tasklist, (taskref, should_close))
        end
    end
    return taskref, should_close
end
const _tasklist = [] # (taskref, should_close) pairs for live task loops; const so the global is not Any-typed
# Ask every registered task loop to stop, wait for each to finish, then clear
# the registry. Returns nothing.
function cleanup_tasks()
    for (taskref, should_close) in _tasklist
        taskref[] === nothing && continue
        if !istaskdone(taskref[])
            should_close[] = true
        end
        wait(taskref[])
    end
    empty!(_tasklist)
    return nothing
end
"""
Window(loop::Function, scene; [title="GraphPlayground", size=(800,800), kwargs...])
Create a window based on a scene. The window will run the provided `loop` function every
frame. The loop function should take a single argument, which is the time since the window
was opened. This function is a fairly thin wrapper around GLMakie.Screen and GLMakie.display_scene!,
but makes it easier to abstract in the future.
## Parameters
- `loop`: A function that will be called every frame.
The function should take a single argument,
which is the time since the window was opened.
- `scene`: The scene to display in the window.
- `title`: The title of the window. Default is "GraphPlayground".
- `size`: The size of the window. Default is (800,800).
- `kwargs`: Additional keyword arguments to pass to the GLMakie.Screen constructor.
## Example
This example shows a bunch of points that are going to be pushed away from each other
in a simulation of a collision.
```julia
using GeometryBasics, GraphPlayground, GLMakie
scenesize = 500
n = 100
scene = Scene(camera=campixel!, size=(scenesize, scenesize))
pts = Observable((scenesize/2*rand(Point2f0, n)) .+ (scenesize/4)*Point2f(1,1))
radius = rand(10:20, n)
sim = ForceSimulation(pts[], eachindex(pts[]);
collide = CollisionForce(radius=radius .+ 2, iterations=3))
scatter!(scene, pts, markersize=pi*radius/1.11)
GraphPlayground.Window(scene;
title="Collision Simulation", size=(scenesize, scenesize),
focus_on_show = true) do _
step!(sim)
pts[] = sim.positions
end
```
"""
function Window(loop::Function, scene; title="GraphPlayground", size=(800,800), kwargs...)
screen = GLMakie.Screen(framerate=60.0, vsync=true, render_on_demand=false, title=title; kwargs...)
GLMakie.display_scene!(screen, scene)
on(loop, screen.render_tick)
return screen
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 249 | @testset "CenterForce" begin
pts = [Point2f(1,1), Point2f(3,3)]
sim = ForceSimulation(pts, eachindex(pts); center=CenterForce())
display(sim)
step!(sim)
@test sim.positions[1] ≈ Point2f(-1,-1)
@test sim.positions[2] ≈ Point2f(1,1)
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 18119 | @testset "d3-compare" begin
# Test fixture: build a 5x5 grid graph together with node positions recorded
# from a ForceSimulation seeded identically in the reference d3-force
# comparison run. The commented-out snippet documents how both the JavaScript
# and Julia inputs were generated.
function initgrid()
#=
## Setup a demo to understand any differences from d3-force
using Graphs
g = grid([5,5])
foreach(edges(g)) do e
println(" links.push({source: ", e.src-1, ", target: ", e.dst-1, "});")
end
using GraphPlayground, GeometryBasics, GLMakie
sim = ForceSimulation(Point2f, vertices(g); )
foreach(1:nv(g)) do i
println(" nodes[$(i-1)].x = ", sim.positions[i][1], "; nodes[$(i-1)].y = ", sim.positions[i][2], ";")
end
# get the same info for julia
@show sim.positions
foreach(edges(g)) do e
print("Edge(", e.src, ", ", e.dst, "), ")
end
=#
# record the same positions...
pos = Point{2, Float32}[[0.47453555, 2.7263865], [1.4919398, 3.7412186], [0.96295494, 1.5611136], [0.6828322, 1.2432957], [3.2857077, 3.1859443], [2.7603805, 3.6410117], [2.2274709, 4.0740123], [1.2668705, 3.296721], [4.502468, 2.8802135], [1.3515131, 2.8808281], [4.6283107, 2.116446], [4.057611, 2.5923073], [4.4946713, 1.3253814], [3.113631, 2.373382], [0.3633141, 0.8325094], [4.2768054, 4.4309807], [2.838262, 3.7632565], [2.992505, 3.0753062], [3.5060213, 1.3894404], [0.336989, 4.3687134], [3.417117, 4.4206443], [4.4303355, 0.97814], [3.7498107, 4.703241], [2.1663282, 4.306034], [4.8099008, 3.191344]]
edgelist = [Edge(1, 2), Edge(1, 6), Edge(2, 3), Edge(2, 7), Edge(3, 4), Edge(3, 8), Edge(4, 5), Edge(4, 9), Edge(5, 10), Edge(6, 7), Edge(6, 11), Edge(7, 8), Edge(7, 12), Edge(8, 9), Edge(8, 13), Edge(9, 10), Edge(9, 14), Edge(10, 15), Edge(11, 12), Edge(11, 16), Edge(12, 13), Edge(12, 17), Edge(13, 14), Edge(13, 18), Edge(14, 15), Edge(14, 19), Edge(15, 20), Edge(16, 17), Edge(16, 21), Edge(17, 18), Edge(17, 22), Edge(18, 19), Edge(18, 23), Edge(19, 20), Edge(19, 24), Edge(20, 25), Edge(21, 22), Edge(22, 23), Edge(23, 24), Edge(24, 25)]
g = SimpleGraph(edgelist)
return g, pos
end
# Step `sim` `steps` times, then compare every node's position and velocity
# (approximately) against values recorded from the reference d3-force
# JavaScript implementation.
function test_sim(sim; d3pos, d3vel, steps=1)
for _ in 1:steps
step!(sim)
end
for i in sim.nodes
@testset "Node $i" begin
@test sim.positions[i] ≈ d3pos[i]
@test sim.velocities[i] ≈ d3vel[i]
end
end
end
@testset "linkforce" begin
@testset "case1" begin
g, pos = initgrid()
test_sim(
ForceSimulation(pos, vertices(g);
link=LinkForce(edges=edges(g), strength=1, distance=20, iterations=1),
);
d3pos = [Point2f(-5.966415475914804, -3.4499377025253772),
Point2f(8.299699348928659, 11.410562949114803),
Point2f(-4.762231255541469, -6.049689968864188),
Point2f(6.044201798355603, 11.142251206712338),
Point2f(-0.15250354553109213, -5.778486353853048),
Point2f(9.270730012878964, 9.920014398738353),
Point2f(-5.094467013216385, -2.7054723380993106),
Point2f(5.295379659245205, 11.543803363595138),
Point2f(2.5131667630815775, -6.599988471925734),
Point2f(2.758800465233053, 10.567724183438239),
Point2f(-0.6244904844509938, -5.150858513432233),
Point2f(10.240024837214303, 9.719775173985296),
Point2f(2.285841278954558, -7.22878356281036),
Point2f(4.521000197756237, 10.979081414379182),
Point2f(-1.280092648668831, -6.115930444896678),
Point2f(9.497627480224256, 10.499261966392726),
Point2f(-4.067895170205784, -2.9685826703702567),
Point2f(6.17134852170299, 11.756416185024266),
Point2f(2.8486740826745893, -6.2630509499238585),
Point2f(-0.37973260828469646, 10.98372783732616),
Point2f(-2.5847314531366745, 0.5715053784241357),
Point2f(11.060064179549517, 4.401351407064664),
Point2f(-0.306428132624299, -2.126140758444736),
Point2f(2.5578147551018096, 10.06675687471904),
Point2f(6.862114054115636, -1.8633657165571726), ],
d3vel = [Point2f(-6.440951025914804, -6.176324202525377),
Point2f(6.80775954892866, 7.6693443491148034),
Point2f(-5.725186195541469, -7.6108035688641875),
Point2f(5.361369598355603, 9.898955506712339),
Point2f(-3.4382112455310923, -8.964430653853048),
Point2f(6.510349512878963, 6.279002698738353),
Point2f(-7.321937913216385, -6.77948463809931),
Point2f(4.028509159245205, 8.247082363595139),
Point2f(-1.989301236918423, -9.480201971925734),
Point2f(1.4072873652330529, 7.6868960834382385),
Point2f(-5.252801184450994, -7.267304513432233),
Point2f(6.182413837214303, 7.127467873985296),
Point2f(-2.208830021045442, -8.55416496281036),
Point2f(1.407369197756237, 8.605699414379183),
Point2f(-1.643406748668831, -6.948439844896678),
Point2f(5.220822080224257, 6.0682812663927255),
Point2f(-6.906157170205783, -6.7318391703702565),
Point2f(3.17884352170299, 8.681109985024266),
Point2f(-0.6573472173254106, -7.652491349923858),
Point2f(-0.7167216082846964, 6.615014437326161),
Point2f(-6.001848453136675, -3.8491389215758645),
Point2f(6.629728679549516, 3.4232114070646635),
Point2f(-4.056238832624299, -6.829381758444736),
Point2f(0.39148655510180935, 5.760722874719041),
Point2f(2.0522132541156353, -5.054709716557173), ]
)
end
@testset "case2" begin
g, pos = initgrid()
test_sim(ForceSimulation(pos, vertices(g);
link=LinkForce(edges=edges(g), strength=1, distance=20, iterations=5),
);
d3pos = [Point2f(-3.3245472779234477, -0.7843089033085806),
Point2f(5.89859956589116, 8.392889013341104),
Point2f(-2.4658781577866535, -2.1313697174875537),
Point2f(4.737808042565102, 7.5104963790562245),
Point2f(0.7611659689559747, -2.927913901127913),
Point2f(6.938033992372833, 7.707128776348669),
Point2f(-2.484173743440433, -0.126291659919052),
Point2f(4.720176213450422, 8.461027183443802),
Point2f(2.9725219124196065, -3.23417012794705),
Point2f(2.420798206764892, 8.612888028451207),
Point2f(0.9620239912345947, -2.823296261341196),
Point2f(7.5149796138632, 7.248691496807689),
Point2f(2.8215513499059766, -3.924584293427451),
Point2f(4.094372070073858, 8.484592580609112),
Point2f(-0.7972557564349454, -3.957134388834164),
Point2f(7.626959446291536, 8.102347919038314),
Point2f(-1.488563914083152, -0.8969745040462369),
Point2f(5.323612380711253, 8.395951576673951),
Point2f(2.910725548462931, -3.6001605466554114),
Point2f(-0.07211785418647038, 9.250958587790667),
Point2f(-1.9568714047737412, 0.6392654840409153),
Point2f(9.726148805240133, 3.892548566925247),
Point2f(-0.3190380703550626, -1.6826771570745551),
Point2f(2.44102505981799, 9.745784001504429),
Point2f(6.89274320389123, -2.2043931122607106), ],
d3vel = [Point2f(-3.799082827923448, -3.5106954033085804),
Point2f(4.40665976589116, 4.651670413341105),
Point2f(-3.4288330977866535, -3.6924833174875538),
Point2f(4.054975842565102, 6.2672006790562245),
Point2f(-2.5245417310440255, -6.113858201127913),
Point2f(4.177653492372833, 4.066117076348669),
Point2f(-4.711644643440433, -4.200303959919052),
Point2f(3.453305713450422, 5.164306183443802),
Point2f(-1.5299460875803939, -6.11438362794705),
Point2f(1.0692851067648916, 5.732059928451207),
Point2f(-3.6662867087654054, -4.939742261341196),
Point2f(3.4573686138632, 4.656384196807689),
Point2f(-1.673119950094024, -5.249965693427451),
Point2f(0.9807410700738579, 6.111210580609112),
Point2f(-1.1605698564349454, -4.789643788834164),
Point2f(3.3501540462915362, 3.6713672190383133),
Point2f(-4.326825914083152, -4.660231004046237),
Point2f(2.3311073807112535, 5.320645376673952),
Point2f(-0.5952957515370693, -4.989600946655411),
Point2f(-0.40910685418647036, 4.882245187790668),
Point2f(-5.373988404773741, -3.781378815959085),
Point2f(5.295813305240132, 2.9144085669252466),
Point2f(-4.068848770355062, -6.385918157074555),
Point2f(0.2746968598179898, 5.4397500015044296),
Point2f(2.0828424038912297, -5.3957371122607105), ]
)
end
@testset "case3" begin
g, pos = initgrid()
test_sim(ForceSimulation(pos, vertices(g);
link=LinkForce(edges=edges(g), distance=20, iterations=4),
);
d3pos = [Point2f(-3.469165627949963, -0.8311708114229415),
Point2f(5.155566719810249, 8.13339025133682),
Point2f(-1.2993304184848737, -4.0943762353771245),
Point2f(-2.349521732045157, 6.29579313610429),
Point2f(8.798814969769918, 0.6701223547548789),
Point2f(6.601391914128325, 7.747288007024977),
Point2f(-2.3025123719537177, -0.6762224462987696),
Point2f(1.466321189145842, 8.912959768938837),
Point2f(9.161271882648279, -0.3305104367792544),
Point2f(-3.0051381694890624, 5.7388538717641335),
Point2f(2.0352650480855723, -3.1289773458921486),
Point2f(7.618991804495266, 7.390017618129205),
Point2f(5.513817081462501, -3.8746388123515607),
Point2f(-0.23308261509129435, 7.252276366839931),
Point2f(2.9326774064555297, -4.455268055783812),
Point2f(7.647268194290955, 8.770291191955938),
Point2f(-1.7609729452315208, -0.15043281242752427),
Point2f(3.4140460456464647, 9.418231518998795),
Point2f(5.57458130365433, -3.1658696489831506),
Point2f(-2.283910757866974, 8.00421311495209),
Point2f(-1.8395491719637649, 1.5901911596006282),
Point2f(10.928525425326406, 2.2512742180666376),
Point2f(1.309483434177864, -1.6673496841673776),
Point2f(0.23116667999740526, 9.516206620728173),
Point2f(8.538365724012154, -0.37811497113449066), ],
d3vel = [Point2f(-3.943701177949963, -3.5575573114229413),
Point2f(3.6636269198102496, 4.39217165133682),
Point2f(-2.2622853584848737, -5.655489835377124),
Point2f(-3.032353932045157, 5.05249743610429),
Point2f(5.513107269769917, -2.515821945245121),
Point2f(3.8410114141283245, 4.106276307024977),
Point2f(-4.529983271953718, -4.750234746298769),
Point2f(0.19945068914584213, 5.616238768938838),
Point2f(4.65880388264828, -3.2107239367792544),
Point2f(-4.3566512694890625, 2.858025771764134),
Point2f(-2.593045651914428, -5.245423345892148),
Point2f(3.561380804495267, 4.797710318129205),
Point2f(1.019145781462501, -5.200020212351561),
Point2f(-3.346713615091294, 4.878894366839932),
Point2f(2.5693633064555295, -5.287777455783812),
Point2f(3.370462794290956, 4.339310491955938),
Point2f(-4.599234945231521, -3.913689312427524),
Point2f(0.4215410456464649, 6.3429253189987955),
Point2f(2.06856000365433, -4.55531004898315),
Point2f(-2.620899757866974, 3.6354997149520907),
Point2f(-5.256666171963765, -2.830453140399372),
Point2f(6.498189925326407, 1.2731342180666376),
Point2f(-2.4403272658221358, -6.370590684167378),
Point2f(-1.935161520002595, 5.210172620728174),
Point2f(3.7284649240121532, -3.5694589711344906), ]
)
end
@testset "case4 - 1 step" begin
g, pos = initgrid()
test_sim(ForceSimulation(pos, vertices(g);
link=LinkForce(edges=edges(g), iterations=3),
);
steps = 1,
d3pos = [Point2f(-5.8359510532756955, -3.119281922553579),
Point2f(7.346168702857163, 10.483206742537758),
Point2f(-3.6637919611674596, -5.729130857754015),
Point2f(1.5658120709830377, 10.941740327116442),
Point2f(4.776020025677243, -6.527950125320289),
Point2f(8.813826920914746, 9.769968985251495),
Point2f(-4.358556690468158, -2.446211938631399),
Point2f(3.623066793746944, 11.57242033382742),
Point2f(7.000590334462917, -5.709398235855176),
Point2f(-1.191422237328955, 10.842982013997949),
Point2f(-0.1109326616594597, -5.235487748063701),
Point2f(9.758770838137085, 9.291075200283164),
Point2f(4.060375612021622, -6.932583396662638),
Point2f(1.6049082935258931, 10.892083706794224),
Point2f(0.913016360605754, -7.303364633445146),
Point2f(9.515001570313835, 11.08172234497355),
Point2f(-3.8107953564406194, -2.2883226034575213),
Point2f(4.8076753379165655, 12.02618839808806),
Point2f(4.951302927379738, -6.506593776595333),
Point2f(-2.444687925150272, 11.596091307751593),
Point2f(-4.140681119350706, -0.7902674652910386),
Point2f(12.980145689828193, 4.93750849011238),
Point2f(-0.15567559195371095, -3.9874126159826826),
Point2f(0.7426377273241456, 12.464699299266723),
Point2f(9.249342228034662, -3.8839427460602156), ],
d3vel = [Point2f(-6.310486603275695, -5.845668422553579),
Point2f(5.854228902857163, 6.741988142537759),
Point2f(-4.62674690116746, -7.290244457754015),
Point2f(0.8829798709830377, 9.69844462711644),
Point2f(1.490312325677243, -9.713894425320289),
Point2f(6.053446420914747, 6.128957285251496),
Point2f(-6.586027590468158, -6.520224238631399),
Point2f(2.356196293746944, 8.27569933382742),
Point2f(2.4981223344629164, -8.589611735855176),
Point2f(-2.542935337328955, 7.962153913997948),
Point2f(-4.73924336165946, -7.351933748063701),
Point2f(5.7011598381370865, 6.698767900283165),
Point2f(-0.4342956879783777, -8.257964796662637),
Point2f(-1.5087227064741067, 8.518701706794225),
Point2f(0.549702260605754, -8.135874033445146),
Point2f(5.238196170313834, 6.650741644973548),
Point2f(-6.649057356440619, -6.051579103457521),
Point2f(1.815170337916566, 8.95088219808806),
Point2f(1.4452816273797386, -7.896034176595332),
Point2f(-2.781676925150272, 7.227377907751594),
Point2f(-7.557798119350706, -5.210911765291039),
Point2f(8.549810189828193, 3.9593684901123805),
Point2f(-3.9054862919537108, -8.690653615982683),
Point2f(-1.4236904726758546, 8.158665299266723),
Point2f(4.4394414280346615, -7.075286746060216), ]
)
end
@testset "case4 - 2 steps" begin
g, pos = initgrid()
test_sim(ForceSimulation(pos, vertices(g);
link=LinkForce(edges=edges(g), iterations=3),
);
steps = 2,
d3pos = [Point2f(-8.035262426693496, -5.086444454491533),
Point2f(9.578850557569424, 13.009344002074151),
Point2f(-5.558572925172668, -8.39392588430575),
Point2f(2.199342159075976, 14.922627168368077),
Point2f(5.250392185033552, -10.058968947153124),
Point2f(11.25059105350537, 12.26054552551814),
Point2f(-7.216081702104267, -5.281799605697552),
Point2f(4.812787917025695, 15.287396220283615),
Point2f(8.09728335159283, -9.385873026097858),
Point2f(-2.2441645978405864, 14.094103950583936),
Point2f(-2.1264223253763976, -8.409855121754266),
Point2f(12.149724007389986, 12.234230018018746),
Point2f(3.8393631564741177, -10.416999463273301),
Point2f(0.8879504160193351, 14.65971157026279),
Point2f(1.0896295401521556, -10.84288108747732),
Point2f(11.44395254829974, 13.668547178184646),
Point2f(-6.4795824453106405, -4.879744161173191),
Point2f(5.641092258328568, 15.740997321568448),
Point2f(5.52761848408907, -9.651621328730918),
Point2f(-3.5007479995621136, 14.451524274915188),
Point2f(-7.0935008719705905, -3.016610488836278),
Point2f(16.584429982758458, 6.583122138347939),
Point2f(-2.2478697062796473, -8.295144155822396),
Point2f(0.18607388009603087, 16.054572269798843),
Point2f(11.163976235782538, -6.834782987487397), ],
d3vel = [Point2f(-2.1993113734178005, -1.9671625319379533),
Point2f(2.2326818547122618, 2.5261372595363927),
Point2f(-1.8947809640052087, -2.6647950265517344),
Point2f(0.6335300880929383, 3.9808868412516345),
Point2f(0.47437215935630833, -3.5310188218328356),
Point2f(2.436764132590625, 2.4905765402666438),
Point2f(-2.8575250116361084, -2.8355876670661533),
Point2f(1.1897211232787508, 3.7149758864561937),
Point2f(1.0966930171299136, -3.6764747902426835),
Point2f(-1.0527423605116313, 3.2511219365859865),
Point2f(-2.015489663716938, -3.1743673736905644),
Point2f(2.3909531692529016, 2.9431548177355817),
Point2f(-0.2210124555475047, -3.4844160666106636),
Point2f(-0.716957877506558, 3.767627863468565),
Point2f(0.17661317954640174, -3.539516454032174),
Point2f(1.9289509779859042, 2.586824833211096),
Point2f(-2.6687870888700216, -2.5914215577156696),
Point2f(0.8334169204120019, 3.714808923480389),
Point2f(0.5763155567093314, -3.145027552135586),
Point2f(-1.0560600744118416, 2.855432967163596),
Point2f(-2.9528197526198845, -2.2263430235452395),
Point2f(3.6042842929302665, 1.645613648235559),
Point2f(-2.0921941143259364, -4.307731539839713),
Point2f(-0.5565638472281147, 3.5898729705321197),
Point2f(1.9146340077478763, -2.9508402414271813), ]
)
end
end
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 1217 | # run example codes
@testset "Examples" begin
@testset "README.md" begin
using Graphs, GraphPlayground, GeometryBasics, GLMakie
@testset "playground" begin
g = smallgraph(:karate)
p = playground(g)
display(p.sim)
GLMakie.closeall()
end
p = playground(g;
link_options=(;distance=25),
charge_options=(;strength=-100))
GLMakie.closeall()
g = grid([100,100]) # make a 20x20 grid from Graphs
p = playground(g,
ForceSimulation(Point2f, vertices(g);
link=LinkForce(;edges=edges(g), iterations=10, distance=0.5, strength=1),
charge=ManyBodyForce(;strength=-1),
center=PositionForce(target=Point2f(300,300)));
graphplot_options = (;node_size=[2 for _ in 1:nv(g)], edge_width=[1.0 for _ in 1:ne(g)]))
display(p)
p.sim.alpha.alpha_target = 0.5 # keep the simulation hot for a while
GLMakie.closeall()
end
@testset "examples" begin
@testset "sbm-test" begin
include("../examples/sbm-test.jl")
GLMakie.closeall()
end
@testset "mouse-pointer-collision" begin
include("../examples/mouse-pointer-collision.jl")
GLMakie.closeall()
end
end
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 765 | @testset "LinkForce tests" begin
# Two nodes 10 units apart on the x-axis, linked with target distance 1.0:
# the link force should pull them toward each other.
nodes = [1,2]
edgelist = [(1, 2)]
positions = [Point2f(0.0, 0.0), Point2f(10.0, 0.0)]
sim = ForceSimulation(positions, nodes;
link=LinkForce(edges=edgelist, strength=10, distance=1.0, bias=0.5))
iforce = sim.forces.link
GraphPlayground.force!(0.1, sim, iforce) # Assume alpha=0.1
@test sim.velocities[1][1] > 0 # Node 1 is pulled toward node 2 (moves right, +x)
@test sim.velocities[2][1] < 0 # Node 2 is pulled toward node 1 (moves left, -x)
@test sim.velocities[1][2] != 0 # y-velocity is nonzero because the force jiggles it
@test sim.velocities[2][2] != 0
@test sim.velocities[1][2] <= 10*eps(eltype(sim.velocities[1])) # but the jiggle is tiny
@test sim.velocities[2][2] <= 10*eps(eltype(sim.velocities[1])) # (within ~10 eps)
end
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 1522 | function simpleforces(pts, vel2;
strength=-30.0,
min_distance2 = 1.0,
max_distance2 = Inf,
alpha=1.0)
# Reference implementation: exact O(n^2) pairwise many-body (charge) force.
# Writes the total force on each point into `vel2`; used to validate the
# tree-based approximation in GraphPlayground.manybodyforce!.
function _compute_force(pt1, pt2, strength)
d = pt2 .- pt1
d2 = GraphPlayground.dot(d, d) # use dot from GraphPlayground
if d2 < max_distance2
#d = jiggle(d, rng)
# recomputed because the jiggle above would change d; redundant while
# the jiggle stays commented out
d2 = GraphPlayground.dot(d, d) # use dot from GraphPlayground
if d2 < min_distance2
# clamp very close pairs to avoid the force blowing up
d2 = sqrt(min_distance2*d2)
end
w = strength*alpha / d2
return d .* w
else
return 0.0 .* pt1
end
end
for i in eachindex(pts)
targetpt = pts[i]
f = 0.0 .* targetpt # zero vector of the same point type
for j in eachindex(pts)
if i != j
f = f .+ _compute_force(targetpt, pts[j], strength)
end
end
vel2[i] = f
end
end
function approxforces(pts)
    # Run the tree-based many-body approximation with the same fixed
    # parameters the d3 comparison uses, returning the per-point forces.
    out = map(p -> 0 .* p, pts)
    charge = map(_ -> Float32(-30), pts)
    generator = StableRNG(1)
    GraphPlayground.manybodyforce!(Float32(1.0), pts, pts, out, charge,
        Float32(1.0),  # min_distance2
        Float32(Inf),  # max_distance2
        Float32(0.81), # theta2 accuracy parameter
        generator)
    return out
end
function test_simpleforces(npts, approxfun, rtol; kwargs...)
    # Compare the exact O(n^2) forces against `approxfun` on `npts` random
    # points (fixed seed), succeeding when they agree to relative tolerance.
    points = rand(StableRNG(1), Point2f, npts)
    approximate = approxfun(points)
    exact = similar(points)
    simpleforces(points, exact; kwargs...)
    return isapprox(exact, approximate; rtol=rtol)
end
@testset "simpleforces" begin
@test test_simpleforces(100, approxforces, 0.025)
@test test_simpleforces(250, approxforces, 0.025)
@test test_simpleforces(500, approxforces, 0.025)
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 189 | @testset "PositionForce" begin
# With only a PositionForce (default target), all nodes should converge
# to near the origin after enough steps.
sim = ForceSimulation(1:10, center=PositionForce())
for _ in 1:50
step!(sim)
end
@test all( x -> all(xi->abs(xi) <= 0.01, x), sim.positions)
end | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | code | 956 | using GraphPlayground
using Test
using Aqua
using JET
using StableRNGs
using GeometryBasics
using Graphs
##
#=@testset "dev" begin
using NearestNeighbors
# create a set of points that are on x axis from 0 to 1
n = 100
pts = [Point2f(i, 0) for i in range(start=1.0, stop=0.01, length=n)]
radii = range(start=1.0, stop=0.01, length=100)
p = randperm(StableRNG(0), 100)
pts = pts[p]
radii = radii[p]
T = KDTree(pts)
maxradius = GraphPlayground._build_tree_info_maxradius(T, pts, radii)
end=#
##
@testset "GraphPlayground.jl" begin
include("linkforce.jl")
include("manybodyforce.jl")
include("positionforce.jl")
include("centerforce.jl")
include("d3-compare.jl")
include("examples.jl")
@testset "Code quality (Aqua.jl)" begin
Aqua.test_all(GraphPlayground;
ambiguities = false
)
end
@testset "Code linting (JET.jl)" begin
JET.test_package(GraphPlayground; target_defined_modules = true)
end
end
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | docs | 4640 | # GraphPlayground.jl
[](https://dgleich.github.io/GraphPlayground.jl/stable/)
[](https://dgleich.github.io/GraphPlayground.jl/dev/)
[](https://github.com/dgleich/GraphPlayground.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/dgleich/GraphPlayground.jl)
[](https://github.com/JuliaTesting/Aqua.jl)
Directly play and manipulate your graphs in Julia! This is hugely inspired by the
[d3-force](https://d3js.org/d3-force) package. This includes a port of this
package to Julia to handle the force directed layout.
Usage
-----
Using it should be as simple as giving the graph (right now, this has to be a graph from `Graphs.jl`) you want to play with to the playground function.
```
using Graphs, GraphPlayground
g = smallgraph(:karate)
playground(g)
```
This should open an interactive window that will visualize the graph.

But for such a small graph, I find the following looks better.
```
playground(g;
link_options=(;distance=25),
charge_options=(;strength=-100))
```
Unfortunately, tweaking the options is a real thing with this package. Future plans include slightly better automated choices.
Grids or very highly spatial graphs require a little bit more fussing.
```
using Graphs, GraphPlayground, GeometryBasics
g = grid([100,100]) # make a 100x100 grid from Graphs
p = playground(g,
ForceSimulation(Point2f, vertices(g);
link=LinkForce(;edges=edges(g), iterations=10, distance=0.5, strength=1),
charge=ManyBodyForce(;strength=-1),
center=PositionForce(target=Point2f(300,300)));
graphplot_options = (;node_size=[2 for _ in 1:nv(g)], edge_width=[1.0 for _ in 1:ne(g)]))
display(p)
p.sim.alpha.alpha_target = 0.5 # keep the simulation hot for a while
```

This does a few things differently. The defaults are setup for graphs of around 100 nodes.
- The code explicitly creates a force simulation. This is what controls how the graph layout works.
- The force simulation is setup to have: link attractive forces, node/charge repulsion forces,
and centering forces.
- The LinkForce gives edge-attraction forces with an ideal distance of 0.5;
the iterations parameter controls how many times we iterate over edges updating velocity vectors.
For grids, we want this to be large, but the default value of 1 is okay unless you have a ton of structure.
- The ManyBodyForce handles the node repulsion. strength=-1 is good if you want nodes close by, such as for
a large graph.
- The center force / Position force seeks to push all nodes to point (300, 300), this is roughly in the
middle of the display area.
- Because the graph is so big, we adjust the default sizes to 2 points for a node and 1 point for an edge.
- Finally, we want to keep the simulation "hot" so it doesn't decay too quickly. (Grids take a long time
to settle, so we setup the alpha_target to be 0.5.)
See the excellent documentation for d3-force for more information on the force simulation and some
of the parameters.
- <https://d3js.org/d3-force/link>
- <https://d3js.org/d3-force/many-body>
- <https://d3js.org/d3-force/position>
To get the positions of the nodes, just call
```
p = playground(g)
xy = p.sim.positions
```
This will return an array of node positions.
Important Note
--------------
Right now, GraphPlayground is setup with `GLMakie.jl` to create a window that continuously renders.
This has the odd side effect of making printing to the console slow. If you
notice this slowdown, just close the GraphPlayground window and it will go away.
Requirements
------------
- `GLMakie`: This is required to implement a window with a callback on each frame.
- `GraphMakie`: This is required for the graph plotting code
- `NearestNeighbors`: This is required for the balltree to implement the distant cell
approximation in the ManyBodyForce and CollisionForce
- `Graphs`: This is what we use to interact with GraphMakie
- `Colors`: I guess we need this to specify plot colors
- `Makie`: Just general Makie code as well.
Standard libraries
------------------
- `Random`: We use random data.
- `LinearAlgebra`: This is used for norms and dot products.
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | docs | 2245 | ```@meta
CurrentModule = GraphPlayground
```
# Forces
The [`ForceSimulation`](@ref) is the key dynamics implementation. It is hugely inspired
by the excellent `d3-force` library. It's setup for nice displays instead
of scientific accuracy. This includes things like
- random jiggling to avoid singularities.
- graceful fallbacks and approximations.
## How to give information about the data or graph to influence the forces
[`LinkForce`](@ref) takes information about edges along with a number of
additional optional weights. If you wish to specify them yourself you can
provide
- *a constant*. This constant is used for all edges
- *an array*. The array needs to have the same order as the edge input array.
- *a function*. The function is computed once and turned into the array by calling
the function for each edge. The function must take the following arguments:
`(i,e,src,dst)`, where:
- `i`: The index of the edge in the edges array.
- `e`: The edge structure.
- `src`: The source node.
- `dst`: The destination node.
You can use this interface for the `distance`, `strength`, and `bias`.
[`ManyBodyForce`](@ref) takes in a `strength` argument that determines
the impact on each node. If this is positive, the effect
is attractive to the node. If it is negative, the effect is repulsive
from the node. As before, this can be
- *a constant*. This constant is used for all nodes
- *an array*. The array needs to have the same order as the node input array.
- *a function*. The function is computed once and turned into the array by calling
the function for each node. The function is actually just a `map` over the
`nodes` input to the force simulation, as in `map(f, nodes)`.
[`PositionForce`](@ref) takes in a `target` argument that determines
the target position for each node.
- *a constant*. This constant position is used for all nodes
- *an array*. The array of positions which needs to have the same order
as the node input array.
- *a function*. The function is computed once and turned into the array by calling
the function for each node. The function is actually just a `map` over the
`nodes` input to the force simulation, as in `map(f, nodes)`.
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | docs | 384 | ```@meta
CurrentModule = GraphPlayground
```
# GraphPlayground
There are two main components to [GraphPlayground](https://github.com/dgleich/GraphPlayground.jl).
- A "easy-to-use" way to play around with a `Graphs.jl` graph
with an interactive window.
- An underlying [`ForceSimulation`](@ref) that handles the dynamics.
See the pages in the sidebar for more information.
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | docs | 327 | # Library
## User Friendly Interfaces
```@docs
playground
```
## Force Simulation Interface
```@docs
ForceSimulation
fixnode!
freenode!
step!
LinkForce
ManyBodyForce
CollisionForce
PositionForce
CenterForce
GraphPlayground.CoolingStepper
```
## Extra window help
```@docs
GraphPlayground.Window
```
## Index
```@index
``` | GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT",
"ISC"
] | 0.1.2 | 3df83f6cc7510605ca33ce6e9a2b0f5b60bce115 | docs | 5784 | ```@meta
CurrentModule = GraphPlayground
```
# Mouse Pointer Collision Demo
This is a port of the
[mouse pointer repulsion demo from the d3-force library](https://d3js.org/d3-force/collide) to Julia as an example of how the library works.
## Required packages
```
using GraphPlayground, StableRNGs, GeometryBasics, GLMakie
```
We use GeometryBasics for the `Point2f` type.
## Setup the nodes.
There are going to be `n+1` nodes in our simulation. `n` for each ball
and `1` for the mouse pointer. These parameters come from the d3 demo.
In this case, we allocate one extra node and set it's radius to 1.
This is going to represent the mouse pointer.
```
rng = StableRNG(1)
nballs = 200
nnodes = nballs + 1
width = 564
k = width/nnodes
radiusdist = k:4k
radius = rand(rng, radiusdist, nnodes )
radius[end] = 1
```
**Generate initial positions**
We generate random initial positions without concern for any collisions
or overlaps, etc.
```
pos = [Point2f0(rand(rng, 0:width), rand(rng, 0:width)) for _ in 1:nnodes]
pos = pos .- sum(pos) / length(pos)
```
## Setup the simulation
We setup the [`ForceSimulation`](@ref) now. This is going
to have a centering force to keep everything at (0,0).
We are going to model collisions for all of the nodes, except
with the radius grown by 1 so that they shouldn't look like
they are touching.
Finally, we need to setup the repulsion for the mouse pointer.
This is done by setting strength for each node to `0` except
for the last node. For this one we model a strong repulsive
force by setting strength to `-width*2/3` (recall that
negative strength corresponds to repulsion).
The last thing we change in this simulation is the
`alpha` option. (Maybe I need a better parameter name.)
This controls the simulation "cooling",
or how we want to force the simulation to settle even if
it might not want to settle. In this case, we want to keep
the simulation fairly "hot", which means we set a target
value of alpha to be `0.3`.
Finally, to mirror the dynamics of the _d3-force_ example,
we set the velocity decay to `0.9`.
```
sim = ForceSimulation(
pos, # the starting list of positions
eachindex(pos); # the list of nodes, it's just all the indices.
position=PositionForce(;strength=0.01), # a centering force
collide=CollisionForce(;radius=radius.+1,iterations=3), # the collision force
charge=ManyBodyForce(strength=(i) -> i==nnodes ? -width*2/3 : 0.0, theta2=0.82),
# this creates a strong repulsion from the mouse pointer (which is the
# last node)
alpha=GraphPlayground.CoolingStepper(alpha_target=0.3),
velocity_decay=0.9,)
```
A few notes, as pointed out in a few places, the _names_ of each
force do not matter. We simply treat them as a list. The names
are meant to help you, the user understand what you are doing or
communicate with others. For instance, the following is also fine.
(But really, don't do this... )
```
sim = ForceSimulation(
pos, # the starting list of positions
eachindex(pos); # the list of nodes, it's just all the indices.
gamma=PositionForce(;strength=0.01), # a centering force
delta=CollisionForce(;radius=radius.+1,iterations=3), # the collision force
theta=ManyBodyForce(strength=(i) -> i==nnodes ? -width*2/3 : 0.0, theta2=0.82),
# this creates a strong repulsion from the mouse pointer (which is the
# last node)
alpha=GraphPlayground.CoolingStepper(alpha_target=0.3),
velocity_decay=0.9)
```
## Linking the simulation to a Makie window and keeping it updated.
Setting up the scene. We first setup a Makie scene to display the
dots. This is "standard" dynamic Makie. In this case, we create
a scene. Then we create an Observable array based on the
random initial positions. The idea is that we can update
the positions based on the simulation. We setup the plot
with a simple scatter. There is a weird scaling to get
the radius draw the same way. This was determined by trial
and error to get a radius of 10 to look correct.
Each ball will have a small stroke as well. (This is why
we need the extra 1 pixel of width in the collision force.)
```
s = Scene(camera = campixel!, size = (width, width))
pos = Observable(sim.positions .+ Point2f0(width/2, width/2))
scatter!(s,
pos,
markersize=pi*radius/1.11, # weird scaling to get size right
markerspace=:pixel,
color=:black,
strokewidth=0.5,
strokecolor=:white,
)
```
Now, the heart of this is setting up an update loop. There seems
to be no good way to do this in Makie without create a Screen yourself.
So we setup a `Window` function to make it easier. The `Window` function
takes an update function that gets run every frame. (For the Makie
afficionados, this is mapped to the `on(screen.render_tick)` function.)
```
function update(_) # we don't use the argument
mp = mouseposition(s) # get the mouse position
fixnode!(sim, nnodes, mp .- Point2f0(width/2, width/2)) # fix the last node to the mouse pointer
step!(sim) # take a step in the simulation
pos[] = sim.positions .+ Point2f0(width/2, width/2) # update the positions
end
GraphPlayground.Window(update, s;
title="Mouse Pointer Repulsion Demo")
```
Of course, Julia supports the slick equivalent syntax to make this easier to write:
```
GraphPlayground.Window(s; title="Mouse Pointer Repulsion Demo") do _
mp = mouseposition(s) # get the mouse position
@show mp
fixnode!(sim, nnodes, mp .- Point2f0(width/2, width/2)) # fix the last node to the mouse pointer
step!(sim) # take a step in the simulation
pos[] = sim.positions .+ Point2f0(width/2, width/2) # update the positions
end
```
And with that, we have our complete interactive demo: because we want the
simulation to advance at a regular rate, the `Window` type creates a `GLMakie`
window that calls the update function on every rendered frame.
| GraphPlayground | https://github.com/dgleich/GraphPlayground.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1578 | using Documenter
using Cropbox
# Build the HTML documentation for Cropbox.jl with Documenter.
makedocs(
format = Documenter.HTML(
# pretty URLs only on CI; locally keep plain .html links for browsing
prettyurls = get(ENV, "CI", nothing) == "true",
canonical = "https://cropbox.github.io/Cropbox.jl/stable/",
assets = ["assets/favicon.ico"],
analytics = "UA-192782823-1",
),
sitename = "Cropbox.jl",
# sidebar layout of the generated site
pages = [
"Introduction" => [
"Cropbox" => "index.md",
"Installation" => "installation.md"
],
"Tutorials" => [
"Getting started with Julia" => "tutorials/julia.md",
"Getting started with Cropbox" => "tutorials/cropbox.md",
"Making a model" => "tutorials/makingamodel.md",
"Using an existing model" => "tutorials/usingamodel.md",
],
"Manual" => [
"System" => "guide/system.md",
"Variable" => "guide/variable.md",
"Configuration" => "guide/configuration.md",
"Simulation" => "guide/simulation.md",
"Visualization" => "guide/visualization.md",
"Inspection" => "guide/inspection.md",
],
"Gallery" => "gallery.md",
"Reference" => [
"Index" => "reference/index.md",
"Declaration" => "reference/declaration.md",
"Simulation" => "reference/simulation.md",
"Visualization" => "reference/visualization.md",
"Inspection" => "reference/inspection.md",
],
"Frequently Asked Questions" => "faq.md"
]
)
# Publish the built docs to GitHub Pages, tracking the `main` branch.
deploydocs(
repo = "github.com/cropbox/Cropbox.jl.git",
devbranch = "main",
)
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 4891 | #!/usr/bin/julia
__precompile__(true)
module FunctionWrappers
# NOTE(review): appears to be a vendored copy of yuyichao/FunctionWrappers.jl
# (see the issue links below) -- confirm which upstream version this tracks.
# Used to bypass NULL check
if VERSION >= v"1.6.0-DEV.663"
@inline function assume(v::Bool)
Base.llvmcall(
("""
declare void @llvm.assume(i1)
define void @fw_assume(i8)
{
%v = trunc i8 %0 to i1
call void @llvm.assume(i1 %v)
ret void
}
""", "fw_assume"), Cvoid, Tuple{Bool}, v)
end
else
@inline function assume(v::Bool)
Base.llvmcall(("declare void @llvm.assume(i1)",
"""
%v = trunc i8 %0 to i1
call void @llvm.assume(i1 %v)
ret void
"""), Cvoid, Tuple{Bool}, v)
end
end
#HACK: avoid strange LocalScan error on Julia 1.5, 1.7
# https://github.com/yuyichao/FunctionWrappers.jl/issues/17
#HACK: avoid cfunction return type error on Julia 1.8
# https://github.com/yuyichao/FunctionWrappers.jl/issues/25
Base.@pure pass_by_value(T) = isbitstype(T)
Base.@pure is_singleton(@nospecialize(T)) = isdefined(T, :instance)
# Base.@pure get_instance(@nospecialize(T)) = Base.getfield(T, :instance)
# Convert a raw call result to the declared return type `Ret`.
@inline function convert_ret(::Type{Ret}, ret) where Ret
# Only treat `Cvoid` as ignoring return value.
# Treating all singleton as ignoring return value is also possible as shown in the
# commented out implementation but it doesn't seem necessary.
# The stricter rule may help catching errors and can be more easily changed later.
Ret === Cvoid && return
# is_singleton(Ret) && return get_instance(Ret)
return convert(Ret, ret)
end
# Call wrapper since `cfunction` does not support non-function
# or closures
struct CallWrapper{Ret} <: Function end
(::CallWrapper{Ret})(f, args...) where Ret = convert_ret(Ret, f(args...))
# Specialized wrappers for each fixed arity from 0 to 128 arguments
for nargs in 0:128
@eval (::CallWrapper{Ret})(f, $((Symbol("arg", i) for i in 1:nargs)...)) where {Ret} =
convert_ret(Ret, f($((Symbol("arg", i) for i in 1:nargs)...)))
end
# Convert return type and generates cfunction signatures
Base.@pure map_rettype(T) =
(pass_by_value(T) || T === Any || is_singleton(T)) ? T : Ref{T}
Base.@pure function map_cfunc_argtype(T)
if is_singleton(T)
return Ref{T}
end
return (pass_by_value(T) || T === Any) ? T : Ref{T}
end
Base.@pure function map_argtype(T)
if is_singleton(T)
return Any
end
return (pass_by_value(T) || T === Any) ? T : Any
end
Base.@pure get_cfunc_argtype(Obj, Args) =
Tuple{Ref{Obj}, (map_cfunc_argtype(Arg) for Arg in Args.parameters)...}
if isdefined(Base, Symbol("@cfunction"))
# Generate a C-callable pointer that invokes an `objT` callable with the
# signature implied by `Ret` and `Args`.
@generated function gen_fptr(::Type{Ret}, ::Type{Args}, ::Type{objT}) where {Ret,Args,objT}
quote
@cfunction($(CallWrapper{Ret}()), $(map_rettype(Ret)),
($(get_cfunc_argtype(objT, Args).parameters...),))
end
end
else
function gen_fptr(::Type{Ret}, ::Type{Args}, ::Type{objT}) where {Ret,Args,objT}
cfunction(CallWrapper{Ret}(), map_rettype(Ret), get_cfunc_argtype(objT, Args))
end
end
# Callable wrapper with a fixed `Ret(Args...)` signature, dispatched through
# a generated C function pointer.
mutable struct FunctionWrapper{Ret,Args<:Tuple}
ptr::Ptr{Cvoid} # C function pointer; may be C_NULL (see reinit_wrapper)
objptr::Ptr{Cvoid} # pointer to the wrapped callable
obj # rooted reference that keeps the wrapped callable alive
objT # concrete type of the wrapped callable
function (::Type{FunctionWrapper{Ret,Args}})(obj::objT) where {Ret,Args,objT}
objref = Base.cconvert(Ref{objT}, obj)
new{Ret,Args}(gen_fptr(Ret, Args, objT),
Base.unsafe_convert(Ref{objT}, objref), objref, objT)
end
(::Type{FunctionWrapper{Ret,Args}})(obj::FunctionWrapper{Ret,Args}) where {Ret,Args} = obj
end
Base.convert(::Type{T}, obj) where {T<:FunctionWrapper} = T(obj)
Base.convert(::Type{T}, obj::T) where {T<:FunctionWrapper} = obj
# Regenerate the function pointer when it is C_NULL (see the precompile-support
# branch in do_ccall below).
@noinline function reinit_wrapper(f::FunctionWrapper{Ret,Args}) where {Ret,Args}
objref = f.obj
objT = f.objT
ptr = gen_fptr(Ret, Args, objT)::Ptr{Cvoid}
f.ptr = ptr
f.objptr = Base.unsafe_convert(Ref{objT}, objref)
return ptr
end
@generated function do_ccall(f::FunctionWrapper{Ret,Args}, args) where {Ret,Args}
# Has to be generated since the arguments type of `ccall` does not allow
# anything other than tuple (i.e. `@pure` function doesn't work).
quote
$(Expr(:meta, :inline))
ptr = f.ptr
if ptr == C_NULL
# For precompile support
ptr = reinit_wrapper(f)
end
assume(ptr != C_NULL)
objptr = f.objptr
ccall(ptr, $(map_rettype(Ret)),
(Ptr{Cvoid}, $((map_argtype(Arg) for Arg in Args.parameters)...)),
objptr, $((:(convert($(Args.parameters[i]), args[$i]))
for i in 1:length(Args.parameters))...))
end
end
@inline (f::FunctionWrapper)(args...) = do_ccall(f, args)
# Testing only
const identityAnyAny = FunctionWrapper{Any,Tuple{Any}}(identity)
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 910 | """
Cropbox
Declarative crop modeling framework. https://github.com/cropbox/Cropbox.jl
See also: [`@system`](@ref), [`@config`](@ref), [`simulate`](@ref), [`evaluate`](@ref), [`calibrate`](@ref), [`visualize`](@ref), [`manipulate`](@ref)
"""
module Cropbox
# core framework: systems, units, state variables, configuration
include("system.jl")
include("unit.jl")
include("random.jl")
include("graph.jl")
include("macro.jl")
include("state.jl")
include("bundle.jl")
include("config.jl")
# built-in systems
include("system/clock.jl")
include("system/context.jl")
include("system/controller.jl")
include("system/calendar.jl")
include("system/store.jl")
include("system/thermaltime.jl")
# user-facing utilities
include("util/simulate.jl")
include("util/calibrate.jl")
include("util/evaluate.jl")
include("util/gather.jl")
include("util/color.jl")
include("util/dive.jl")
include("util/hierarchy.jl")
include("util/plot.jl")
include("util/visualize.jl")
include("util/manipulate.jl")
include("precompile.jl")
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 2221 | struct Bundle{S<:System,P,F}
# A selection of systems gathered from a Produce variable, created by
# indexing a Produce with an ops string (see the Bundle constructor below).
produce::Produce{P} # the Produce variable to collect from
recursive::Bool # descend into produced children recursively ("**")
filter::F # name of a variable used to filter systems, or nothing
end
# Parse an ops string into a Bundle:
#   "*"        -- collect children only at the current level (default)
#   "**"       -- collect all descendants recursively
#   any other  -- treated as the name of a variable to filter systems by
# Segments may be combined with "/", e.g. "**/alive".
Bundle(p::Produce{P,V}, ops::AbstractString) where {P,V} = begin
    recursive = false
    filter = nothing
    for op in split(ops, "/")
        if op == "*"
            # collecting children only at the current level
            recursive = false
        elseif op == "**"
            # collecting all children recursively
            recursive = true
        else
            #TODO: support generic indexing function?
            filter = op
        end
    end
    S = eltype(p)
    F = typeof(filter)
    Bundle{S,P,F}(p, recursive, filter)
end
# Materialize the systems selected by a Bundle: gather from the Produce
# variable (recursively walking descendants when `recursive` is set), then
# keep only systems whose `filter` variable evaluates truthy.
Base.collect(b::Bundle{S}) where {S<:System} = begin
p = getfield(b, :produce)
v = collect(p)
if getfield(b, :recursive)
l = S[]
#TODO: possibly reduce overhead by reusing calculated values in child nodes
# depth-first walk: push each system, then recurse into what it produced
g(V::Vector{<:System}) = for s in V; g(s) end
g(s::System) = (push!(l, s); g(value(getfield(s, p.name))))
g(::Nothing) = nothing
g(v)
else
l = copy(v)
end
f = getfield(b, :filter)
if !isnothing(f)
filter!(s -> value(s[f]), l)
end
l
end
# `produce_var["**/cond"]` syntax creates a Bundle from a Produce variable.
Base.getindex(s::Produce, ops::AbstractString) = Bundle(s, ops)
# Lazy view over the values of a single variable across a bundle of systems.
struct Bunch{V}
it::Base.Generator
end
Base.iterate(b::Bunch, i...) = iterate(getfield(b, :it), i...)
Base.length(b::Bunch) = length(getfield(b, :it))
Base.eltype(::Type{<:Bunch{<:State{V}}}) where V = V
# Broadcasting property access: `bundle.x` / `bunch.x` yields a Bunch of values.
Base.getproperty(b::Bundle{S}, p::Symbol) where {S<:System} = (value(getfield(x, p)) for x in value(b)) |> Bunch{vartype(S, p)}
Base.getproperty(b::Bunch{S}, p::Symbol) where {S<:System} = (value(getfield(x, p)) for x in b) |> Bunch{vartype(S, p)}
Base.getindex(b::Bundle, i::AbstractString) = getproperty(b, Symbol(i))
Base.getindex(b::Bunch, i::AbstractString) = getproperty(b, Symbol(i))
value(b::Bundle) = collect(b)
#TODO: also make final value() based on generator, but then need sum(x; init=0) in Julia 1.6 for empty generator
#value(b::Bunch) = (value(v) for v in b)
value(b::Bunch) = collect(b)
# `b[]` and `b'` are shorthands for materializing the values.
Base.getindex(b::Bundle) = value(b)
Base.getindex(b::Bunch) = value(b)
Base.adjoint(b::Bundle) = value(b)
Base.adjoint(b::Bunch) = value(b)
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 14374 | using DataStructures: OrderedDict, DefaultOrderedDict
# Internal storage: ordered map from system name to its parameter mapping.
const _Config = OrderedDict{Symbol,Any}
"""
    Config

Contains a set of configuration for systems. Configuration for a system contains parameter values.

# Examples
```julia-repl
julia> @config :S => :a => 1
Config for 1 system:
  S
    a = 1
```

See also: [`@config`](@ref)
"""
struct Config
config::_Config
Config(c...) = new(_Config(c...))
end
# Dict-like interface delegating to the underlying OrderedDict.
Base.getindex(c::Config, i) = c.config[i]
#HACK: handle :0
Base.getindex(c::Config, i::Int) = c.config[Symbol(i)]
Base.length(c::Config) = length(c.config)
Base.iterate(c::Config) = iterate(c.config)
Base.iterate(c::Config, i) = iterate(c.config, i)
Base.eltype(::Type{Config}) = Pair{Symbol,Any}
Base.get(c::Config, k, d) = get(c.config, k, d)
Base.haskey(c::Config, k) = haskey(c.config, k)
Base.:(==)(c::Config, d::Config) = c.config == d.config
Base.merge(f::Function, c::Config, D...) = merge(f, c.config, [d.config for d in D]...) |> Config
# Compact one-line form used when a Config appears inside a container.
Base.show(io::IO, c::Config) = print(io, "<Config>")
Base.show(io::IO, ::MIME"text/plain", c::Config) = begin
n = length(c)
if n == 0
print(io, "Config empty")
elseif n == 1
println(io, "Config for 1 system:")
else
println(io, "Config for $n systems:")
end
f((s, C); color) = begin
b = IOBuffer()
x = IOContext(b, :color => color)
print(x, " ")
printstyled(x, s, color=:light_magenta)
K = keys(C)
l = isempty(K) ? 0 : maximum(length.(string.(K)))
for (k, v) in C
println(x)
print(x, " ")
printstyled(x, rpad(canonicalnamelabel(k), l), color=:light_blue)
printstyled(x, " = ", color=:light_black)
print(x, labelstring(v))
end
String(take!(b))
end
color = get(io, :color, false)
join(io, f.(c; color), '\n')
end
import Markdown
# HTML display (e.g. notebooks): renders each system's parameters as a table;
# values are HTML-escaped via Markdown.htmlesc.
Base.show(io::IO, ::MIME"text/html", c::Config) = begin
n = length(c)
print(io, "<p style=\"font-family: monospace\">")
if n == 0
print(io, "Config empty")
elseif n == 1
print(io, "Config for 1 system:")
else
print(io, "Config for $n systems:")
end
println(io, "</p>")
# render one (system => parameters) section as an HTML table
f((s, C)) = begin
b = IOBuffer()
println(b, "<table style=\"font-family: monospace\">")
println(b, "<tr style=\"background-color: transparent\">")
println(b, "<td colspan=\"3\" style=\"text-align: left; padding: 2px; padding-left: 20px; color: rebeccapurple\">$s</th>")
println(b, "</tr>")
K = keys(C)
l = isempty(K) ? 0 : maximum(length.(string.(K)))
for (k, v) in C
print(b, "<tr style=\"background-color: transparent\">")
print(b, "<td style=\"text-align: left; padding: 2px; padding-left: 40px; color: royalblue\">$k</td>")
print(b, "<td style=\"text-align: center; padding: 2px 10px 2px 10px; color: gray\">=</td>")
print(b, "<td style=\"text-align: left; padding: 2px;\">$(Markdown.htmlesc(labelstring(v)))</td>")
println(b, "</tr>")
end
println(b, "</table>")
String(take!(b))
end
join(io, f.(c), '\n')
end
# `configure` normalizes many input shapes (pairs, dicts, tuples, vectors,
# nothing, varargs) into a single merged Config.
configure(c::Config) = c
configure(c::AbstractDict) = configure(c...)
configure(c::Pair) = _configure(c.first, c.second)
configure(c::Tuple) = configure(c...)
configure(c::Vector) = configure.(c)
configure(c...) = merge(merge, configure.(c)...)
configure(::Nothing) = configure()
configure(c) = error("unrecognized configuration: $c")
configure() = Config()
_configure(k::Symbol, v) = Config(k => _configure(v))
# "System.key" style string keys are split into a nested system => key pair.
_configure(k::String, v) = begin
a = Symbol.(split(k, "."))
n = length(a)
if n == 2
Config(a[1] => _configure(a[2] => v))
elseif n == 1
Config(a[1] => _configure(v))
else
error("unrecognized configuration key string: $k")
end
end
# Configuring with a system type validates keys against the system's declared
# parameters (names and aliases) and attaches each parameter's declared unit.
_configure(S::Type{<:System}, l) = begin
P = filter!(istag(:parameter), [n.info for n in dependency(S).N])
K = map(v -> (v.name, v.alias), P) |> Iterators.flatten
U = fieldunits(S)
C = _configure(l)
C = map(collect(C)) do p
k, v = p
(k ∉ K) && error("unrecognized parameter: $S => $k => $v")
k => unitfy(v, U[k])
end |> OrderedDict
_configure(namefor(S), C)
end
_configure(k, v) = _configure(Symbol(k), v)
_configure(v) = _Config(v)
_configure(v::NamedTuple) = _Config(pairs(v))
# Flatten a Config into an ordered (system, key) => value mapping.
parameterflatten(c::Config) = begin
    flat = OrderedDict()
    for (sys, params) in c, (key, val) in params
        flat[(sys, key)] = val
    end
    flat
end
# Flattened views over a Config's parameters, in declaration order.
parameterkeys(c::Config) = collect(keys(parameterflatten(c)))
parametervalues(c::Config) = collect(values(parameterflatten(c)))
parameterunits(c::Config) = parameterunits(parameterkeys(c))
parameterunits(K) = [fieldunit(k, v) for (k, v) in K]
# Rebuild a Config from parallel key/value(/unit) vectors (inverse of flatten).
parameterzip(K, V) = parameterzip(K, V, repeat([nothing], length(V)))
parameterzip(K, V, U) = begin
l = DefaultOrderedDict(OrderedDict)
for ((s, k), v, u) in zip(K, V, U)
l[s][k] = unitfy(v, u)
end
configure(l)
end
# codify: move every parameter under a single system `p`, renaming each key to
# a "system.key" string; decodify reverses the transformation.
codify(c::Config, p) = begin
p = Symbol(p)
K = [(p, Symbol(string(s, '.', k))) for (s, k) in parameterkeys(c)]
V = parametervalues(c)
U = parameterunits(c)
parameterzip(K, V, U)
end
decodify(c::Config, p) = begin
p = Symbol(p)
K = [Pair(Symbol.(split(string(k), "."))...) for k in keys(c[p])]
V = [v for v in values(c[p])]
U = parameterunits(K)
parameterzip(K, V, U)
end
#TODO: wait until TOML 0.5 gets support
# using TOML
# loadconfig(c::AbstractString) = configure(TOML.parse(c))
# Look up a parameter value, returning `missing` when absent; the vector forms
# try each (system, key) combination and return the first value found.
option(c::Config, s::Symbol, k::Symbol) = begin
v = get(c, s, missing)
ismissing(v) ? v : get(v, k, missing)
end
option(c::Config, S::Vector{Symbol}, k::Symbol) = option(c, S, [k])
option(c::Config, s::Symbol, K::Vector{Symbol}) = option(c, [s], K)
option(c::Config, S::Vector{Symbol}, K::Vector{Symbol}) = begin
v = missing
for (s, k) in Iterators.product(S, K)
#HACK: support private parameter (i.e. :S => :_a for S._a)
k = uncanonicalname(k, s)
v = option(c, s, k)
!ismissing(v) && break
end
v
end
using DataStructures: OrderedSet
"""
    parameters(S; <keyword arguments>) -> Config

Extract a list of parameters defined for system `S`.

# Arguments
- `S::Type{<:System}`: type of system to be inspected.

# Keyword Arguments
- `alias=false`: show alias instead of parameter name.
- `recursive=false`: extract parameters from other systems declared in `S`.
- `exclude=()`: systems excluded in recursive search.
- `scope=nothing`: evaluation scope; default is `S.name.module`.

# Examples
```julia-repl
julia> @system S(Controller) begin
           a: aaa => 1 ~ preserve(parameter)
       end;

julia> parameters(S)
Config for 1 system:
  S
    a = 1

julia> parameters(S; alias=true)
Config for 1 system:
  S
    aaa = 1

julia> parameters(S; recursive=true)
Config for 3 systems:
  Clock
    init = 0 hr
    step = 1 hr
  Context
  S
    a = 1

julia> parameters(S; recursive=true, exclude=(Context,))
Config for 1 system:
  S
    a = 1
```
"""
parameters(::Type{S}; alias=false, recursive=false, exclude=(), scope=nothing) where {S<:System} = begin
    #HACK: default evaluation scope is the module where S was originally defined
    isnothing(scope) && (scope = S.name.module)
    # all variable infos in dependency order; P keeps only declared parameters
    V = [n.info for n in dependency(S).N]
    P = filter(istag(:parameter), V)
    key = alias ? (v -> isnothing(v.alias) ? v.name : v.alias) : (v -> v.name)
    K = constsof(S) |> keys
    # evaluate only if parameter has no dependency on other variables
    val(v) = val(v, Val(isempty(v.args)))
    val(v, ::Val{false}) = missing
    val(v, ::Val{true}) = begin
        # bind system constants into a let-scope, then evaluate body and unit
        @gensym CS
        l = (:($k = $CS[$(Meta.quot(k))]) for k in K)
        b = @eval scope let $CS = Cropbox.constsof($S), $(l...); $(v.body) end
        u = @eval scope let $CS = Cropbox.constsof($S), $(l...); $(gettag(v, :unit)) end
        unitfy(b, u)
    end
    C = configure(namefor(S) => ((key(v) => val(v) for v in P)...,))
    if recursive
        # gather system-typed member variables and recurse into them
        T = OrderedSet([@eval scope $(v.type) for v in V])
        T = map(collect(T)) do t
            #HACK: not working for dynamic type (i.e. eltype(Vector{<:System}) = Any)
            et = eltype(t)
            et <: System ? et : t <: System ? t : nothing
        end
        filter!(!isnothing, T)
        filter!(t -> !any(t .<: exclude), T)
        # exclude already-visited systems to avoid infinite recursion
        X = (S, T..., exclude...) |> Set
        C = configure(parameters.(T; alias, recursive=true, exclude=X, scope)..., C)
    end
    C
end
"""
    parameters(s; <keyword arguments>) -> Config

Extract a list of parameters from an existing instance of system `s`.

# Arguments
- `s::System`: instance of system to be inspected.

# Keyword Arguments
- `alias=false`: show alias instead of parameter name.
- `recursive=false`: extract parameters from other systems declared in `S`.
- `exclude=()`: systems excluded in recursive search.

# Examples
```julia-repl
julia> @system S(Controller) begin
           a: aaa => 1 ~ preserve(parameter)
       end;

julia> s = instance(S; config = :S => :a => 2);

julia> parameters(s)
Config for 1 system:
  S
    a = 2.0

julia> parameters(s; alias = true)
Config for 1 system:
  S
    aaa = 2.0

julia> parameters(s; recursive = true)
Config for 3 systems:
  Clock
    init = 0.0 hr
    step = 1.0 hr
  Context
  S
    a = 2.0

julia> parameters(s; recursive = true, exclude = (Context,))
Config for 1 system:
  S
    a = 2.0
```
"""
parameters(s::S; alias=false, recursive=false, exclude=()) where {S<:System} = begin
    V = [n.info for n in dependency(S).N]
    P = filter(istag(:parameter), V)
    key = alias ? (v -> isnothing(v.alias) ? v.name : v.alias) : (v -> v.name)
    # instance version reads current values via s[name]'
    C = configure(namefor(S) => ((key(v) => s[v.name]' for v in P)...,))
    if recursive
        # recurse into member system instances, skipping excluded ones
        I = filter!(i -> i isa System && !any(isa.(i, exclude)), collect(s))
        T = typefor.(I)
        X = (S, T..., exclude...) |> Set
        C = configure(parameters.(I; alias, recursive, exclude=X)..., C)
    end
    C
end
# Cartesian product of configuration patches: every combination of expanded
# patch values becomes one Config, each layered on top of `base`.
configmultiply(; base=()) = [configure(base)]
configmultiply(patches::Vector; base=()) = configmultiply(patches...; base)
configmultiply(patches...; base=()) = begin
    expanded = configexpand(first(patches); base)
    for patch in Iterators.drop(patches, 1)
        expanded = collect(Iterators.flatten(configexpand(patch; base=c) for c in expanded))
    end
    expanded
end
# Expand one patch of the form `:S => :k => V` (V iterable) into a vector of
# single-value configurations, each layered on `base`.
configexpand(patch; base=()) = begin
    P = configure(patch)
    configs = if isempty(P)
        []
    else
        # exactly one system and one key may be expanded at a time
        s, C = only(P)
        k, V = only(C)
        #HACK: allow single patch (i.e. `0 => :a => 1` instead of `1:2`)
        reshape([s => k => v for v in V], :)
    end
    configexpand(configs; base)
end
configexpand(configs::Vector; base=()) = configrebase(configs; base)
# Layer each config on top of `base`; an empty list yields just the base.
configrebase(configs::Vector; base=()) = isempty(configs) ? [configure(base)] : [configure(base, c) for c in configs]
configrebase(config; base=()) = configrebase([config]; base)
# Merge two configuration operands; vectors broadcast element-wise.
configreduce(a::Vector, b) = configure.(a, b)
configreduce(a, b::Vector) = configrebase(b; base=a)
configreduce(a::Vector, b::Vector) = configreduce.(a, b)
configreduce(a, b) = configure(a, b)
configreduce(a) = configure(a)
configreduce(a::Vector) = configure.(a)
using MacroTools: @capture
"""
    @config c.. -> Config | Vector{Config}

Construct a set or multiple sets of configuration.

A basic unit of configuration for a system `S` is represented by a pair in the form of `S => pv`. System name `S` is expressed in a symbol. If actual type of system is used, its name will be automatically converted to a symbol.

A parameter name and corresponding value is then represented by another pair in the form of `p => v`. When specifiying multiple parameters, a tuple of pairs like `(p1 => v1, p2 => v2)` or a named tuple like `(p1 = v1, p2 = v2)` can be used. Parameter name must be a symbol and should indicate a variable declared with `parameter` tag as often used by `preserve` state variable. For example, `:S => (:a => 1, :b => 2)` has the same meaning as `S => (a = 1, b = 2)` in the same scope.

Configurations for multiple systems can be concatenated by a tuple. Multiple elements in `c` separated by commas implicitly forms a tuple. For example, `:S => (:a => 1, :b => 2), :T => :x => 1` represents a set of configuration for two systems `S` and `T` with some parameters. When the same names of system or variable appears again during concatenation, it will be overriden by later ones in an order appeared in a tuple. For example, `:S => :a => 1, :S => :a => 2` results into `:S => :a => 2`. Instead of commas, `+` operator can be used in a similar way as `(:S => :a => 1) + (:S => :a => 2)`. Note parentheses placed due to operator precedence.

When multiple sets of configurations are needed, as in `configs` for [`simulate`](@ref), a vector of `Config` is used. This macro supports some convenient ways to construct a vector by composing simpler configurations. Prefix operator `!` allows *expansion* of any iterable placed in the configuration value. Infix operator `*` allows *multiplication* of a vector of configurations with another vector or a single configuration to construct multiple sets of configurations. For example, `!(:S => :a => 1:2)` is expanded into two sets of separate configurations `[:S => :a => 1, :S => :a => 2]`. `(:S => :a => 1:2) * (:S => :b => 0)` is multiplied into `[:S => (a = 1, b = 0), :S => (a = 2, b = 0)]`.

# Examples
```julia-repl
julia> @config :S => (:a => 1, :b => 2)
Config for 1 system:
  S
    a = 1
    b = 2
```
```julia-repl
julia> @config :S => :a => 1, :S => :a => 2
Config for 1 system:
  S
    a = 2
```
```julia-repl
julia> @config !(:S => :a => 1:2)
2-element Vector{Config}:
 <Config>
 <Config>
```
```julia-repl
julia> @config (:S => :a => 1:2) * (:S => :b => 0)
2-element Vector{Config}:
 <Config>
 <Config>
```
"""
macro config(ex)
    # accept either `a + b + ...` or an implicit tuple of elements
    @capture(ex, +(P__) | P__)
    P = map(P) do p
        if @capture(p, !x_)
            # `!x` expands an iterable value into a vector of configs
            :(Cropbox.configexpand($(esc(x))))
        elseif @capture(p, *(x__))
            # `a * b` multiplies configuration sets (cartesian product)
            :(Cropbox.configmultiply($(esc.(x)...)))
        else
            :(Cropbox.configreduce($(esc(p))))
        end
    end
    # fold all elements left-to-right; later entries override earlier ones
    reduce(P) do a, b
        :(Cropbox.configreduce($a, $b))
    end
end
macro config(ex, exs...)
    :(Cropbox.@config(($(esc(ex)), $(esc.(exs)...))))
end
macro config()
    :(Cropbox.@config(()))
end
export Config, parameters, @config
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 5876 | using Graphs: Graphs, DiGraph, add_edge!, add_vertex!, dst, edges, src
# Directed graph of variable update steps (pre/main/post) within a system,
# used to derive a valid evaluation order.
struct Dependency <: Graph
    # underlying directed graph; vertex i corresponds to N[i]
    g::DiGraph
    # node list, indexed by vertex number
    N::Vector{VarNode}
    # reverse lookup: node => vertex index in `g`
    I::Dict{VarNode,Int}
    # variable name (including aliases) => VarInfo
    M::Dict{Symbol,VarInfo}
end
# Build an empty Dependency over a name => VarInfo map.
# Fix: the node-index dict is now constructed with the declared value type
# (`Dict{VarNode,Int}`); it was previously built as `Dict{VarNode,VarInfo}()`
# and only worked via implicit conversion of an empty dict.
dependency(M::Dict{Symbol,VarInfo}) = Dependency(DiGraph(), VarNode[], Dict{VarNode,Int}(), M)
# Build a Dependency from a list of variable infos: register every name and
# alias, then add each variable's nodes and edges.
dependency(V::Vector{VarInfo}) = begin
    M = Dict{Symbol,VarInfo}()
    for v in V
        for n in names(v)
            M[n] = v
        end
    end
    d = dependency(M)
    add!(d, V)
    d
end
# Convenience constructors from a system type or instance.
dependency(::Type{S}) where {S<:System} = dependency(geninfos(S))
dependency(::S) where {S<:System} = dependency(S)
# Underlying directed graph of a Dependency.
graph(d::Dependency) = d.g
# Register node `n` in the graph if absent (idempotent) and return it.
node!(d::Dependency, n::VarNode) = begin
    haskey(d.I, n) && return n
    add_vertex!(d.g)
    push!(d.N, n)
    d.I[n] = length(d.N)
    n
end
# Register a node for variable `v` at step `t`; symbols resolve through d.M.
node!(d::Dependency, v::VarInfo, t::VarStep) = node!(d, VarNode(v, t))
node!(d::Dependency, v::Symbol, t::VarStep) = node!(d, VarNode(d.M[v], t))
# Step-specific shorthands.
prenode!(d::Dependency, v) = node!(d, v, PreStep())
mainnode!(d::Dependency, v) = node!(d, v, MainStep())
postnode!(d::Dependency, v) = node!(d, v, PostStep())
# Return the earliest step node (pre -> main -> post) registered for variable `v`.
# Throws when no node exists, which indicates a dependency-construction bug.
firstnode(d::Dependency, v::VarInfo) = begin
    for t in (PreStep, MainStep, PostStep)
        n = VarNode(v, t())
        haskey(d.I, n) && return n
    end
    # fix: the old `@error "no node found for $a"` interpolated an undefined
    # variable `a`, raising UndefVarError instead of reporting the failure
    error("no node found for $(v.name)")
end
# Collect names of other variables that `v` references, gathered from its
# equation arguments and/or from expressions inside its tags.
extract(v::VarInfo; equation=true, tag=true, include=(), exclude=()) = begin
    # an arg may be a plain name or a `k=v` binding; dependencies come from the value part
    pick(a) = let k, v; extractfuncargdep(@capture(a, k_=v_) ? v : a) end
    pack(A) = Iterators.flatten(filter!(!isnothing, pick.(A))) |> Tuple
    eq = equation ? pack(v.args) : ()
    #@show eq
    #HACK: exclude internal tags (i.e. _type)
    #HACK: filter included/excluded tags
    #TODO: share logic with filterconstructortags() in macro?
    tagfilter(t) = !startswith(String(t), "_") && (isempty(include) ? true : t ∈ include) && (isempty(exclude) ? true : t ∉ exclude)
    par = tag ? Tuple(Iterators.flatten(filter!(!isnothing, [extractfuncargdep(p[2]) for p in v.tags if tagfilter(p[1])]))) : ()
    #@show par
    Set([eq..., par...]) |> collect
end
# Add edges into node `n` from every variable that `v` depends on.
link!(d::Dependency, v::VarInfo, n::VarNode; kwargs...) = begin
    A = extract(v; kwargs...)
    #HACK: skip missing refs to allow const variable patch syntax (i.e. @system S{x=1})
    V = [d.M[a] for a in A if haskey(d.M, a)]
    for v0 in V
        # self reference is handled within a single node
        if v0 == v
            continue
        elseif v0.state == :Bisect
            # Bisect dependencies must be satisfied before its pre step
            n0 = prenode!(d, v0)
            link!(d, n0, n)
        elseif isnothing(v0.state) && istag(v0, :context)
            # context: run after its main step but before its post step
            n1 = mainnode!(d, v0)
            n2 = postnode!(d, v0)
            link!(d, n1, n)
            link!(d, n, n2)
        else
            n1 = mainnode!(d, v0)
            link!(d, n1, n)
        end
    end
end
# Raw edge insertion between two registered nodes.
link!(d::Dependency, a::VarNode, b::VarNode) = begin
    #@show "link: add edge $(a.info.name) ($(d.I[a])) => $(b.info.name) ($(d.I[b]))"
    add_edge!(d.g, d.I[a], d.I[b])
end
# Register the pre/main/post nodes for a variable and wire its dependency
# edges, with special casing per state kind.
add!(d::Dependency, v::VarInfo) = begin
    #@show "add! $v"
    if v.state == :Accumulate || v.state == :Capture
        n1 = mainnode!(d, v)
        n2 = postnode!(d, v)
        link!(d, n1, n2)
        # needs `time` tags update, but equation args should be excluded due to cyclic dependency
        #HACK: support `when` tag while avoiding cyclic dependency
        #TODO: more elegant way to handle tags include/exclude
        link!(d, v, n1; equation=false, exclude=(:when,))
        link!(d, v, n2; equation=false, include=(:when,))
        link!(d, v, n2)
    elseif v.state == :Bisect
        n0 = prenode!(d, v)
        n1 = mainnode!(d, v)
        link!(d, n0, n1)
        # needs `lower/upper` tags
        link!(d, v, n0; equation=false)
        link!(d, v, n1)
        # needs access to context in Bisect constructor (otherwise convergence would fail)
        c = mainnode!(d, :context)
        link!(d, c, n0)
    elseif v.state == :Produce
        n0 = prenode!(d, v)
        n1 = mainnode!(d, v)
        n2 = postnode!(d, v)
        link!(d, n0, n1)
        link!(d, n1, n2)
        # no tag available for produce, but just in case we need one later
        link!(d, v, n0; equation=false)
        link!(d, v, n2)
        # make sure context get updated before updating subtree
        c = mainnode!(d, :context)
        link!(d, c, n1)
    elseif isnothing(v.state) && istag(v, :context)
        n0 = prenode!(d, v)
        n1 = mainnode!(d, v)
        n2 = postnode!(d, v)
        link!(d, n0, n1)
        link!(d, n1, n2)
        link!(d, v, n0)
        link!(d, v, n2)
    else
        # generic variable: single main-step node
        n = mainnode!(d, v)
        link!(d, v, n)
    end
    # parameter values come from config, so config must update first
    if istag(v, :parameter)
        c = mainnode!(d, :config)
        n = firstnode(d, v)
        link!(d, c, n)
    end
end
add!(d::Dependency, V::Vector{VarInfo}) = begin
    for v in V
        add!(d, v)
    end
end
# Topologically sort dependency nodes; error out when any cycle exists.
Base.sort(d::Dependency) = begin
    cycles = Graphs.simplecycles(d.g)
    if !isempty(cycles)
        names = [[d.N[i].info.name for i in c] for c in cycles]
        error("no cyclic dependency allowed: $names")
    end
    order = Graphs.topological_sort_by_dfs(d.g)
    map(i -> d.N[i], order)
end
# Human-readable label for a node: step marker prefix + variable name (or alias).
label(n::VarNode; alias=false) = begin
    i = n.info
    chosen = (alias && !isnothing(i.alias)) ? i.alias : i.name
    string(n.step) * string(chosen)
end
labels(d::Dependency; kw...) = [label(n; kw...) for n in d.N]
# Dependency graphs draw all edges with the default style.
edgestyle(d::Dependency, a::VarNode, b::VarNode) = ""
# Map (src, dst) vertex pairs to their edge style strings.
edgestyles(d::Dependency; kw...) = Dict(
    (src(e), dst(e)) => edgestyle(d, d.N[src(e)], d.N[dst(e)])
    for e in edges(d.g)
)
# Compact one-line representation.
Base.show(io::IO, d::Dependency) = print(io, "Dependency")
# Pretty form: topologically sorted node labels joined by arrows, colorized
# when the output device supports color.
Base.show(io::IO, ::MIME"text/plain", d::Dependency) = begin
    color = get(io, :color, false)
    VC = tokencolor(VarColor(); color)
    MC = tokencolor(MiscColor(); color)
    print(io, MC("["))
    print(io, join(VC.(label.(sort(d))), MC(" → ")))
    print(io, MC("]"))
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1990 | abstract type Graph end
# Fallback: a Graph is its own underlying graph unless overridden.
graph(g::Graph) = g
# Default implementations; concrete graph types override these.
labels(g::Graph; kw...) = []
edgestyles(g::Graph; kw...) = Dict()
# Default Graphviz style settings, overridable via keyword arguments.
graphstyle(g::Graph; kw...) = begin
    defaults = Dict(
        :ratio => 0.5,
        :ranksep => 0.2,
        :margin => 0.03,
        :fontname => "Courier",
        :fontsize => 9,
        :arrowsize => 0.2,
        :penwidth => 0.2,
    )
    merge(defaults, Dict(kw))
end
# Render graph `g` into Graphviz DOT source text.
makedot(g::Graph; style=()) = begin
    # one `i [label="l"]` line per node
    node(i, l) = """$i [label="$l"]\n"""
    N = [node(i, l) for (i, l) in enumerate(labels(g; style...))]
    # one `a -> b [style="..."]` line per edge (empty style when unspecified)
    edge(a, b) = """$a -> $b [style="$(get(ES, (a, b), ""))"]\n"""
    ES = edgestyles(g; style...)
    E = [edge(e.src, e.dst) for e in edges(graph(g))]
    GS = graphstyle(g; style...)
    """
    digraph {
        ratio=$(GS[:ratio])
        ranksep=$(GS[:ranksep])
        node[
            width=0
            height=0
            margin=$(GS[:margin])
            shape=plaintext
            fontname=$(GS[:fontname])
            fontsize=$(GS[:fontsize])
        ]
        edge [
            arrowsize=$(GS[:arrowsize])
            penwidth=$(GS[:penwidth])
        ]
        $(N...)
        $(E...)
    }
    """
end
# Write the DOT representation of `g` to a temporary file; return its path.
writedot(g::Graph; kw...) = writedot(tempname(), g; kw...)
# Write the DOT representation of `g` to `name` (".dot" appended if absent).
writedot(name::AbstractString, g::Graph; style=()) = begin
    path = endswith(name, ".dot") ? name : name * ".dot"
    write(path, makedot(g; style))
    path
end
import Graphviz_jll
# Render graph `g` to an image file via the Graphviz `dot` binary.
# `format` defaults to the extension of `name`; when given explicitly, the
# matching extension is appended only if `name` does not already carry it.
writeimage(name::AbstractString, g::Graph; format=nothing, style=()) = begin
    ext = splitext(name)[2]
    if isnothing(format)
        # infer format from the file extension (strip the leading dot)
        format = ext[2:end]
        isempty(format) && error("format unspecified")
    else
        format = string(format)
        # fix: `ext` includes the leading dot, so the old `ext != format`
        # comparison was always true and duplicated the extension
        # (e.g. "a.svg" with format=:svg became "a.svg.svg")
        ext != "." * format && (name *= "." * format)
    end
    dot = writedot(g; style)
    cmd = `$(Graphviz_jll.dot()) -T$format $dot -o $name`
    success(cmd) || error("cannot execute: $cmd")
    name
end
# Inline SVG rendering (e.g. for notebooks): render to a temp file, then echo
# its contents to `io`.
Base.show(io::IO, ::MIME"image/svg+xml", g::Graph) = begin
    f = writeimage(tempname(), g; format=:svg)
    s = read(f, String)
    print(io, s)
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 7551 | import Markdown
"""
    look(s[, k])

Look up information about system or variable. Both system type `S` and instance `s` are accepted. For looking up a variable, the name of variable `k` needs to be specified in a symbol.

See also: [`@look`](@ref), [`dive`](@ref)

# Arguments
- `s::Union{System,Type{<:System}}`: target system.
- `k::Symbol`: name of variable.

# Examples
```julia-repl
julia> "my system"
       @system S(Controller) begin
           "a param"
           a => 1 ~ preserve(parameter)
       end;

julia> s = instance(S);

julia> look(s)
[doc]
  my system

[system]
S
  context = <Context>
  config = <Config>
  a = 1.0

julia> look(s, :a)
[doc]
  a param

[code]
  a => 1 ~ preserve(parameter)

[value]
1.0
```
"""
look(a...; kw...) = look(stdout, MIME("text/plain"), a...; kw...)
# Fallback when no specific method matches the argument combination.
look(io::IO, m::MIME, a...; kw...) = error("undefined look: $a")
# Symbol lookup inside a module (e.g. look(Main, :S)).
look(m::Module, s::Symbol; kw...) = look(getfield(m, s); kw...)
# System (type or instance): doc section followed by the system summary.
look(io::IO, m::MIME, s::Union{S,Type{S}}; header=true, doc=true, system=true, kw...) where {S<:System} = begin
    print(io, join(filter!(!isempty, strip.([
        doc ? buf2str(io -> lookdoc(io, m, s; header, kw...)) : "",
        system ? buf2str(io -> looksystem(io, m, s; header, kw...)) : "",
    ])), "\n\n"))
end
# Variable of a system type: doc section followed by the source code section.
look(io::IO, m::MIME, S::Type{<:System}, k::Symbol; header=true, doc=true, code=true, kw...) = begin
    print(io, join(filter!(!isempty, strip.([
        doc ? buf2str(io -> lookdoc(io, m, S, k; header, kw...)) : "",
        code ? buf2str(io -> lookcode(io, m, S, k; header, kw...)) : "",
    ])), "\n\n"))
end
# Variable of a system instance: type-level sections plus the current value.
look(io::IO, m::MIME, s::S, k::Symbol; header=true, value=true, kw...) where {S<:System} = begin
    print(io, join(filter!(!isempty, strip.([
        buf2str(io -> look(io, m, S, k; header, kw...)),
        value ? buf2str(io -> lookvalue(io, m, s, k; header, kw...)) : "",
    ])), "\n\n"))
end
# Print a dimmed section header like "[doc]" when `header` is enabled.
lookheader(io::IO, ::MIME, s; header=true, kw...) = begin
    header && printstyled(io, s * "\n", color=:light_black)
end
# Render the docstring of system `S`; silently omits the section when absent.
lookdoc(io::IO, m::MIME, ::Union{S,Type{S}}; header=false, kw...) where {S<:System} = begin
    lookheader(io, m, "[doc]"; header)
    try
        #HACK: mimic REPL.doc(b) with no dynamic concatenation
        md = Docs.formatdoc(fetchdocstr(S))
        show(io, m, md)
    catch
    end
end
# Plain-text system summary: name, then each field (alias, current value).
looksystem(io::IO, m::MIME"text/plain", s::Union{S,Type{S}}; header=false, kw...) where {S<:System} = begin
    lookheader(io, m, "[system]"; header)
    printstyled(io, namefor(S), color=:light_magenta)
    for (n, a) in fieldnamesalias(S)
        print(io, "\n  ")
        printstyled(io, uncanonicalname(n), color=:light_blue)
        !isnothing(a) && printstyled(io, " (", uncanonicalname(a), ")", color=:light_black)
        # values only exist on instances, not on types
        s isa Type && continue
        printstyled(io, " = ", color=:light_black)
        print(io, labelstring(s[n]))
    end
end
# HTML system summary as a table (name / alias / value columns).
looksystem(io::IO, m::MIME"text/html", s::Union{S,Type{S}}; header=false, kw...) where {S<:System} = begin
    lookheader(io, m, "[system]"; header)
    println(io, "<table style=\"font-family: monospace\">")
    println(io, "<tr style=\"background-color: transparent\">")
    println(io, "<td colspan=\"4\" style=\"text-align: left; padding: 2px; padding-left: 0px; color: rebeccapurple\">$(namefor(S))</th>")
    println(io, "</tr>")
    for (n, a) in fieldnamesalias(S)
        c1 = uncanonicalname(n)
        c2 = isnothing(a) ? "" : "($(uncanonicalname(a)))"
        c3 = isa(s, Type) ? "" : "="
        c4 = isa(s, Type) ? "" : Markdown.htmlesc(labelstring(s[n]))
        print(io, "<tr style=\"background-color: transparent\">")
        print(io, "<td style=\"text-align: left; padding: 2px; padding-left: 20px; color: royalblue\">$c1</td>")
        print(io, "<td style=\"text-align: left; padding: 2px 0px 2px 0px; color: gray\">$c2</td>")
        print(io, "<td style=\"text-align: center; padding: 2px 10px 2px 10px; color: gray\">$c3</td>")
        print(io, "<td style=\"text-align: left; padding: 2px;\">$c4</td>")
        println(io, "</tr>")
    end
    println(io, "</table>")
end
# Render the field docstring for variable `k`; `excerpt` keeps only the first line.
lookdoc(io::IO, m::MIME, ::Union{S,Type{S}}, k::Symbol; header=false, excerpt=false, kw...) where {S<:System} = begin
    lookheader(io, m, "[doc]"; header)
    #HACK: mimic REPL.fielddoc(b, k) with no default description
    docstr = fetchdocstr(S)
    isnothing(docstr) && return
    n = canonicalname(k, S)
    ds = get(docstr.data[:fields], n, nothing)
    isnothing(ds) && return
    md = ds isa Markdown.MD ? ds : Markdown.parse(ds)
    s = if excerpt
        ts = Markdown.terminline_string(io, md)
        split(strip(ts), '\n')[1] |> Text
    else
        md
    end
    show(io, m, s)
end
# Show the highlighted source line that declared variable `k`.
lookcode(io::IO, m::MIME, ::Union{S,Type{S}}, k::Symbol; header=false, kw...) where {S<:System} = begin
    lookheader(io, m, "[code]"; header)
    d = dependency(S)
    n = canonicalname(k, S)
    v = d.M[n]
    Highlights.stylesheet(io, m, Highlights.Themes.DefaultTheme)
    Highlights.highlight(io, m, "  " * string(v.line), Highlights.Lexers.JuliaLexer)
end
# Show the current value of variable `k` in instance `s`.
lookvalue(io::IO, m::MIME, s::System, k::Symbol; header=false, kw...) = begin
    lookheader(io, m, "[value]"; header)
    n = canonicalname(k, s)
    show(io, m, s[n])
end
# Capture everything `f` writes to an IO into a String.
# IOContext properties (e.g. :color) can be customized via keyword arguments.
buf2str(f; color=true, kw...) = begin
    buf = IOBuffer()
    ctx = IOContext(buf, Dict(:color => color, kw...)...)
    f(ctx)
    String(take!(buf))
end
using MacroTools: @capture
"""
    @look ex
    @look s[, k]

Macro version of `look` supports a convenient way of accessing variable without relying on symbol. Both `@look s.a` and `@look s a` work the same as `look(s, :a)`.

See also: [`look`](@ref)

# Examples
```julia-repl
julia> "my system"
       @system S(Controller) begin
           "a param"
           a => 1 ~ preserve(parameter)
       end;

julia> @look S.a
[doc]
  a param

[code]
  a => 1 ~ preserve(parameter)
```
"""
macro look(ex)
    if @capture(ex, s_.k_(args__))
        # call form `s.k(args...)`: forward to value() preserving kwargs
        # f: extract keyword arguments from a call-argument expression
        f(x) = begin
            if isexpr(x, :parameters)
                x.args
            elseif isexpr(x, :kw)
                [x]
            else
                []
            end
        end
        # g: re-wrap each kwarg with its value escaped into caller scope
        g(x) = begin
            Expr(:kw, x.args[1], esc(x.args[2]))
        end
        a = filter(x -> !isexpr(x, :parameters, :kw), args)
        kw = args .|> f |> Iterators.flatten .|> g
        :(Cropbox.value($(esc(s)), $(Meta.quot(k)), $(a...); $(kw...)))
    elseif @capture(ex, s_.k_)
        # dotted form `s.k`: look up variable `k` of system `s`
        :(Cropbox.look($(esc(s)), $(Meta.quot(k))))
    else
        :(Cropbox.look($(esc(ex))))
    end
end
macro look(s, k)
    :(Cropbox.look($(esc(s)), $(Meta.quot(k))))
end
# Locate the DocStr attached to system type `S` across all documented modules;
# return `nothing` when S carries no docstring.
fetchdocstr(S::Type{<:System}) = begin
    binding = Docs.Binding(scopeof(S), nameof(typefor(S)))
    for mod in Docs.modules
        meta = Docs.meta(mod)
        if haskey(meta, binding)
            return meta[binding].docs[Union{}]
        end
    end
    nothing
end
# Assemble the rendered documentation for system `S`: its docstring followed
# by a bullet list of documented fields (with alias and first-line excerpt).
getdoc(S::Type{<:System}) = begin
    docstr = fetchdocstr(S)
    isnothing(docstr) && return Markdown.parse("""
    No documentation found.

    Type `@look $(scopeof(S)).$(namefor(S))` for more information.
    """)
    b = IOBuffer()
    io = IOContext(b, :color => true)
    println(io, only(docstr.text))
    fields = docstr.data[:fields]
    if !isempty(fields)
        for (n, a) in fieldnamesalias(S)
            # skip fields without their own docstring
            !haskey(fields, n) && continue
            ds = get(docstr.data[:fields], n, nothing)
            isnothing(ds) && continue
            entry = isnothing(a) ? "- `$n`" : "- `$n` (`$a`)"
            excerpt = split(strip(ds), '\n')[1]
            println(io, entry * ": " * excerpt)
        end
    end
    String(take!(b)) |> Markdown.parse
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 36904 | using MacroTools: MacroTools, isexpr, isline, @capture, @q
using Setfield: @set
# Parsed metadata for one variable declaration inside a `@system` block.
# Type parameter `S` is the state kind as a Symbol (Nothing for stateless).
struct VarInfo{S<:Union{Symbol,Nothing}}
    # name of the declaring system
    system::Symbol
    # canonical variable name
    name::Symbol
    # optional alias name
    alias::Union{Symbol,Nothing}
    # positional dependency arguments
    args::Vector
    # keyword arguments
    kwargs::Vector
    body#::Union{Expr,Symbol,Nothing}
    # state kind (e.g. :Preserve) or nothing
    state::S
    # declared value type expression
    type::Union{Symbol,Expr,Nothing}
    # tag name => tag value (true for bare flags)
    tags::Dict{Symbol,Any}
    # original declaration expression
    line::Expr
    linenumber::LineNumberNode
    docstring::String
end
# Multi-line debug dump of every field of a VarInfo.
Base.show(io::IO, v::VarInfo) = begin
    for (field, value) in (("system", v.system), ("name", v.name), ("alias", v.alias))
        println(io, "$field: $value")
    end
    println(io, "func ($(repr(v.args)); $(repr(v.kwargs))) = $(repr(v.body))")
    println(io, "state: $(repr(v.state))")
    println(io, "type: $(repr(v.type))")
    for (key, val) in v.tags
        println(io, "tag $key = $(repr(val))")
    end
    for (field, value) in (("line", v.line), ("linenumber", v.linenumber), ("docstring", v.docstring))
        println(io, "$field: $value")
    end
end
# Parse a declaration line into its components and build a VarInfo.
VarInfo(system::Symbol, line::Expr, linenumber::LineNumberNode, docstring::String, scope::Module, substs::Dict) = begin
    n, a, as, kws, b, s, st, dt, tgs = parseline(line, scope)
    genvarinfo(system, n, a, as, kws, b, s, st, dt, tgs, line, linenumber, docstring, scope, substs)
end
# Anything that isn't an Expr is a syntax error in a @system body.
VarInfo(system, line, linenumber, a...) = error("syntax error ($system): \"$line\" ($linenumber)")
# Decompose one declaration into (name, alias, args, kwargs, body, state,
# static type, dynamic type, tags) via successive pattern captures.
parseline(line, scope) = begin
    # name[(args..; kwargs..)][: alias] [=> body] [~ [state][::stype|<:dtype][(tags..)]]
    @capture(bindscope(line, scope), (decl_ ~ deco_) | decl_)
    @capture(deco,
        (state_::stype_(tags__)) | (::stype_(tags__)) | (state_::stype_) | (::stype_) |
        (state_<:dtype_(tags__)) | (<:dtype_(tags__)) | (state_<:dtype_) | (<:dtype_) |
        state_(tags__) | state_
    )
    @capture(decl, (def1_ => body_) | def1_)
    @capture(def1, (def2_: alias_) | def2_)
    @capture(def2, name_(args__; kwargs__) | name_(; kwargs__) | name_(args__) | name_)
    (name, alias, args, kwargs, body, state, stype, dtype, tags)
end
# Normalize each parsed component and assemble the VarInfo struct.
genvarinfo(system, n, a, as, kws, b, s, st, dt, tgs, line, linenumber, docstring, scope, substs) = begin
    name = parsename(n, system)
    alias = parsealias(a, system)
    args = parseargs(as, system)
    kwargs = parsekwargs(kws)
    body = parsebody(b)
    state = parsestate(s)
    type = parsetype(st, dt, state, scope, substs)
    tags = parsetags(tgs; name, alias, args, kwargs, system, state, type)
    try
        VarInfo{typeof(state)}(system, name, alias, args, kwargs, body, state, type, tags, line, linenumber, docstring)
    catch
        error("unrecognized variable declaration: $line")
    end
end
#HACK: experimental support for scope placeholder `:$`
bindscope(l, s::Module) = MacroTools.postwalk(x -> @capture(x, :$) ? nameof(s) : x, l)
parsename(name, system) = canonicalname(name, system)
parsealias(alias, system) = parsename(alias, system)
# Canonicalize a name: private `_a` in system S becomes `__S__a`.
canonicalname(n::Symbol, s) = Symbol(isprivatename(n) ? "__$(s)_$(n)" : n)
canonicalname(n::Symbol, ::Union{S,Type{S}}) where {S<:System} = canonicalname(n, namefor(S))
canonicalname(n, _) = n
# Split a canonical name `__System__name` into (system, private name) symbols;
# non-canonical names yield (nothing, n) unchanged.
splitcanonicalname(n) = begin
    captured = match(r"__(.+)__(.+)", string(n))
    if captured === nothing
        (nothing, n)
    else
        (Symbol(captured[1]), Symbol('_' * captured[2]))
    end
end
# Strip canonicalization, keeping only the (private) variable name.
uncanonicalname(n) = splitcanonicalname(n)[2]
# Strip canonicalization only when the name belongs to system `s`.
uncanonicalname(n, s) = begin
    sys, name = splitcanonicalname(n)
    sys == s ? name : Symbol(n)
end
# Display label: "(System) _name" for canonical names, plain name otherwise.
canonicalnamelabel(n) = begin
    sys, name = splitcanonicalname(n)
    sys === nothing ? string(name) : "($sys) $name"
end
# A private name starts with exactly one underscore (`_a` yes, `__b` no).
isprivatename(n) = begin
    str = string(n)
    startswith(str, "_") && !startswith(str, "__")
end
# Drop the single leading underscore from a private name; others pass through.
privatename(n) = Symbol(isprivatename(n) ? string(n)[2:end] : n)
# Canonicalize every symbol inside expression `ex` for system `s`.
bindname(ex, s) = MacroTools.postwalk(x -> x isa Symbol ? canonicalname(x, s) : x, ex)
#TODO: prefixscope to args/kwargs type specifier?
parseargs(args, system) = parsearg.(args, system)
#HACK: support private variable name in the dependency list
# A bare symbol becomes `local_name = canonical_name`.
parsearg(a::Symbol, system) = Expr(:kw, privatename(a), canonicalname(a, system))
# A `k=v` binding keeps its local name and canonicalizes the referenced value.
parsearg(a::Expr, system) = @capture(a, k_=v_) ? Expr(:kw, k, canonicalname(v, system)) : a
parseargs(::Nothing, _) = []
parsekwargs(kwargs) = kwargs
parsekwargs(::Nothing) = []
# Pass the body expression through unchanged.
# (Return-statement checking is intentionally disabled: a postwalk-based check
# proved too aggressive for local-scope `return`; a safe translation of
# `return` remains a TODO.)
parsebody(body) = body
parsebody(::Nothing) = nothing
#TODO: make `nothing` an alias of generic state `pass` which may share some logics with `bring`
# Map a state keyword (e.g. `preserve`) to its state type name (`:Preserve`).
parsestate(state) = typestate(Val(state))
typestate(::Val{S}) where {S} = Symbol(uppercasefirst(string(S)))
typestate(::Val{nothing}) = nothing
# No explicit type: fall back to the state's default type tag.
parsetype(::Nothing, ::Nothing, state, _, _) = typetag(Val(state))
# `::T` declares a static (concrete) type; `<:T` declares a dynamic one.
parsetype(stype, ::Nothing, _, args...) = parsetype(Val(:static), stype, args...)
parsetype(::Nothing, dtype, _, args...) = parsetype(Val(:dynamic), dtype, args...)
parsetype(trait, type, scope, substs) = begin
    # apply type-alias substitution declared on the enclosing @system, if any
    if haskey(substs, type)
        s = substs[type]
        type = s.type
        scope = s.scope
        #HACK: force static type when substituion takes place
        trait = Val(:static)
    end
    parsetype(type, scope, trait)
end
parsetype(type, scope, trait) = begin
    # `T[]` declares a vector of element type T
    # NOTE(review): the assignment to `T` is redundant; the if-expression's
    # value is returned either way
    T = if @capture(type, elemtype_[])
        :(Vector{$(gentype(elemtype, scope, trait))})
    else
        gentype(type, scope, trait)
    end
end
# Scope-qualify the type expression, then wrap per static/dynamic trait.
gentype(type, scope, trait) = genactualtype(genscopedtype(type, scope), trait)
genscopedtype(type, scope) = begin
    l = Symbol[]
    add(t::Symbol) = push!(l, t)
    add(t) = nothing
    # `{A | B}` braces declare a Union of scoped types
    isexpr(type, :braces) && MacroTools.postwalk(type) do ex
        @capture(ex, $:|(T__)) && add.(T)
        ex
    end
    conv(t) = prefixscope(parsetypealias(t), scope)
    isempty(l) ? conv(type) : :(Union{$(conv.(l)...)})
end
# Static trait wraps the type in a runtime `typefor` lookup; dynamic keeps it as-is.
genactualtype(t, ::Val{:static}) = :($C.typefor($t))
genactualtype(t, ::Val{:dynamic}) = t
# Resolve shorthand type aliases (e.g. `int` -> `Int64`) via Val dispatch;
# unknown names and non-symbol expressions pass through unchanged.
parsetypealias(t::Symbol) = parsetypealias(Val(t), t)
parsetypealias(t) = t
parsetypealias(::Val{:int}, _) = :Int64
parsetypealias(::Val{:uint}, _) = :UInt64
parsetypealias(::Val{:float}, _) = :Float64
parsetypealias(::Val{:bool}, _) = :Bool
parsetypealias(::Val{:sym}, _) = :Symbol
parsetypealias(::Val{:str}, _) = :String
parsetypealias(::Val{:∅}, _) = :Nothing
parsetypealias(::Val{:_}, _) = :Missing
parsetypealias(::Val{:date}, _) = :($C.Dates.Date)
parsetypealias(::Val{:datetime}, _) = :($C.TimeZones.ZonedDateTime)
parsetypealias(::Val, t) = t
using Dates: Date
using TimeZones: ZonedDateTime
# Decompose a dotted/curly type expression into its name path and type params,
# e.g. `A.B{T}` -> (l = [:A, :B], c = [:T]).
extractscope(x) = begin
    l = []
    # peel off curly type parameters first
    if @capture(x, a_{c__})
        x = a
    end
    # walk the dotted path from the right, accumulating components
    while true
        if @capture(x, a_.b_)
            push!(l, b)
            if isexpr(a)
                x = a
            else
                push!(l, a)
                break
            end
        else
            push!(l, x)
            break
        end
    end
    (; l=reverse(l), c)
end
# Rebuild a dotted path (and optional curly params) from extractscope output.
genscope(lc) = genscope(lc.l, lc.c)
genscope(l, ::Nothing) = reduce((a, b) -> Expr(:., a, QuoteNode(b)), l)
genscope(l, c) = :($(genscope(l, nothing)){$(c...)})
# Qualify type expression `x` with module (or symbol) prefix `p`.
prefixscope(x, p::Module) = prefixscope(x, nameof(p))
prefixscope(x, p::Symbol) = let (l, c) = extractscope(x)
    genscope([p, l...], c)
end
# Resolve a dotted name like "A.B" by repeated getfield starting from `m`.
getsystem(m::Module, i) = reduce((a, b) -> getfield(a, b), split(String(i), ".") .|> Symbol, init=m)
parsetags(::Nothing; a...) = parsetags([]; a...)
# Parse the tag list of a declaration into a Dict, binding names to the
# owning system, handling `@u_str` units, and validating supported tags.
parsetags(tags::Vector; name, system, state, type, a...) = begin
    s = Val(state)
    d = Dict{Symbol,Any}()
    for t in tags
        if @capture(t, k_=v_)
            # tag with a value; names inside are canonicalized for `system`
            d[k] = bindname(v, system)
        elseif @capture(t, @u_str(v_))
            # bare unit literal is shorthand for `unit = u"..."`
            d[:unit] = @q @u_str($v)
        else
            # bare tag is a boolean flag
            d[t] = true
        end
    end
    T = supportedtags(s)
    # unit-capable states always carry a `unit` entry (possibly nothing)
    (:unit ∈ T) && !haskey(d, :unit) && (d[:unit] = nothing)
    d[:_type] = esc(type)
    updatetags!(d, s; a...)
    # reject tags not supported by this state (internal `_` tags are exempt)
    for (k, v) in d
        if !isnothing(state) && !(startswith(string(k), "_") || (k ∈ T))
            error("unsupported tag: $k = $v for $system => $name ~ $state")
        end
    end
    d
end
# Default value-type expression for each state kind; most states are Float64.
typetag(::Val) = :Float64
# Advance stays Float64 to avoid unexpected promotion (e.g. Rational) with :Int.
typetag(::Val{:Advance}) = :Float64
typetag(::Val{:Flag}) = :Bool
typetag(::Val{:Produce}) = :(Vector{System})
typetag(::Val{:Provide}) = :(Cropbox.DataFrame)
typetag(::Val{nothing}) = nothing
# States declare their own supported tag lists; the default is none.
supportedtags(::Val) = ()
# Keep only the tags a state's constructor accepts, plus internal `_` tags.
filterconstructortags(v::VarInfo) = begin
    stags = constructortags(Val(v.state))
    filter(v.tags) do (k, v)
        isnothing(stags) ? true : k in stags ||
        startswith(String(k), "_")
    end
end
# States may post-process their tag dict; default is a no-op.
updatetags!(d, ::Val; _...) = nothing
# Tag queries: truthy value, tuple-of-alternatives, varargs, and curried forms.
istag(v::VarInfo, t) = get(v.tags, t, false)
istag(v::VarInfo, t::Tuple) = any(istag.(Ref(v), t))
istag(v::VarInfo, t...) = istag(v, t)
istag(t) = Base.Fix2(istag, t)
hastag(v::VarInfo, t) = haskey(v.tags, t)
gettag(v::VarInfo, t, d=nothing) = get(v.tags, t, d)
# A variable is addressable by its name and (optionally) its alias.
Base.names(v::VarInfo) = let n = v.name, a = v.alias
    isnothing(a) ? [n] : [n, a]
end
# Tag a LineNumberNode's file with the variable name for clearer stack traces.
linenumber(v::VarInfo, prefix="", postfix="") = begin
    n = v.linenumber
    @set n.file = Symbol(n.file, ":$prefix|", v.name, "|$postfix")
end
####
# Update phases of a variable within one time step.
abstract type VarStep end
struct PreStep <: VarStep end
struct MainStep <: VarStep end
struct PostStep <: VarStep end
# Step markers used when labeling nodes (main step prints nothing).
Base.print(io::IO, ::PreStep) = print(io, "∘")
Base.print(io::IO, ::MainStep) = print(io, "")
Base.print(io::IO, ::PostStep) = print(io, "⋆")
# A node pairs a variable's info with one of its update steps.
struct Node{I,S}
    info::I
    step::S
end
const VarNode = Node{VarInfo,VarStep}
# The step preceding `n` for the same variable; pre has no predecessor.
prev(n::VarNode) = begin
    if n.step == MainStep()
        VarNode(n.info, PreStep())
    elseif n.step == PostStep()
        VarNode(n.info, MainStep())
    elseif n.step == PreStep()
        error("Pre-step node can't have a previous node: $n")
    end
end
####
# Module name used when generated code must qualify Cropbox functions.
const C = :Cropbox
# Empty expression block, used as a "no code" placeholder in generation.
const ϵ = @q begin end
# Field type for a stateless variable: its declared type as-is.
genvartype(v::VarInfo{Nothing}) = gettag(v, :_type)
# Field type for a stateful variable: the State wrapper parameterized by the
# value type (and unit), with special handling for override/ref.
genvartype(v::VarInfo{Symbol}) = begin
    #HACK: escape hatch for `bring`
    if v.state == :Bring
        return gettag(v, :_type)
    end
    N = gettag(v, :_type)
    U = gettag(v, :unit)
    V = @q $C.valuetype($N, $U)
    T = if istag(v, :override)
        #HACK: support overrides between common state variables
        l = [genvartype(v, Val(t); N, U, V) for t in (:Track, :Preserve, :Flag, :Drive)]
        :(Union{$(l...)})
    else
        genvartype(v, Val(v.state); N, U, V)
    end
    istag(v, :ref) ? @q(StateRef{$V,$T}) : T
end
# Struct field declaration (with docstring and optional alias) for a variable.
genfield(v::VarInfo) = begin
    type = genvartype(v)
    name = symname(v)
    docstring = isempty(v.docstring) ? ϵ : v.docstring
    alias = v.alias
    @q begin
        $docstring
        $name::$type
        $(isnothing(alias) ? ϵ : docstring)
        $(isnothing(alias) ? ϵ : :($alias::$type))
    end
end
genfields(infos) = [genfield(v) for v in infos]
# Collect all field names (from mixins, in reverse order) before generation.
genpredecl(name) = @q _names = $C.names.($C.mixincollect($(esc(name)))) |> reverse |> Iterators.flatten |> collect
gennewargs(infos) = names.(infos) |> Iterators.flatten |> collect
# Generate the initializer for an `override` variable: its value must be
# supplied via constructor kwargs (with a parameter state as fallback).
genoverride(v::VarInfo) = begin
    # override variables are externally supplied; a body makes no sense
    # (also fixes the "funtion" typo in the error message)
    !isnothing(v.body) && error("`override` can't have function body: $(v.body)")
    d = istag(v, :parameter) ? genstate(v) : missing
    gengetkwargs(v, d)
end
# `extern` variables read from constructor kwargs but keep a default.
genextern(v::VarInfo, default) = gengetkwargs(v, default)
# Emit code that looks the variable up (by name or alias) in `__kwargs__`,
# falling back to `default` when absent.
gengetkwargs(v::VarInfo, default) = begin
    # fix: removed a dead `K = [Meta.quot(n) for n in names(v)]` assignment
    # that was immediately overwritten by the line below
    K = names(v)
    @gensym o
    @q let $o = $C.getbynames(__kwargs__, $K)
        ismissing($o) ? $default : $o
    end
end
# Return the value of the first key in `K` present in `d`; otherwise `default`.
getbynames(d, K, default=missing) = begin
    for key in K
        if haskey(d, key)
            return d[key]
        end
    end
    return default
end
# Build the state constructor call for a variable, splicing in name, alias,
# default value, and state-specific constructor tags as keyword arguments.
genstate(v::VarInfo) = begin
    name = Meta.quot(v.name)
    alias = Meta.quot(v.alias)
    value = istag(v, :extern) ? genextern(v, gendefault(v)) : gendefault(v)
    stargs = [:($(esc(k))=$v) for (k, v) in filterconstructortags(v)]
    @q $C.$(v.state)(; _name=$name, _alias=$alias, _value=$value, $(stargs...))
end
# Declaration for a stateless variable: either an override, a direct type
# construction from bound arguments, or evaluation of its body in a `let`.
gennostate(v::VarInfo) = begin
    args = emitfuncargpair.(v.args; value=false)
    if istag(v, :option)
        push!(args, @q $(esc(:option)) = __kwargs__)
    end
    decl = if istag(v, :override)
        genoverride(v)
    elseif isnothing(v.body)
        @q $(esc(v.type))(; $(args...))
    else
        @q let $(args...); $(esc(v.body)) end
    end
    # implicit :expose
    decl = :($(esc(v.name)) = $decl)
    gendecl(v, decl)
end
using DataStructures: OrderedSet
# Generate declarations for all unique variables (deduplicated, in node order).
gendecl(N::Vector{VarNode}) = gendecl.(OrderedSet([n.info for n in N]))
gendecl(v::VarInfo) = begin
    #HACK: escape hatch for `bring`
    if v.state == :Bring
        return gennostate(v)
    end
    decl = if istag(v, :override)
        genoverride(v)
    else
        genstate(v)
    end
    # wrap `ref`-tagged states so they can be swapped after construction
    decl = istag(v, :ref) ? @q(StateRef($decl)) : decl
    gendecl(v, decl)
end
gendecl(v::VarInfo{Nothing}) = begin
    gennostate(v)
end
# Bind the generated declaration to the variable's name (and alias), with a
# line-number marker for error reporting.
gendecl(v::VarInfo, decl) = @q begin
    $(linenumber(v, "gendecl"))
    $(v.name) = $decl
    $(isnothing(v.alias) ? ϵ : :($(v.alias) = $(v.name)))
end
#HACK: @capture doesn't seem to support GlobalRef
const DOCREF = GlobalRef(Core, Symbol("@doc"))
# True when `ex` is a `@doc`-wrapped macrocall (docstring attached to a line).
isdoc(ex) = isexpr(ex, :macrocall) && ex.args[1] == DOCREF
# Reconstruct the original source line (with docstring, if any) of a variable
# declared directly in system `name`; lines inherited from mixins yield ϵ.
gensource(v::VarInfo, name) = begin
    v.system != name && return ϵ
    if isempty(v.docstring)
        @q begin $(v.linenumber); $(v.line) end
    else
        Expr(:macrocall, DOCREF, v.linenumber, v.docstring, v.line)
    end
end
gensource(infos, name) = MacroTools.flatten(@q begin $(gensource.(infos, name)...) end)
# Record the concrete Context type among the constants (every system is
# expected to declare exactly one `context` variable — `only` enforces that).
updateconsts(consts, infos) = begin
    #HACK: keep type of Context in case needed for field construction (i.e. timeunit for Accumulate)
    context = filter(v -> v.name == :context, infos) |> only
    merge(consts, Dict(:__Context__ => context.type))
end
# Emit `let`-bindings exposing every constant (own and mixin-inherited) by
# name inside generated code.
genconstpatches(consts, scope, incl) = begin
    @gensym CS
    consts0 = merge(constsof.(_mixincollect(mixinsof(scope, incl)))...)
    K = [keys(consts0)..., keys(consts)...]
    [
        :($CS = $(genconstbase(:(mixinsof($scope, $incl)), consts))),
        (:($k = $CS[$(Meta.quot(k))]) for k in K)...
    ]
end
# Expression merging mixin constants with this system's own constant patches.
genconstbase(M, consts) = :(merge(constsof.(_mixincollect($M))..., Dict($((@q($(Meta.quot(p[1])) => $(esc(p[2]))) for p in consts)...))))
genfieldnamesunique(infos) = Tuple(v.name for v in infos)
genfieldnamesalias(infos) = Tuple((v.name, v.alias) for v in infos)
# Map every name and alias to its unit tag, dropping entries with no key.
genfieldunits(infos) = @q filter!(p -> !isnothing(p[1]), Dict(
    $((@q($(Meta.quot(v.name)) => $(gettag(v, :unit))) for v in infos)...),
    $((@q($(Meta.quot(v.alias)) => $(gettag(v, :unit))) for v in infos)...),
))
# Assemble the full code block defining a system: an abstract user-facing type
# `S`, a hidden concrete mutable struct `_S <: S` with one field per variable,
# and the trait methods (`namefor`, `source`, `fieldunits`, `_update!`, ...)
# that the rest of the framework dispatches on.
genstruct(name, type, infos, consts, substs, incl, scope) = begin
    #FIXME: simple `gensym(name)` leads to very strange invalid redefinition of constant error on Julia 1.6.0
    _S = esc(gensym(Symbol(:_, name)))
    S = esc(name)
    T = esc(type)
    N = Meta.quot(name)
    nodes = sortednodes(infos)
    #HACK: field declarations inside block doesn't work as expected
    #fields = genfields(infos)
    fields = MacroTools.flatten(@q begin $(genfields(infos)...) end).args
    predecl = genpredecl(name)
    decls = gendecl(nodes)
    args = gennewargs(infos)
    source = gensource(infos, name)
    consts = updateconsts(consts, infos)
    constpatches = genconstpatches(consts, scope, incl)
    system = quote
        Core.@__doc__ abstract type $S <: $T end
        $S(; kw...) = $_S(; kw...)
        $C.typefor(::Type{<:$S}) = $_S
        let $(constpatches...)
            Core.@__doc__ mutable struct $_S <: $S
                $(fields...)
                function $_S(; __kwargs__...)
                    $predecl
                    $(decls...)
                    new($(args...))
                end
            end
        end
        $C.namefor(::Type{$_S}) = $C.namefor($S)
        $C.typefor(::Type{$_S}) = $_S
        $C.source(::Type{$_S}) = $(Meta.quot(source))
        $C.constsof(::Type{$_S}) = $(genconstbase(:(mixinsof($S)), consts))
        $C.substsof(::Type{$_S}) = $substs
        $C.mixinsof(::Type{$_S}) = $(mixinsof(scope, incl))
        $C.fieldnamesunique(::Type{$_S}) = $(genfieldnamesunique(infos))
        $C.fieldnamesalias(::Type{$_S}) = $(genfieldnamesalias(infos))
        $C.fieldunits(::Type{$_S}) = let $(constpatches...); $(genfieldunits(infos)) end
        $C.scopeof(::Type{$_S}) = $scope
        $C._update!($(esc(:self))::$_S, ::$C.MainStage) = $(genupdate(nodes, MainStage(); scope))
        $C._update!($(esc(:self))::$_S, ::$C.PreStage) = $(genupdate(infos, PreStage(); scope))
        $C._update!($(esc(:self))::$_S, ::$C.PostStage) = $(genupdate(infos, PostStage(); scope))
        Base.Docs.getdoc(::Type{$S}) = $C.getdoc($_S)
        Base.Docs.getdoc(::Type{$_S}) = $C.getdoc($_S)
        $S
    end
    system #|> MacroTools.flatten
end
# Metadata accessors. Each has three layers: instance -> type, user-facing
# type -> hidden concrete type (via `typefor`), and a base-case for `System`.
# Concrete generated types get their own methods from `genstruct`.
source(s::S) where {S<:System} = source(S)
source(::Type{S}) where {S<:System} = source(typefor(S))
# Base `System` declares the always-present `context` and `config` variables.
source(::Type{System}) = quote
    context ~ ::Context(override)
    config(context) => context.config ~ ::Config
end
source(::Type) = :()
constsof(s::S) where {S<:System} = constsof(S)
constsof(::Type{S}) where {S<:System} = constsof(typefor(S))
constsof(::Type{System}) = Dict()
substsof(s::S) where {S<:System} = substsof(S)
substsof(::Type{S}) where {S<:System} = substsof(typefor(S))
substsof(::Type{System}) = Dict()
mixinsof(s::S) where {S<:System} = mixinsof(S)
mixinsof(::Type{S}) where {S<:System} = mixinsof(typefor(S))
mixinsof(::Type{System}) = (System,)
mixinsof(::Type) = ()
# Resolve mixin symbols to system types within `scope`.
mixinsof(scope::Module, incl) = Tuple(getsystem.(Ref(scope), incl))
using DataStructures: OrderedSet
mixincollect(s::S) where {S<:System} = mixincollect(S)
# Transitively collect all mixins of `S` into an ordered set.
mixincollect(S::Type{<:System}, l=OrderedSet()) = begin
    S in l && return l
    push!(l, S)
    _mixincollect(mixinsof(S), l)
    #HACK: ensure mixins come before composite system
    #TODO: need testsets for mixins/mixincollect
    push!(delete!(l, S), S)
    l
end
_mixincollect(M::Tuple, l=OrderedSet()) = begin
    for m in M
        union!(l, mixincollect(m, l))
    end
    l
end
mixincollect(s) = ()
# First type in `SS` that some mixin of `s` subtypes, or nothing.
mixinof(s, SS::Type{<:System}...) = begin
    M = mixincollect(s)
    for S in SS
        for m in M
            m <: S && return S
        end
    end
    nothing
end
# Pair `s` with `Val(name-of-matching-mixin)` for dispatch-by-mixin.
mixindispatch(s, SS::Type{<:System}...) = begin
    m = mixinof(s, SS...)
    n = isnothing(m) ? m : namefor(m)
    (s, Val(n))
end
# Resolve a system name (Symbol) in module `m` to its concrete type.
typefor(s::Symbol, m::Module=Main) = getsystem(m, s) |> typefor
typefor(T) = T
# Concrete type of field `k` of system `S`.
vartype(::Type{S}, k) where {S<:System} = fieldtype(typefor(S), k) |> typefor
# Field-metadata accessors; base cases for `System`, instance and type layers.
fieldnamesunique(::Type{System}) = ()
fieldnamesalias(::Type{System}) = ()
fieldunits(::Type{System}) = Dict()
fieldnamesunique(::S) where {S<:System} = fieldnamesunique(S)
fieldnamesalias(::S) where {S<:System} = fieldnamesalias(S)
fieldunits(::S) where {S<:System} = fieldunits(S)
fieldnamesunique(::Type{S}) where {S<:System} = fieldnamesunique(typefor(S))
fieldnamesalias(::Type{S}) where {S<:System} = fieldnamesalias(typefor(S))
fieldunits(::Type{S}) where {S<:System} = fieldunits(typefor(S))
# Unit of field `k`, or missing when unknown.
fieldunit(S, k) = get(fieldunits(S), k, missing)
fieldunit(s::Symbol, k) = begin
    #HACK: if no system type found, assume unit is missing
    #TODO: forbid implicit Symbol -> System conversion in the future
    try
        fieldunit(typefor(s), k)
    catch
        missing
    end
end
# Names of fields whose type is itself a System.
subsystemsof(::Type{System}) = ()
subsystemsof(::S) where {S<:System} = subsystemsof(S)
subsystemsof(::Type{S}) where {S<:System} = filter(n -> fieldtype(typefor(S), n) <: System, fieldnamesunique(S))
scopeof(::Type{System}) = @__MODULE__
scopeof(::S) where {S<:System} = scopeof(S)
scopeof(::Type{S}) where {S<:System} = scopeof(typefor(S))
# Stages of a simulation update; Main is the ordinary per-step update, while
# Pre/Post bracket it (printed as †/‡, Main prints empty).
abstract type UpdateStage end
struct PreStage <: UpdateStage end
struct MainStage <: UpdateStage end
struct PostStage <: UpdateStage end
Base.print(io::IO, ::PreStage) = print(io, "†")
Base.print(io::IO, ::MainStage) = print(io, "")
Base.print(io::IO, ::PostStage) = print(io, "‡")
# Update every system in a collection; non-system values pass through.
_update!(S::Vector{<:System}, t) = begin
    for s in S
        update!(s, t)
    end
end
_update!(s, t) = s
# Public entry point; defaults to the main stage.
update!(s, t=MainStage()) = _update!(s, t)
# Parse the `@system` macro head into (name, consts, substs, mixin list, type).
parsehead(head; scope) = begin
    # @system name[{patches..}][(mixins..)] [<: type]
    @capture(head, (decl_ <: type_) | decl_)
    @capture(decl, name_{patches__}(mixins__) | name_{patches__} | name_(mixins__) | name_)
    type = isnothing(type) ? :System : type
    patches = isnothing(patches) ? [] : patches
    mixins = isnothing(mixins) ? [] : mixins
    consts, substs = parsepatches(patches, scope)
    # `System` is always an implicit mixin, listed first
    incl = [:System]
    for m in mixins
        push!(incl, Symbol(m))
    end
    (; name, consts, substs, incl, type)
end
# Split patch expressions into type substitutions (`Old => New`) and constant
# definitions (`name = value`); anything else is an error.
parsepatches(patches::Vector, scope::Module) = begin
    consts = Dict()
    substs = Dict()
    for p in patches
        if @capture(p, o_ => n_)
            substs[o] = (; type=n, scope)
        elseif @capture(p, o_ = n_)
            consts[o] = n
        else
            error("unsupported patch format: $p")
        end
    end
    (consts, substs)
end
using DataStructures: OrderedDict, OrderedSet
# Entry point: parse the body into VarInfos, then generate the system struct.
gensystem(body; name, consts, substs, incl, type, scope, _...) = genstruct(name, type, geninfos(body; name, substs, incl, scope), consts, substs, incl, scope)
# Build the merged, ordered list of VarInfos for a system declaration,
# combining variables inherited from all mixins with the system's own body.
geninfos(body; name, substs, incl, scope, _...) = begin
    # update type substitution dict cascaded over all participating mixins
    M = _mixincollect(mixinsof(scope, incl))
    substs = merge(substsof.(M)..., substs)
    # Parse one declaration block `b` (from system `s`, scope `sc`) into an
    # ordered name -> VarInfo map; handles `bring` by generating proxy
    # track/preserve variables for the brought-in system's state variables.
    con(b, s, sc) = begin
        d = OrderedDict{Symbol,VarInfo}()
        #HACK: default in case LineNumberNode is not attached
        ln = LineNumberNode(@__LINE__, @__FILE__)
        #HACK: ensure single line expr is still inside a block
        for l in MacroTools.block(b).args
            isline(l) && (ln = l; continue)
            if isdoc(l)
                lnn, ds, l = l.args[2:4]
                isline(lnn) && (ln = lnn)
            else
                ds = ""
            end
            v = VarInfo(s, l, ln, ds, sc, substs)
            n = v.name
            #TODO: consolidate `bring` handling logic
            if v.state == :Bring
                _, _, _, _, _, _, t, _, _ = parseline(v.line, scope)
                m = getsystem(scope, t)
                #HACK: do not require mixin declaration since not all variables may be brought in
                #(m in M) || error("undefined mixin used for `bring`: $t")
                #TODO: iterate over variables from mixins too?
                for (k, v1) in con(source(m), namefor(m), scopeof(m))
                    line = v1.line
                    linenumber = v1.linenumber
                    docstring = ""
                    # proxy variable is a track to the variable `brought` in
                    let (n1, a, as, kws, b, s, st, dt, tgs) = parseline(line, scope)
                        #HACK: bring in only supported state-variables
                        #TODO: support bringing in non-state variables (i.e. pass)
                        if s in (:track, :preserve, :flag, :drive)
                            as = [:($n.$n1)]
                            kws = nothing
                            b = nothing
                            s = :track
                            tgs = isnothing(tgs) ? [] : filter!(x -> @capture(x, @u_str(_)), tgs)
                            #HACK: make preserve(parameter) if needed (i.e. for testing)
                            if istag(v, :parameters)
                                as = nothing
                                s = :preserve
                                push!(tgs, :parameter)
                            end
                            # generate a proxy variable
                            v2 = genvarinfo(t, n1, a, as, kws, b, s, st, dt, tgs, line, linenumber, docstring, scope, substs)
                            # do not overwrite a newer declaration of the variable
                            if !haskey(d, k)
                                d[k] = v2
                            end
                        end
                    end
                end
            end
            v0 = get(d, n, nothing)
            if !isnothing(v0)
                @warn "duplicate variable" system=s name=v.name alias=(v0.alias => v.alias) state=(v0.state => v.state)
            end
            d[n] = v
        end
        d
    end
    # Merge a parsed block into `d`; later declarations replace earlier ones
    # except `hold` placeholders, with special-casing for body-only overrides.
    add!(d, b, s, sc) = begin
        for (n, v) in con(b, s, sc)
            if haskey(d, n)
                v0 = d[n]
                if v.state == :Hold
                    continue
                # support simple body replacement (i.e. `a => 1` without `~ ...` part)
                elseif isnothing(v.state) && isnothing(v.type)
                    v = @set v0.body = v.body
                elseif v0.alias != v.alias && v0.state != :Hold
                    @warn "variable replaced with inconsistent alias" name=v.name system=(v0.system => v.system) alias=(v0.alias => v.alias)
                end
            end
            d[n] = v
        end
    end
    # Mixins first (in collection order), then the system's own body.
    combine() = begin
        d = OrderedDict{Symbol,VarInfo}()
        for i in incl
            S = getsystem(scope, i)
            for m in mixincollect(S)
                add!(d, source(m), namefor(m), scopeof(m))
            end
        end
        add!(d, body, name, scope)
        d
    end
    combine() |> values |> collect
end
geninfos(S::Type{<:System}) = geninfos(source(S); name=namefor(S), substs=substsof(S), incl=Symbol.(mixinsof(S)), scope=scopeof(S))
include("dependency.jl")
# Topologically sort variable nodes by their dependency graph.
sortednodes(infos) = sort(dependency(infos))
"""
    @system name[{patches..}][(mixins..)] [<: type] [decl] -> Type{<:System}
Declare a new system called `name` with new variables declared in `decl` block using a custom syntax. The resultant system is subtype of `System` or a custom `type`. `mixins` allows reusing specification of existing systems to be pasted into the declaration of new system. `patches` may provide type substitution and/or constant definition needed for advanced use.
# Variable
name[(args..; kwargs..)][: alias] [=> expr] [~ [state][::type|<:type][(tags..)]]
- `name`: variable name; usually short abbreviation.
- `args`: automatically bound depending variables
- `kwargs`: custom bound depending variables; used by `call` and `integrate`.
- `alias`: alternative name; long description.
- `expr`: state-specific code snippet; use begin-end block for multiple statements.
- `type`: internal data type; default is Float64 for many, but not all, variables.
- `tags`: state-specific options; `unit`, `min`/`max`, etc.
# States
- `hold`: marks a placeholder for variable shared between mixins.
- `wrap`: passes a state variable to other function as is with no unwrapping its value.
- `advance`: manages a time-keeping variable; `time` and `tick` from `Clock`.
- `preserve`: keeps initially assigned value with no further updates; constants, parameters.
- `tabulate`: makes a two dimensional table with named keys; *i.e.* partitioning table.
- `interpolate`: makes a curve function interpolated with discrete values; *i.e.* soil characteristic curve.
- `track`: evaluates variable expression as is for each update.
- `remember`: keeps tracking variable until a certain condition is met; essentially `track` turning into `preserve`.
- `provide`: manages a table of time-series in DataFrame.
- `drive`: fetches the current value from a time-series; maybe supplied by `provide`.
- `call`: defines a partial function bound with some variables.
- `integrate`: calculates integral using Gaussian method; not for time domain.
- `accumulate`: emulates integration of rate variable over time; essentially Euler method.
- `capture`: calculates difference between integration for each time step.
- `flag`: sets a boolean flag; essentially `track::Bool`.
- `produce`: attaches a new instance of system dynamically constructed; *i.e.* root structure growth.
- `bisect`: solves nonlinear equation using bisection method; *i.e.* gas-exchange model coupling.
- `solve`: solves polynomial equation symbolically; *i.e.* quadratic equations in photosynthesis model.
# Examples
```julia-repl
julia> @system S(Controller) begin
           a => 1 ~ preserve(parameter)
           b(a) ~ accumulate
       end
S
```
"""
macro system(head, body=:(begin end))
    scope = __module__
    gensystem(body; scope, parsehead(head; scope)...)
end
export @system, update!
# Default-value expression for a variable, dispatched on its state.
gendefault(v::VarInfo) = gendefault(v, Val(v.state))
gendefault(v::VarInfo, ::Val) = gendefaultvalue(v)
# Value-transformation stages applied, in order, to a raw value expression:
# sample -> unitfy -> min/max clamp -> round -> when-gate.
gensample(v::VarInfo, x) = @q $C.sample($x)
genunitfy(v::VarInfo, x) = begin
    u = gettag(v, :unit)
    isnothing(u) ? x : @q $C.unitfy($x, $C.value($u))
end
genminmax(v::VarInfo, x) = begin
    l = gettag(v, :min)
    u = gettag(v, :max)
    #TODO: validate (min <= max)
    x = isnothing(l) ? x : @q max($(genunitfy(v, @q $C.value($l))), $x)
    x = isnothing(u) ? x : @q min($x, $(genunitfy(v, @q $C.value($u))))
    x
end
# `round` tag: `true` means `round`; a quoted symbol picks the function
# (e.g. `:floor`); rounding respects `roundunit`/`unit` when available.
genround(v::VarInfo, x) = begin
    f = gettag(v, :round)
    isnothing(f) && return x
    f = if f isa Bool
        f ? :round : return x
    elseif f isa QuoteNode
        f.value
    else
        error("unsupported value for tag `round`: $f")
    end
    U = gettag(v, :roundunit)
    U = isnothing(U) ? gettag(v, :unit) : U
    N = gettag(v, :_type)
    if isnothing(U)
        #HACK: rounding functions with explicit type only supports Integer target
        # https://github.com/JuliaLang/julia/issues/37984
        @q convert($N, $f($x))
    else
        @q $f($C.valuetype($N, $U), $x)
    end
end
# `when` tag: evaluate `x` only when the condition holds, else the init value
# (or a typed zero when no `init` is given).
genwhen(v::VarInfo, x) = begin
    w = gettag(v, :when)
    isnothing(w) && return x
    N = gettag(v, :_type)
    U = gettag(v, :unit)
    i = gettag(v, :init)
    d = if isnothing(i)
        @q zero($C.valuetype($N, $U))
    else
        @q $C.unitfy($C.value($i), $C.value($U))
    end
    @q $C.value($w) ? $x : $d
end
# Parameter lookup: take the configured option when present, else the body.
genparameter(v::VarInfo) = begin
    @gensym o
    @q let $o = $C.option(config, _names, $(names(v)))
        ismissing($o) ? $(genbody(v)) : $o
    end
end
geninit(v::VarInfo) = @q $C.value($(gettag(v, :init)))
# Full default-value pipeline, with each stage individually toggleable;
# `optional`-tagged variables short-circuit to nothing on a nothing base.
gendefaultvalue(v::VarInfo; parameter=false, init=false, sample=true, unitfy=true, minmax=true, round=true, when=true) = begin
    x0 = if parameter && istag(v, :parameter)
        genparameter(v)
    elseif init && hastag(v, :init)
        geninit(v)
    else
        genbody(v)
    end
    x = x0
    sample && (x = gensample(v, x))
    unitfy && (x = genunitfy(v, x))
    minmax && (x = genminmax(v, x))
    round && (x = genround(v, x))
    when && (x = genwhen(v, x))
    if istag(v, :optional)
        @q isnothing($x0) ? nothing : $x
    else
        x
    end
end
# Emit the body of `_update!` for the main stage: bind all fields to locals,
# then run every node's step code; returns `self`.
genupdate(nodes::Vector{VarNode}, ::MainStage; kw...) = @q begin
    $([genupdateinit(n.info; kw...) for n in nodes]...)
    $([genupdate(n; kw...) for n in nodes]...)
    self
end
# Same for pre/post stages, which operate on plain VarInfos.
genupdate(infos::Vector{VarInfo}, t::UpdateStage; kw...) = @q begin
    $([genupdateinit(v; kw...) for v in infos]...)
    $([genupdate(v, t; kw...) for v in infos]...)
    self
end
# Symbol helpers for generated code (current scheme keeps the bare name).
symname(v::VarInfo) = symname(v.system, v.name)
symname(s::Symbol, n::Symbol) = n #Symbol(:_, s, :__, n)
symstate(v::VarInfo) = symname(v) #Symbol(symname(v), :__state)
symsuffix(::T) where {T} = "__$T"
symlabel(v::VarInfo, t, s...) = Symbol(symname(v), symsuffix(t), s...)
symcall(v::VarInfo) = Symbol(v.name, :__call)
# Bind a field (and alias) of `self` to a local variable.
genupdateinit(v::VarInfo; kw...) = begin
    # implicit :expose
    @q begin
        $(v.name) = self.$(v.name)
        $(isnothing(v.alias) ? ϵ : :($(v.alias) = $(v.name)))
    end
end
genupdate(n::VarNode; kw...) = genupdate(n.info, n.step; kw...)
# Per-variable update snippet with a line marker and a `@label` that the
# context post-step can jump back to.
genupdate(v::VarInfo, t; kw...) = @q begin
    $(linenumber(v, "genupdate", t))
    @label $(symlabel(v, t))
    $(genupdate(v, Val(v.state), t; kw...))
end
genvalue(v::VarInfo) = @q $C.value($(symstate(v)))
# Compute a new value (through the same transformation pipeline as defaults)
# and store it into the variable's state.
genstore(v::VarInfo, val=nothing; unitfy=true, minmax=true, round=true, when=true) = begin
    isnothing(val) && (val = genbody(v))
    unitfy && (val = genunitfy(v, val))
    minmax && (val = genminmax(v, val))
    round && (val = genround(v, val))
    when && (val = genwhen(v, val))
    #TODO: remove redundant unitfy() in store!()
    @gensym s
    @q let $s = $(symstate(v))
        $C.store!($s, $val)
    end
end
# Stateless `context` variable: a pre-step triggers the PreStage cascade.
genupdate(v::VarInfo, ::Val{nothing}, ::PreStep; kw...) = begin
    if istag(v, :context)
        @gensym c
        @q let $c = $(v.name)
            $C.update!(self, $C.PreStage())
            $c
        end
    end
end
# Stateless variables update recursively unless overridden/skipped.
genupdate(v::VarInfo, ::Val{nothing}, ::MainStep; kw...) = begin
    if istag(v, :override, :skip)
        nothing
    else
        @q $C.update!($(v.name))
    end
end
# Context post-step: run PostStage, then loop (via @goto back to the pre-step
# label) while this clock lags behind the outer context's clock.
genupdate(v::VarInfo, ::Val{nothing}, ::PostStep; kw...) = begin
    if istag(v, :context)
        l = symlabel(v, PreStep())
        @gensym c cc
        @q let $c = $(v.name),
               $cc = $c.context
            $C.update!(self, $C.PostStage())
            if !isnothing($cc) && $C.value($c.clock.time) < $C.value($cc.clock.time)
                @goto $l
            end
            $c
        end
    end
end
# Default behavior for stateful variables: store on the main step only.
genupdate(v::VarInfo, ::Val, ::PreStep; kw...) = nothing
genupdate(v::VarInfo, ::Val, ::MainStep; kw...) = istag(v, :override, :skip) ? nothing : genstore(v)
genupdate(v::VarInfo, ::Val, ::PostStep; kw...) = nothing
genupdate(v::VarInfo, ::Val, ::UpdateStage; kw...) = nothing
# Pre/Post stages propagate to stateless (sub-system) variables.
genupdate(v::VarInfo, ::Val{nothing}, ::PreStage; kw...) = @q $C.update!($(v.name), $C.PreStage())
genupdate(v::VarInfo, ::Val{nothing}, ::PostStage; kw...) = @q $C.update!($(v.name), $C.PostStage())
#TODO: merge extractfuncargdep() and extractfuncargkey()?
# Extract the dependency variable(s) referenced by an argument expression,
# or nothing when the expression carries no dependency.
extractfuncargdep(v::Expr) = begin
    a = v.args
    if isexpr(v, :call)
        # detect boolean operators between state vars (i.e. `a`, `b` in `a && b`, `a || b`)
        if a[1] == :& || a[1] == :|
            extractfuncargdep.(a[2:3]) |> Iterators.flatten |> collect
        # detect variable inside wrapping function (i.e. `a` in `nounit(a.b, ..)`)
        else
            extractfuncargdep(a[2])
        end
    # detect shorthand syntax for calling value() (i.e. `a` in `a'` = `value(a)`)
    elseif isexpr(v, Symbol("'"))
        extractfuncargdep(a[1])
    # detect first callee of dot chaining (i.e. `a` in `a.b.c`)
    elseif isexpr(v, :., :ref)
        extractfuncargdep(a[1])
    else
        nothing
    end
end
extractfuncargdep(v::Symbol) = [v]
extractfuncargdep(v) = nothing
# Extract the binding key (last accessor) from an argument expression;
# errors where no single key can be determined.
extractfuncargkey(v::Expr) = begin
    a = v.args
    if isexpr(v, :call)
        # detect boolean operators between state vars (i.e. `a`, `b` in `a && b`, `a || b`)
        if a[1] == :& || a[1] == :|
            error("missing function argument key: $v")
        # detect variable inside wrapping function (i.e. `b` in `nounit(a.b, ..)`)
        else
            extractfuncargkey(a[2])
        end
    # detect shorthand syntax for calling value() (i.e. `b` in `a.b'` = `value(a.b)`)
    elseif isexpr(v, Symbol("'"))
        extractfuncargkey(a[1])
    # detect last callee of dot chaining (i.e. `c` in `a.b.c`)
    elseif isexpr(v, :., :ref)
        extractfuncargkey(a[2])
    else
        error("unrecognized function argument key: $v")
    end
end
extractfuncargkey(v::QuoteNode) = extractfuncargkey(v.value)
extractfuncargkey(v::Symbol) = v
# `k=v` stays a pair; a bare expression binds to itself.
extractfuncargpair(a) = let k, v
    !@capture(a, k_=v_) && (k = a; v = a)
    extractfuncargkey(k) => v
end
emitfuncargpair(a; kw...) = emitfuncargpair(extractfuncargpair(a)...; kw...)
# Emit `k = value(v)` (or `k = v` raw) binding for generated let-blocks.
emitfuncargpair(k, v; value=true) = begin
    k = esc(k)
    v = value ? @q($C.value($v)) : v
    @q $k = $v
end
# Body expression for a variable: a bodyless single-argument declaration
# reduces to that argument's value; otherwise bind all args in a let-block
# around the user-provided body.
genbody(v::VarInfo, body=nothing) = begin
    if isnothing(v.body) && length(v.args) == 1
        a = v.args[1]
        #HACK: can't use or (|) syntax: https://github.com/MikeInnes/MacroTools.jl/issues/36
        if @capture(a, k_=v_)
            args = emitfuncargpair(k, v)
        elseif @capture(a, k_Symbol)
            args = emitfuncargpair(k, k)
        else
            @gensym k
            args = emitfuncargpair(k, a)
        end
        body = esc(k)
    else
        args = @q begin $(emitfuncargpair.(v.args)...) end
        isnothing(body) && (body = esc(v.body))
    end
    MacroTools.flatten(@q let $args; $body end)
end
# Parse a kwarg spec `k::t(u)` into (name, type, unit); missing parts default.
extractfunckwargtuple(a) = let k, t, u
    @capture(a, k_::t_(u_) | k_::t_ | k_(u_))
    isnothing(k) && (k = a)
    isnothing(u) && (u = missing)
    (k, t, u)
end
emitfunckwargkey(a) = @q $(esc(extractfunckwargtuple(a)[1]))
# Bind a kwarg, unitfying the incoming value to the declared unit.
emitfunckwargpair(a) = begin
    k, t, u = extractfunckwargtuple(a)
    v = @q($C.unitfy($(esc(k)), $u))
    # Skip type assertion (maybe only needed for Call, not Integrate)
    #v = @q $v::$C.valuetype($(gencallargtype(t)), $u)
    @q $k = $v
end
# Emit a named closure for `call`-style variables: outer kwargs become
# positional parameters, inner args are bound from states, result unitfied.
genfunc(v::VarInfo; unitfy=true) = begin
    innerargs = @q begin $(emitfuncargpair.(v.args)...) end
    innerbody = MacroTools.flatten(@q let $innerargs; $(esc(v.body)) end)
    unitfy && (innerbody = @q $C.unitfy($innerbody, $C.value($(gettag(v, :unit)))))
    callargs = emitfunckwargkey.(v.kwargs)
    argsheader = emitfunckwargpair.(v.kwargs)
    @q function $(symcall(v))($(callargs...))
        let $(argsheader...)
            $innerbody
        end
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 884 | using PrecompileTools: @setup_workload, @compile_workload
# Precompile workload: declare a tiny system and exercise the common
# simulate/visualize entry points so they are compiled into the package image.
@setup_workload begin
    @system S(Controller) begin
        a => 1 ~ preserve
        b(a) ~ track
        c(b) ~ accumulate
    end
    c = @config
    @compile_workload begin
        r = simulate(S, config=c)
        simulate(S, configs=[c])
        visualize(r, :time, :c; backend=:UnicodePlots)
        visualize(r, :time, :c; backend=:Gadfly)[] #|> Cropbox.Gadfly.SVGJS()
        visualize(r, :time, :c; kind=:line, backend=:Gadfly)' #|> Cropbox.Gadfly.SVG()
        visualize(S, :time, :c; backend=:UnicodePlots)
        visualize(S, :time, :c; backend=:Gadfly)[] #|> Cropbox.Gadfly.SVGJS()
        #visualize(S, :time, :c; backend=:Gadfly)[] |> Cropbox.Gadfly.PDF()
        #visualize(S, :time, :c; backend=:Gadfly)[] |> Cropbox.Gadfly.PNG()
        #r |> display
        #display(MIME("text/html"), r)
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 946 | using Distributions: Distribution, Normal
#HACK: Measurements.jl causes weird StackOverFlow error when used with other packages (i.e. Plots, UnicodePlots)
#HACK: not exactly same, but maybe related with https://github.com/PumasAI/Pumas.jl/issues/609
#import Measurements
#using Measurements: Measurement, ±
using Unitful: Quantity
#HACK: define own Measurement just for supporting ± syntax
# Minimal value ± error pair standing in for Measurements.Measurement,
# defined locally to support the `±` syntax without that dependency.
struct Measurement{T} <: Number
    val::T
    err::T
end
# Unitful-aware constructor: convert both operands to a common unit first.
measurement(a::Quantity, b::Quantity) = begin
    u = Unitful.promote_unit(Unitful.unit(a), Unitful.unit(b))
    measurement(deunitfy(a, u), deunitfy(b, u)) * u
end
measurement(a, b) = Measurement(promote(a, b)...)
const ± = measurement
export ±
Base.show(io::IO, m::Measurement) = print(io, "$(m.val) ± $(m.err)")
# Draw one sample: distributions sample directly; a measurement is treated as
# Normal(val, err); units are stripped, sampled, then restored; plain values
# pass through unchanged.
sample(v::Distribution) = rand(v)
sample(v::Measurement) = rand(Normal(v.val, v.err))
sample(v::Quantity{<:Measurement}) = unitfy(sample(deunitfy(v)), unittype(v))
sample(v) = v
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 3593 | abstract type State{V} end
#TODO: support call syntax in Julia 1.3
#(s::State)() = value(s)
# Unwrap a state to its current value; non-states pass through unchanged.
value(v) = v
value(s::State) = getfield(s, :value)
value(S::Vector{<:State}) = value.(S)
export value
# `s[]` and `s'` are shorthands for `value(s)`.
Base.getindex(s::State) = value(s)
Base.adjoint(s::State) = value(s)
# Store a new value, unitfying and converting to the state's value type.
store!(s::State, v) = setfield!(s, :value, convert(valuetype(s), unitfy(v, unittype(s))))
Base.getindex(s::State, i) = value(s)[i]
# A state behaves as a one-element iterable yielding itself.
Base.length(s::State) = 1
Base.iterate(s::State) = (s, nothing)
Base.iterate(s::State, i) = nothing
Base.eltype(::Type{<:State{V}}) where V = V
# Unit of a value/type; nothing for unitless types, recursing through
# states, vectors, ranges, and (per-column) data frames.
unittype(::Type{V}) where V = nothing
unittype(::Type{V}) where {V<:Number} = Unitful.unit(V)
unittype(::Type{<:State{V}}) where V = unittype(V)
unittype(::Type{<:Vector{V}}) where V = unittype(V)
unittype(::Type{<:Vector{Union{V,Missing}}}) where V = unittype(V)
unittype(::Type{<:AbstractRange{V}}) where V = unittype(V)
unittype(::T) where T = unittype(T)
using DataFrames: AbstractDataFrame
unittype(v::AbstractDataFrame) = unittype.(eltype.(eachcol(v)))
valuetype(::State{V}) where V = V
# Concrete value type for an element type `T` under unit `U`.
valuetype(T, ::Nothing) = T
valuetype(T, U::Units) = Unitful.isunitless(U) ? T : Quantity{T, Unitful.dimension(U), typeof(U)}
valuetype(::Type{Array{T,N}}, U::Units) where {T,N} = Array{valuetype(T, U), N}
# Unit of a rate = value unit / time unit; nothing when dimensionless.
rateunittype(U::Nothing, T::Units) = T^-1
rateunittype(U::Units, T::Units) = (R = U/T; Unitful.isunitless(R) ? nothing : R)
rateunittype(U::Units, T::Nothing) = U
rateunittype(U::Nothing, T::Nothing) = nothing
# Use `U` when it is a time dimension; otherwise fall back to `TU` (hours).
timeunittype(U, TU=u"hr") = isnothing(U) ? TU : (Unitful.dimension(U) == Unitful.𝐓) ? U : TU
# Lazy wrapper stripping a state's unit (optionally converting first).
struct Nounit{S,U}
    state::S
    unit::U
end
nounit(s::State, u::Units) = Nounit(s, u)
nounit(s::State) = Nounit(s, missing)
value(s::Nounit) = deunitfy(value(s.state), s.unit)
export nounit
# Lazy boolean combinators over states, so conditions like `!a`, `a & b`,
# `a | b` are evaluated at value() time.
struct Not{S}
    state::S
end
Base.:!(s::State) = Not(s)
value(s::Not) = !value(s.state)
struct And{S1,S2}
    state1::S1
    state2::S2
end
struct Or{S1,S2}
    state1::S1
    state2::S2
end
Base.:&(a::Union{State,And,Or,Not}, b::Union{State,And,Or,Not}) = And(a, b)
Base.:|(a::Union{State,And,Or,Not}, b::Union{State,And,Or,Not}) = Or(a, b)
value(s::And) = value(s.state1) && value(s.state2)
value(s::Or) = value(s.state1) || value(s.state2)
# Mutable indirection to a state, allowing the underlying state object to be
# replaced after system construction (used by `ref`-tagged variables).
mutable struct StateRef{V,S<:State{V}} <: State{V}
    state::S
end
Base.getindex(r::StateRef) = r.state
value(r::StateRef) = value(r[])
setvalue!(r::StateRef, s::State) = (r.state = s)
#HACK: swap out state variable of mutable System after initialization
setvar!(s::System, k::Symbol, v::State) = begin
    r = s[k]
    @assert r isa StateRef
    setvalue!(r, v)
    # keep the alias field (if any) pointing at the same new state
    a = Dict(fieldnamesalias(s))[k]
    !isnothing(a) && setvalue!(s[a], v)
    nothing
end
# Ordering hint for state updates within a step (default: post).
abstract type Priority end
struct PrePriority <: Priority end
struct PostPriority <: Priority end
priority(::S) where {S<:State} = priority(S)
priority(::Type{<:State}) = PostPriority()
# Compact single-line display of a state's value, truncated per `:maxlength`.
Base.show(io::IO, s::State) = begin
    v = value(s)
    maxlength = get(io, :maxlength, nothing)
    r = labelstring(v; maxlength)
    print(io, r)
end
Base.show(io::IO, ::MIME"text/plain", s::State) = print(io, value(s))
include("state/hold.jl")
include("state/bring.jl")
include("state/wrap.jl")
include("state/advance.jl")
include("state/preserve.jl")
include("state/tabulate.jl")
include("state/interpolate.jl")
include("state/track.jl")
include("state/remember.jl")
include("state/provide.jl")
include("state/drive.jl")
include("state/call.jl")
include("state/integrate.jl")
include("state/accumulate.jl")
include("state/capture.jl")
include("state/flag.jl")
include("state/produce.jl")
include("state/bisect.jl")
include("state/solve.jl")
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 3361 | abstract type System end
# Name of a system's (concrete) type; generated types override `namefor`.
namefor(s::S) where {S<:System} = namefor(S)
namefor(S) = nameof(S)
typefor(s::S) where {S<:System} = S
# All progressively-shortened dotted names of the type, longest first
# (e.g. A.B.C -> [:A.B.C, :B.C, :C]).
Base.names(s::S) where {S<:System} = names(S)
Base.names(S::Type{<:System}) = (n = split(String(Symbol(S)), "."); [Symbol(join(n[i:end], ".")) for i in 1:length(n)])
# A system iterates over the values of its unique fields.
Base.length(s::System) = length(fieldnamesunique(s))
Base.iterate(s::System) = iterate(s, 1)
Base.iterate(s::System, i) = begin
    F = fieldnamesunique(s)
    l = length(F)
    l == 0 ? nothing : (s[F[i]], l == i ? nothing : i+1)
end
Base.iterate(s::System, ::Nothing) = nothing
Base.broadcastable(s::System) = Ref(s)
# Indexing delegates to property access; `s[nothing]` is the system itself.
Base.getindex(s::System, i) = getproperty(s, i)
Base.getindex(s::System, ::Nothing) = s
# Dotted-path property access with optional bracket indexing per segment,
# e.g. `s["a.b[2]"]` or `s["a[:k]"]`; an empty path returns `s` itself.
Base.getproperty(s::System, n::AbstractString) = begin
    isempty(n) && return s
    reduce((a, b) -> begin
        m = match(r"([^\[\]]+)(?:\[(.+)\])?", b)
        n, i = m[1], m[2]
        v = getfield(a, Symbol(n))
        if isnothing(i)
            v
        else
            #HACK: support symbol as-is (i.e. "a[:i]" vs. "a[i]")
            k = if startswith(i, ":")
                i
            elseif isprivatename(i)
                canonicalname(i, n)
            else
                try
                    #HACK: support indexing of non-Variable (i.e. "a[1]" for Vector{Layer})
                    parse(Int, i)
                catch
                    #HACK: support accessing index (i.e. "a[i]")
                    getfield(a, Symbol(i)) |> value
                end
            end
            v[k]
        end
    end, [s, split(n, ".")...])
end
# True when the dotted path resolves without throwing.
Base.hasproperty(s::System, n::AbstractString) = begin
    try
        getproperty(s, n)
    catch
        return false
    end
    true
end
#HACK: calculate variable body with external arguments for debugging purpose
# Re-evaluate variable `k` of system instance `s`, defaulting unspecified
# arguments to their current values in `s`.
value(s::S, k::Symbol, a...; kw...) where {S<:System} = begin
    d = dependency(S)
    v = d.M[k]
    P = extractfuncargpair.(v.args)
    kw0 = Dict(k => s[v]' for (k, v) in P)
    kw1 = merge(kw0, kw)
    value(S, k, a...; kw1...)
end
# Type-level form: rebuild and `eval` the variable's body with the supplied
# argument values (debug-only; uses eval, so not for hot paths).
value(S::Type{<:System}, k::Symbol, a...; kw...) = begin
    d = dependency(S)
    v = d.M[k]
    body = v.body
    type = v.type
    emitvars(a) = let p = extractfuncargpair(a), k = p[1]; :($k = $(kw[k])) end
    emitvals(a, v) = let (k, t, u) = extractfunckwargtuple(a); :($k = Cropbox.unitfy($v, $u)) end
    #TODO: check args/kwargs existence
    if v.state in (:Preserve, :Track)
        vars = emitvars.(v.args)
        eval(:($type(let $(vars...); $body end)))
    elseif v.state == :Call
        vars = emitvars.(v.args)
        vals = emitvals.(v.kwargs, a)
        eval(:($type(let $([vars..., vals...]...); $body end)))
    else
        error("unsupported state for value() call form: $v")
    end
end
# Terse display; rich display delegates to look() without header/doc.
Base.show(io::IO, s::System) = print(io, "<$(namefor(s))>")
Base.show(io::IO, m::MIME"text/plain", s::System) = look(io, m, s; header=false, doc=false)
Base.show(io::IO, m::MIME"text/html", s::System) = look(io, m, s; header=false, doc=false)
#TODO: see if we can move it to util/look.jl
include("look.jl")
# Compact single-line label for a value: its compact repr, truncated at the
# first newline and at `maxlength` characters, with "…" marking truncation.
labelstring(v; maxlength=nothing) = begin
    s = repr(v; context=IOContext(devnull, :compact => true, :limit => true))
    total = length(s)
    # cut at the first newline, if any
    nl = findfirst('\n', s)
    cut = isnothing(nl) ? total : nl - 1
    # and at the caller-provided maximum, if any
    !isnothing(maxlength) && (cut = min(cut, maxlength))
    cut < total ? s[1:cut] * "…" : s
end
export System
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 4007 | using Unitful: Unitful, Units, Quantity, @u_str
export @u_str
# Attach/convert unit `u` to value `v`. nothing/missing values pass through;
# a `nothing` unit demands a unitless value; a `missing` unit is a no-op.
unitfy(::Nothing, u) = nothing
unitfy(::Nothing, ::Nothing) = nothing
unitfy(::Missing, u) = missing
unitfy(::Missing, ::Nothing) = missing
unitfy(v, ::Nothing) = !hasunit(v) ? deunitfy(v) : error("unable to strip units: $v")
unitfy(::Nothing, ::Missing) = nothing
unitfy(::Missing, ::Missing) = missing
unitfy(v, ::Missing) = v
unitfy(v::Number, u::Units) = Quantity(v, u)
unitfy(v::AbstractArray, u::Units) = unitfy.(v, u)
unitfy(v::Tuple, u::Units) = unitfy.(v, u)
unitfy(v::Quantity, u::Units) = Unitful.uconvert(u, v)
unitfy(v::AbstractArray{<:Union{Quantity,Missing}}, u::Units) = unitfy.(v, u)
unitfy(v::Tuple{Vararg{Union{Quantity,Missing}}}, u::Units) = unitfy.(v, u)
# Ranges: the step takes the absolute (affine-safe) unit of `u`.
unitfy(v::UnitRange, u::Units) = StepRange(unitfy(v.start, u), unitfy(1, Unitful.absoluteunit(u)), unitfy(v.stop, u))
unitfy(v::StepRange, u::Units) = StepRange(unitfy(v.start, u), unitfy(step(v), Unitful.absoluteunit(u)), unitfy(v.stop, u))
unitfy(v::StepRangeLen, u::Units) = begin
    #HACK: avoid missing zero() for unitfied TwicePrecision called by StepRangeLen constructor
    x = v.ref + step(v)
    E = eltype(x)
    T = typeof(unitfy(E(x), u))
    r = unitfy(E(v.ref), u)
    s = unitfy(step(v), Unitful.absoluteunit(u))
    R = typeof(r)
    S = typeof(s)
    #TODO: use TwicePrecision?
    StepRangeLen{T,R,S}(r, s, length(v), v.offset)
end
# Fallback: treat `u` as a constructor/converter (e.g. a type).
unitfy(v, u) = u(v)
unitfy(v::V, ::Type{V}) where V = v
# deunitfy(v): strip units from `v`, recursing through containers and ranges.
deunitfy(v) = v
deunitfy(v::Quantity) = Unitful.ustrip(v)
deunitfy(v::AbstractArray) = deunitfy.(v)
deunitfy(v::Tuple) = deunitfy.(v)
deunitfy(v::UnitRange) = UnitRange(deunitfy(v.start), deunitfy(v.stop))
deunitfy(v::StepRange) = StepRange(deunitfy(v.start), deunitfy(step(v)), deunitfy(v.stop))
deunitfy(v::StepRangeLen) = StepRangeLen(deunitfy(eltype(v)(v.ref)), deunitfy(step(v)), length(v), v.offset)
# deunitfy(v, u): convert into unit `u` first, then strip it
deunitfy(v, u) = deunitfy(unitfy(v, u))
deunitfy(v, ::Missing) = deunitfy(v)
# common unit for promotion, ignoring `nothing` placeholders
promoteunit(u...) = Unitful.promote_unit(filter(!isnothing, u)...)
promoteunit(::Nothing) = nothing
promoteunit() = nothing
# whether a unit/value actually carries a non-trivial unit
hasunit(v::Units) = !Unitful.isunitless(v)
hasunit(::Nothing) = false
hasunit(v) = any(hasunit.(unittype(v)))
using DataFrames: AbstractDataFrame, DataFrame, DataFrames
# Column-wise (de)unitfy for data frames: apply the scalar `unitfy`/`deunitfy`
# to each column paired with the corresponding entry of unit vector `U`,
# returning a new DataFrame with the same column names.
for f in (:unitfy, :deunitfy)
    @eval $f(df::AbstractDataFrame, U::Vector) = begin
        r = DataFrame()
        for (n, c, u) in zip(propertynames(df), eachcol(df), U)
            r[!, n] = $f.(c, u)
        end
        r
    end
end
import Dates
import JuliaInterpreter
# Automatic unitfication of a DataFrame based on column labels of the form
# "name (unit)": matching columns are unitfied and renamed to the bare "name".
# A label of the form "name (:Type)" is treated as a constructor (e.g. Date)
# instead of a unit; keyword args extend the built-in constructor table.
unitfy(df::AbstractDataFrame; kw...) = begin
    #HACK: default constructor for common types to avoid scope binding issue
    D = merge(Dict(
        :Date => Dates.Date,
    ), Dict(kw))
    # capture trailing "(unit)" in each column name
    p = r"(.+)\(([^\(\)]+)\)$"
    M = match.(p, names(df))
    # old column name => new (stripped) name, for matching columns only
    n(m::RegexMatch) = m.match => strip(m.captures[1])
    n(m) = nothing
    N = filter!(!isnothing, n.(M))
    isempty(N) && return df
    # resolve the captured label into a unit object or a constructor
    u(m::RegexMatch) = begin
        s = m.captures[2]
        #HACK: assume type constructor if the label starts with `:`
        if startswith(s, ":")
            e = Meta.parse(s[2:end])
            #HACK: use Main scope for type constructor evaluation
            #TODO: remove fallback eval in favor of explict constructor mapping
            haskey(D, e) ? D[e] : Main.eval(e)
        else
            #HACK: use JuliaInterpreter to avoid eval during precompilation
            #Unitful.uparse(s)
            # https://github.com/PainterQubits/Unitful.jl/issues/649
            e = Meta.parse(s)
            if isa(e, Symbol)
                Unitful.lookup_units(Unitful, e)
            else
                JuliaInterpreter.finish_and_return!(JuliaInterpreter.Frame(Unitful, e))
            end
        end
    end
    u(m) = missing
    U = u.(M)
    DataFrames.rename(unitfy(df, U), N...)
end
unitfy(df::AbstractDataFrame, ::Nothing) = df
# inverse of the above: strip units and bake them back into "name (unit)" labels
deunitfy(df::AbstractDataFrame) = DataFrame(((hasunit(u) ? "$n ($u)" : n) => deunitfy(df[!, n]) for (n, u) in zip(names(df), unittype(df)))...)
export unitfy, deunitfy
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 2286 | mutable struct Accumulate{V,T,R} <: State{V}
value::V     # accumulated value
time::T      # time of the last rate integration
rate::R      # rate captured at the previous step, applied over elapsed time
reset::Bool  # when true, restart from the initial value on the next step
end
# Construct an Accumulate state: value in unit `U`, time in clock unit `TU`,
# rate in the derived unit `U/TU`.
Accumulate(; unit, time, timeunit, _value, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    v = _value
    #V = promote_type(V, typeof(v))
    TU = value(timeunit)
    t = unitfy(value(time), TU)
    T = typeof(t)
    RU = rateunittype(U, TU)
    R = valuetype(_type, RU)
    Accumulate{V,T,R}(v, t, zero(R), false)
end
supportedtags(::Val{:Accumulate}) = (:unit, :init, :time, :timeunit, :reset, :min, :max, :when)
constructortags(::Val{:Accumulate}) = (:unit, :init, :time, :timeunit, :reset)
# unit of the stored rate, lifted to compile time
@generated rateunit(::Accumulate{V,T,R}) where {V,T,R} = unittype(R)
# default tags: zero init, clock-driven time, derived time unit, no reset
updatetags!(d, ::Val{:Accumulate}; _...) = begin
    N = d[:_type]
    U = d[:unit]
    !haskey(d, :init) && (d[:init] = @q zero($N))
    !haskey(d, :time) && (d[:time] = :(context.clock.time))
    #TODO: automatic inference without explicit `timeunit` tag
    !haskey(d, :timeunit) && (d[:timeunit] = @q $C.timeunittype($U, $C.timeunit(__Context__)))
    !haskey(d, :reset) && (d[:reset] = false)
end
# Concrete state type for code generation: Accumulate{V,T,R}.
genvartype(v::VarInfo, ::Val{:Accumulate}; N, U, V, _...) = begin
    TU = gettag(v, :timeunit)
    T = @q $C.valuetype(Float64, $TU)
    RU = @q $C.rateunittype($U, $TU)
    R = @q $C.valuetype($N, $RU)
    @q Accumulate{$V,$T,$R}
end
# initial value: the `init` tag unitfied to the state unit
gendefault(v::VarInfo, ::Val{:Accumulate}) = begin
    i = gettag(v, :init)
    u = gettag(v, :unit)
    @q $C.unitfy($C.value($i), $C.value($u))
end
# main step: integrate the stored rate over elapsed time (or restart from
# the default value when the reset flag was raised last step)
genupdate(v::VarInfo, ::Val{:Accumulate}, ::MainStep; kw...) = begin
    @gensym s a0 t t0 a
    @q let $s = $(symstate(v)),
           $a0 = $s.reset ? $(gendefault(v)) : $s.value,
           $t = $C.value($(gettag(v, :time))),
           $t0 = $s.time,
           $a = $a0 + $s.rate * ($t - $t0)
        $(genstore(v, a; unitfy=false, minmax=true, round=false, when=false))
        $s.time = $t
    end
end
# post step: evaluate the body (gated by `when`) as the rate for the next
# interval, and latch the reset flag
genupdate(v::VarInfo, ::Val{:Accumulate}, ::PostStep; kw...) = begin
    w = gettag(v, :when)
    f = isnothing(w) ? genbody(v) : @q $C.value($w) ? $(genbody(v)) : zero($(gettag(v, :_type)))
    @gensym s r e
    @q let $s = $(symstate(v)),
           $r = $C.unitfy($f, $C.rateunit($s)),
           $e = $C.value($(gettag(v, :reset)))
        $s.rate = $r
        $s.reset = $e
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 811 | mutable struct Advance{T} <: State{T}
value::T  # exposed value (tick before the internal counter advanced)
t::T      # internal counter
Δt::T     # increment applied on every update
end
# Construct an Advance counter starting at `init` (default zero) and
# stepping by `step` (default one unit).
Advance(; init=nothing, step=nothing, unit, _type, _...) = begin
    U = value(unit)
    T = valuetype(_type, U)
    t = isnothing(init) ? zero(T) : unitfy(value(init), U)
    Δt = isnothing(step) ? oneunit(T) : unitfy(value(step), U)
    #T = promote_type(typeof(t), typeof(Δt))
    Advance{T}(t, t, Δt)
end
supportedtags(::Val{:Advance}) = (:init, :step, :unit)
constructortags(::Val{:Advance}) = (:init, :step, :unit)
genvartype(v::VarInfo, ::Val{:Advance}; V, _...) = @q Advance{$V}
gendefault(v::VarInfo, ::Val{:Advance}) = missing
# main step: store the current tick, then advance the internal counter
genupdate(v::VarInfo, ::Val{:Advance}, ::MainStep; kw...) = begin
    @gensym s t
    @q let $s = $(symstate(v)),
           $t = $s.t
        $s.t += $s.Δt
        $C.store!($s, $t)
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 5182 | mutable struct Bisect{V,E} <: State{V}
value::V
step::Symbol  # current phase of the bisection state machine (:z/:a/:b/:c)
N::Int        # iteration counter
a::V          # lower bracket endpoint
b::V          # upper bracket endpoint
c::V          # bracket midpoint
d::V          # initial bracket width (used for the relative tolerance test)
fa::E         # equation residual at a
fb::E         # equation residual at b
fc::E         # equation residual at c
end
# Construct a Bisect state: value in `unit`, residual in `evalunit`.
Bisect(; unit, evalunit, _type, _...) = begin
    V = valuetype(_type, value(unit))
    E = valuetype(_type, value(evalunit))
    v = zero(V)
    e = zero(E)
    Bisect{V,E}(v, :z, 0, v, v, v, v, e, e, e)
end
supportedtags(::Val{:Bisect}) = (:unit, :evalunit, :lower, :upper, :maxiter, :tol, :min, :max)
constructortags(::Val{:Bisect}) = (:unit, :evalunit)
# unit of the equation residual, lifted to compile time
@generated evalunit(s::Bisect{V,E}) where {V,E} = unittype(E)
# defaults: bracket from min/max tags, residual unit same as value unit,
# 150 iterations, relative tolerance 1e-5
updatetags!(d, ::Val{:Bisect}; _...) = begin
    !haskey(d, :lower) && haskey(d, :min) && (d[:lower] = d[:min])
    !haskey(d, :upper) && haskey(d, :max) && (d[:upper] = d[:max])
    !haskey(d, :evalunit) && (d[:evalunit] = d[:unit])
    !haskey(d, :maxiter) && (d[:maxiter] = 150)
    !haskey(d, :tol) && (d[:tol] = 0.00001)
end
genvartype(v::VarInfo, ::Val{:Bisect}; N, V, _...) = begin
    EU = gettag(v, :evalunit)
    E = @q $C.valuetype($N, $EU)
    @q Bisect{$V,$E}
end
gendefault(v::VarInfo, ::Val{:Bisect}) = nothing
#HACK: needs update in case min/max variables changed during bisection loop
#TODO: other variables wanting to use min/max would require similar work
genupdate(v::VarInfo, ::Val{:Bisect}, ::PreStep; kw...) = begin
    @gensym s d
    @q let $s = $(symstate(v)),
           $d = $(genminmax(v, @q $C.value($s)))
        $C.store!($s, $d)
    end
end
# Main-step code for bisection root finding, generated as a goto-driven state
# machine so each probe of the equation body happens in its own system update.
# Phases tracked in `s.step`:
#   :z — initialize bracket [a, b] from lower/upper, probe at `a`
#   :a — record f(a), probe at `b`
#   :b — record f(b); expand the bracket when signs match, else probe midpoint
#   :c — record f(c), shrink the bracket toward the sign change
# Loops back to the PreStep label until the residual is ≈ 0, the bracket
# shrinks below `tol` (relative to the initial width), or `maxiter` is hit.
genupdate(v::VarInfo, ::Val{:Bisect}, ::MainStep; kw...) = begin
    maxiter = gettag(v, :maxiter)
    tol = gettag(v, :tol)
    lstart = symlabel(v, PreStep())
    lrepeat = symlabel(v, MainStep(), :__repeat)
    lexit = symlabel(v, MainStep(), :__exit)
    @gensym s u Δ
    @q let $s = $(symstate(v))
        if $s.step == :z
            # phase :z — set up bracket and residual slots (NaN = unevaluated)
            $s.N = 1
            $u = $C.value($(gettag(v, :unit)))
            $s.a = $(genminmax(v, @q $C.unitfy($C.value($(gettag(v, :lower))), $u)))
            $s.b = $(genminmax(v, @q $C.unitfy($C.value($(gettag(v, :upper))), $u)))
            $s.c = zero($s.c)
            $s.d = $s.b - $s.a
            $s.fa = zero($s.fa) * NaN
            $s.fb = zero($s.fb) * NaN
            $s.fc = zero($s.fc) * NaN
            $s.step = :a
            $C.store!($s, $s.a)
            @goto $lrepeat
        elseif $s.step == :a
            # phase :a — evaluate residual at the lower endpoint
            $s.fa = $(genbody(v))
            if $s.fa ≈ zero($s.fa)
                @debug "bisect: $($v.name) [$($s.N)] found! $($C.value($s))"
                @goto $lexit
            elseif isnan($s.fa)
                @warn "bisect: $($v.name) [$($s.N)] $($s.a) => $($s.fa)"
            else
                @debug "bisect: $($v.name) [$($s.N)] $($s.a) => $($s.fa)"
            end
            $s.step = :b
            $C.store!($s, $s.b)
            @goto $lrepeat
        elseif $s.step == :b
            # phase :b — evaluate residual at the upper endpoint
            $s.fb = $(genbody(v))
            if $s.fb ≈ zero($s.fb)
                @debug "bisect: $($v.name) [$($s.N)] found! $($C.value($s))"
                @goto $lexit
            elseif isnan($s.fb)
                @warn "bisect: $($v.name) [$($s.N)] $($s.b) => $($s.fb)"
            else
                @debug "bisect: $($v.name) [$($s.N)] $($s.b) => $($s.fb)"
            end
            if sign($s.fa) == sign($s.fb)
                #HACK: try expanding bracket
                #$s.N += round(Int, 0.1*$maxiter)
                $Δ = $s.d / 2
                #HACK: reduce redundant unitfy when generating min/max clipping
                #TODO: check no expansion case where Δ gets clipped by min/max
                $s.a = $(genminmax(v, @q $s.a - $Δ))
                $s.b = $(genminmax(v, @q $s.b + $Δ))
                @debug "bisect: $($v.name) [$($s.N)] $($s.a) <- a, b -> $($s.b) "
                $s.step = :a
                $C.store!($s, $s.a)
                @goto $lrepeat
            end
            $s.c = ($s.a + $s.b) / 2
            $C.store!($s, $s.c)
            $s.step = :c
            @goto $lrepeat
        elseif $s.step == :c
            # phase :c — evaluate midpoint and keep the half with a sign change
            $s.fc = $(genbody(v))
            if isnan($s.fc)
                @warn "bisect: $($v.name) [$($s.N)] $($s.c) => $($s.fc)"
            else
                @debug "bisect: $($v.name) [$($s.N)] $($s.c) => $($s.fc)"
            end
            if $s.fc ≈ zero($s.fc) || ($s.b - $s.a) / $s.d < $tol
                @debug "bisect: $($v.name) [$($s.N)] finished! $($C.value($s))"
                @goto $lexit
            end
            if sign($s.fc) == sign($s.fa)
                $s.a = $s.c
                $s.fa = $s.fc
                @debug "bisect: $($v.name) [$($s.N)] a <- $($s.c)"
            else
                $s.b = $s.c
                $s.fb = $s.fc
                @debug "bisect: $($v.name) [$($s.N)] b <- $($s.c)"
            end
            $s.c = ($s.a + $s.b) / 2
            $C.store!($s, $s.c)
            @goto $lrepeat
        end
        @label $lrepeat
        if $s.N <= $maxiter
            $s.N += 1
            @goto $lstart
        else
            @warn "bisect: $($v.name) [$($s.N)] convergence failed!" a=$s.a b=$s.b c=$s.c fa=$s.fa fb=$s.fb fc=$s.fc d=$s.d $(v.name)=$C.value($s)
        end
        @label $lexit
        $s.step = :z
        $C.value($s)
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 756 | struct Bring{V} <: State{V}
value::V
end
# Bring delegates construction to the wrapped value's own type: `_value`
# supplies the prototype, and its type is called with the remaining kwargs.
Bring(; _value, kw...) = begin
    v = _value
    V = typeof(v)
    V(; kw...)
end
supportedtags(::Val{:Bring}) = (:parameters, :override)
constructortags(::Val{:Bring}) = ()
genvartype(v::VarInfo, ::Val{:Bring}; V, _...) = V
# all lifecycle stages fall through to the default (Val(nothing)) codegen
genupdate(v::VarInfo, ::Val{:Bring}, t::PreStep; kw...) = genupdate(v, Val(nothing), t; kw...)
genupdate(v::VarInfo, ::Val{:Bring}, t::MainStep; kw...) = genupdate(v, Val(nothing), t; kw...)
genupdate(v::VarInfo, ::Val{:Bring}, t::PostStep; kw...) = genupdate(v, Val(nothing), t; kw...)
genupdate(v::VarInfo, ::Val{:Bring}, t::PreStage; kw...) = genupdate(v, Val(nothing), t; kw...)
genupdate(v::VarInfo, ::Val{:Bring}, t::PostStage; kw...) = genupdate(v, Val(nothing), t; kw...)
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1875 | #TODO: consider MethodWrapper: https://julialang.zulipchat.com/#narrow/stream/274208-helpdesk-.28published.29/topic/Constraining.20method.20signatures/near/262358928
#TODO: use vendored FunctionWrappers for now: https://github.com/JuliaLang/julia/issues/40187
include("../../lib/FunctionWrappers/FunctionWrappers.jl")
using .FunctionWrappers: FunctionWrapper
# Call: a state holding a callable with a fixed, type-checked signature
# (via FunctionWrapper) rather than a plain value.
struct Call{V,F<:FunctionWrapper} <: State{V}
    value::F  # wrapped callable
end
Call(; unit, _value, _type, _calltype, _...) = begin
    V = valuetype(_type, value(unit))
    F = _calltype
    Call{V,F}(_value)
end
# make the state itself callable, forwarding to the wrapped function
(c::Call)(a...) = value(c)(a...)
#HACK: for debugging with automatic unitfy
call(c::Call{V,FunctionWrapper{V,T}}, args...) where {V,T<:Tuple} = begin
    U = [unittype(t) for (a, t) in zip(args, T.types)]
    value(c)([unitfy(a, u) for (a, u) in zip(args, U)]...)
end
supportedtags(::Val{:Call}) = (:unit,)
constructortags(::Val{:Call}) = (:unit,)
#HACK: showing s.value could trigger StackOverflowError
Base.show(io::IO, s::Call) = print(io, "<call>")
# argument type for a call signature entry; untyped arguments default to Float64
gencallargtype(t) = isnothing(t) ? :Float64 : esc(t)
# derive the FunctionWrapper type from `name::Type(unit)`-style argument specs
updatetags!(d, ::Val{:Call}; kwargs, _...) = begin
    #FIXME: lower duplicate efforts in genvartype()
    N = d[:_type]
    U = d[:unit]
    V = @q $C.valuetype($N, $U)
    extract(a) = let k, t, u
        @capture(a, k_::t_(u_) | k_::t_ | k_(u_) | k_)
        @q $C.valuetype($(gencallargtype(t)), $u)
    end
    F = @q FunctionWrapper{$V, Tuple{$(extract.(kwargs)...)}}
    d[:_calltype] = F
end
genvartype(v::VarInfo, ::Val{:Call}; V, _...) = begin
    extract(a) = let k, t, u
        @capture(a, k_::t_(u_) | k_::t_ | k_(u_) | k_)
        @q $C.valuetype($(gencallargtype(t)), $u)
    end
    F = @q FunctionWrapper{$V, Tuple{$(extract.(v.kwargs)...)}}
    @q Call{$V,$F}
end
gendefault(v::VarInfo, ::Val{:Call}) = genfunc(v)
genupdate(v::VarInfo, ::Val{:Call}, ::MainStep; kw...) = genvalue(v)
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1834 | mutable struct Capture{V,T,R} <: State{V}
value::V  # amount captured over the last time interval
time::T   # time stamp of the previous step
rate::R   # rate evaluated at the previous step
end
# Construct a Capture state: value in unit `U`, time in `TU`, rate in `U/TU`.
Capture(; unit, time, timeunit, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    v = unitfy(zero(_type), U)
    #V = promote_type(V, typeof(v))
    TU = value(timeunit)
    t = unitfy(value(time), TU)
    T = typeof(t)
    RU = rateunittype(U, TU)
    R = valuetype(_type, RU)
    Capture{V,T,R}(v, t, zero(R))
end
supportedtags(::Val{:Capture}) = (:unit, :time, :timeunit, :when)
constructortags(::Val{:Capture}) = (:unit, :time, :timeunit)
# unit of the stored rate, lifted to compile time
@generated rateunit(s::Capture{V,T,R}) where {V,T,R} = unittype(R)
updatetags!(d, ::Val{:Capture}; _...) = begin
    !haskey(d, :time) && (d[:time] = :(context.clock.time))
    #TODO: automatic inference without explicit `timeunit` tag
    !haskey(d, :timeunit) && (d[:timeunit] = @q $C.timeunittype($(d[:unit]), $C.timeunit(__Context__)))
end
genvartype(v::VarInfo, ::Val{:Capture}; N, U, V, _...) = begin
    TU = gettag(v, :timeunit)
    T = @q $C.valuetype(Float64, $TU)
    RU = @q $C.rateunittype($U, $TU)
    R = @q $C.valuetype($N, $RU)
    @q Capture{$V,$T,$R}
end
gendefault(v::VarInfo, ::Val{:Capture}) = nothing
# main step: store rate × elapsed time since the previous step
genupdate(v::VarInfo, ::Val{:Capture}, ::MainStep; kw...) = begin
    @gensym s t t0 d
    @q let $s = $(symstate(v)),
           $t = $C.value($(gettag(v, :time))),
           $t0 = $s.time,
           $d = $s.rate * ($t - $t0)
        $C.store!($s, $d)
    end
end
# post step: refresh the time stamp and the rate for the next interval
# (body gated by `when`, defaulting to zero when the condition is false)
genupdate(v::VarInfo, ::Val{:Capture}, ::PostStep; kw...) = begin
    w = gettag(v, :when)
    f = isnothing(w) ? genbody(v) : @q $C.value($w) ? $(genbody(v)) : zero($(gettag(v, :_type)))
    @gensym s t r
    @q let $s = $(symstate(v)),
           $t = $C.value($(gettag(v, :time))),
           $r = $C.unitfy($f, $C.rateunit($s))
        $s.time = $t
        $s.rate = $r
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1522 | mutable struct Drive{V} <: State{V}
value::V
array::Vector{V}  # pre-loaded series of values replayed by clock tick
tick::Int         # tick offset corresponding to the first array element
end
Drive(; tick, unit, _value, _type, _...) = begin
    t = value(tick)
    U = value(unit)
    V = valuetype(_type, U)
    a = _value
    v = a[1]
    Drive{V}(v, a, t)
end
supportedtags(::Val{:Drive}) = (:tick, :unit, :from, :by, :parameter, :override)
constructortags(::Val{:Drive}) = (:tick, :unit)
updatetags!(d, ::Val{:Drive}; _...) = begin
    !haskey(d, :tick) && (d[:tick] = :(context.clock.tick))
end
genvartype(v::VarInfo, ::Val{:Drive}; V, _...) = @q Drive{$V}
# default data source: either a `from` provider's column (selected by `by`,
# defaulting to the variable name) or the body/parameter value
gendefault(v::VarInfo, ::Val{:Drive}) = begin
    s = gettag(v, :from)
    x = if isnothing(s)
        k = gettag(v, :by)
        !isnothing(k) && error("missing `from` provider for `by` = $k")
        istag(v, :parameter) ? genparameter(v) : genbody(v)
    else
        istag(v, :parameter) && error("`parameter` is not allowed with provider: $s")
        !isnothing(v.body) && error("function body is not allowed with provider: $s\n$(v.body)")
        #HACK: needs quot if key is a symbol from VarInfo name
        k = gettag(v, :by, Meta.quot(v.name))
        @q $C.value($s)[!, $k]
    end
    u = gettag(v, :unit)
    @q $C.unitfy($x, $C.value($u))
end
# main step: index the array by elapsed ticks since construction (1-based)
genupdate(v::VarInfo, ::Val{:Drive}, ::MainStep; kw...) = begin
    t = gettag(v, :tick)
    @gensym s t0 t1 i e
    @q let $s = $(symstate(v)),
           $t0 = $s.tick,
           $t1 = $C.value($t),
           $i = $t1 - $t0 + 1,
           $e = $s.array[$i]
        $C.store!($s, $e)
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 864 | mutable struct Flag{Bool} <: State{Bool}
#NOTE(review): `Bool` in the struct header is a type parameter shadowing
# Base.Bool; in practice the state is always instantiated as Flag{Bool}
value::Bool
end
Flag(; _value, _...) = begin
    Flag{Bool}(_value)
end
supportedtags(::Val{:Flag}) = (:parameter, :override, :extern, :once, :when)
constructortags(::Val{:Flag}) = ()
genvartype(v::VarInfo, ::Val{:Flag}; _...) = @q Flag{Bool}
gendefault(v::VarInfo, ::Val{:Flag}) = istag(v, :parameter) ? genparameter(v) : false
# main step: re-evaluate the flag body; `once` flags stop updating after
# turning true; override/parameter flags are never updated
genupdate(v::VarInfo, ::Val{:Flag}, ::MainStep; kw...) = begin
    @gensym s f q
    if istag(v, :override, :parameter)
        nothing
    elseif istag(v, :once)
        @q let $s = $(symstate(v))
            if !$C.value($s)
                let $f = $(genbody(v))
                    $C.store!($s, $f)
                end
            end
        end
    else
        @q let $s = $(symstate(v)),
               $f = $(genbody(v))
            $C.store!($s, $f)
        end
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 557 | struct Hold{Any} <: State{Any}
name::Symbol                 # variable name, kept for error reporting
alias::Union{Symbol,Nothing} # optional alias, kept for error reporting
end
Hold(; _name, _alias, _...) = begin
    Hold{Any}(_name, _alias)
end
supportedtags(::Val{:Hold}) = ()
constructortags(::Val{:Hold}) = ()
# Hold is a placeholder state: any read or write is a programming error
value(s::Hold) = error("cannot read variable on hold: $(s.name) $(isnothing(s.alias) ? "" : "($(s.alias))")")
store!(s::Hold, _) = error("cannot store variable on hold: $(s.name) $(isnothing(s.alias) ? "" : "($(s.alias))")")
genvartype(v::VarInfo, ::Val{:Hold}; _...) = @q Hold{Any}
gendefault(v::VarInfo, ::Val{:Hold}) = nothing
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1030 | import QuadGK
# Integrate: numerically integrates the body expression over [from, to]
# with respect to its single keyword argument, using QuadGK.
mutable struct Integrate{V} <: State{V}
    value::V
end
Integrate(; unit, _type, _...) = begin
    V = valuetype(_type, value(unit))
    v = zero(V)
    Integrate{V}(v)
end
supportedtags(::Val{:Integrate}) = (:unit, :from, :to)
constructortags(::Val{:Integrate}) = (:unit,)
# integration bounds default to zero
updatetags!(d, ::Val{:Integrate}; _...) = begin
    !haskey(d, :from) && (d[:from] = @q zero($(d[:_type])))
    !haskey(d, :to) && (d[:to] = @q zero($(d[:_type])))
end
genvartype(v::VarInfo, ::Val{:Integrate}; V, _...) = @q Integrate{$V}
gendefault(v::VarInfo, ::Val{:Integrate}) = nothing
genupdate(v::VarInfo, ::Val{:Integrate}, ::MainStep; kw...) = begin
    kwarg = only(v.kwargs)
    # unit of the integration variable comes from the keyword argument spec
    u = extractfunckwargtuple(kwarg)[3]
    @gensym s a b f i
    @q let $s = $(symstate(v)),
           $a = $C.unitfy($C.value($(gettag(v, :from))), $u),
           $b = $C.unitfy($C.value($(gettag(v, :to))), $u),
           $f = $(genfunc(v; unitfy=false))
        $i = $C.QuadGK.quadgk($f, $a, $b) |> first
        $C.store!($s, $i)
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1449 | using Interpolations: linear_interpolation, Extrapolation
using DataStructures: OrderedDict
# Interpolate: a fixed linear interpolation over sorted knot/value pairs.
struct Interpolate{V} <: State{V}
    #TODO: check performance of non-concrete type
    value::Extrapolation{V}
end
Interpolate(; unit, knotunit, reverse, _value, _type, _...) = begin
    # knot/value pairs come from an existing interpolation object (possibly
    # swapped for reverse lookup), a two-column matrix, or any pair iterable
    l = if _value isa Extrapolation
        i = _value.itp
        if reverse
            #HACK: reverse interpolation
            zip(i.coefs, i.knots[1])
        else
            zip(i.knots[1], i.coefs)
        end
    else
        if _value isa Matrix
            zip(_value[:, 1], _value[:, 2])
        else
            _value
        end
    end
    d = OrderedDict(l)
    sort!(d)
    K = unitfy(collect(keys(d)), value(knotunit))
    V = unitfy(collect(values(d)), value(unit))
    v = linear_interpolation(K, V)
    #HACK: pick up unitfy-ed valuetype
    V = typeof(v).parameters[1]
    Interpolate{V}(v)
end
supportedtags(::Val{:Interpolate}) = (:unit, :knotunit, :reverse, :parameter)
constructortags(::Val{:Interpolate}) = (:unit, :knotunit, :reverse)
updatetags!(d, ::Val{:Interpolate}; _...) = begin
    !haskey(d, :reverse) && (d[:reverse] = false)
    !haskey(d, :knotunit) && (d[:knotunit] = missing)
end
genvartype(v::VarInfo, ::Val{:Interpolate}; V, U, _...) = @q Interpolate{$V}
gendefault(v::VarInfo, ::Val{:Interpolate}) = gendefaultvalue(v, parameter=true, unitfy=false)
# the interpolation table is fixed after construction; nothing to update
genupdate(v::VarInfo, ::Val{:Interpolate}, ::MainStep; kw...) = nothing
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 898 | struct Preserve{V} <: State{V}
value::V
end
# Preserve is the only State that can store value `nothing`
Preserve(; unit, optional, _value, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    v = _value
    #V = promote_type(V, typeof(v))
    # optional preserves widen the value type to allow `nothing`
    V = optional ? Union{V,Nothing} : V
    Preserve{V}(v)
end
supportedtags(::Val{:Preserve}) = (:unit, :optional, :parameter, :override, :extern, :ref, :min, :max, :round)
constructortags(::Val{:Preserve}) = (:unit, :optional)
updatetags!(d, ::Val{:Preserve}; _...) = begin
    !haskey(d, :optional) && (d[:optional] = false)
end
genvartype(v::VarInfo, ::Val{:Preserve}; V, _...) = begin
    if istag(v, :optional)
        V = @q Union{$V,Nothing}
    end
    @q Preserve{$V}
end
gendefault(v::VarInfo, ::Val{:Preserve}) = gendefaultvalue(v, parameter=true)
# preserved values never change after initialization
genupdate(v::VarInfo, ::Val{:Preserve}, ::MainStep; kw...) = nothing
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 3850 | struct Production{S<:System}
type::Type{S}  # system type to be instantiated later
args           # keyword arguments forwarded to the constructor
end
# a single Production behaves as a one-element iterable
Base.iterate(p::Production) = (p, nothing)
Base.iterate(p::Production, ::Nothing) = nothing
Base.eltype(::Type{Production}) = Production
# Produce: a state owning dynamically created child system(s); construction
# requests are queued as Productions and realized at the PreStage pass.
mutable struct Produce{P,V,S} <: State{P}
    name::Symbol # used in recurisve collecting in collect()
    value::V
    productions::Vector{Production{<:S}}  # queued construction requests
end
Produce(; _name, _type, _...) = begin
    T = _type
    #TODO: maybe now we can get rid of V
    if T <: System
        P = T
        V = Union{T,Nothing}
        v = nothing
        S = typefor(P)
    elseif T <: Vector{<:System}
        P = T
        V = T
        v = V[]
        S = typefor(eltype(P))
    end
    Produce{P,V,S}(_name, v, Production{S}[])
end
supportedtags(::Val{:Produce}) = (:single, :when)
constructortags(::Val{:Produce}) = ()
# queue a production request; actual construction is deferred
produce(s::Type{<:System}; args...) = Production(typefor(s), args)
produce(::Nothing; args...) = nothing
unittype(s::Produce) = nothing
# single-system Produce: a zero-or-one element collection interface
Base.getindex(s::Produce{<:System}, i::Int) = i == 1 ? s.value : throw(BoundsError(s, i))
Base.length(s::Produce{<:System}) = isnothing(s.value) ? 0 : 1
Base.iterate(s::Produce{<:System}) = isempty(s) ? nothing : (s.value, nothing)
Base.iterate(s::Produce{<:System}, ::Nothing) = nothing
Base.eltype(::Type{Produce{S}}) where {S<:System} = S
# vector Produce: delegate to the underlying vector
Base.getindex(s::Produce{<:Vector}, i::Int) = getindex(s.value, i)
Base.getindex(s::Produce{<:Vector}, ::Nothing) = s
Base.length(s::Produce{<:Vector}) = length(s.value)
Base.iterate(s::Produce{<:Vector}, i=1) = i > length(s) ? nothing : (s[i], i+1)
Base.eltype(::Type{<:Produce{Vector{S}}}) where {S<:System} = S
Base.isempty(s::Produce) = length(s) == 0
priority(::Type{<:Produce}) = PrePriority()
# produce!: enqueue requests (Production) or attach constructed systems
produce!(s::Produce, ::Nothing) = nothing
produce!(s::Produce, p::Production) = push!(s.productions, p)
produce!(s::Produce, P::Vector) = produce!.(Ref(s), P)
produce!(s::Produce, p::System) = (s.value = p)
produce!(s::Produce{V}, p::System) where {V<:Vector} = push!(s.value, p)
export produce
updatetags!(d, ::Val{:Produce}; _...) = begin
    #HACK: fragile type string check
    d[:single] = !occursin("Vector{", string(d[:_type]))
end
genvartype(v::VarInfo, ::Val{:Produce}; V, _...) = begin
    if istag(v, :single)
        @q Produce{$V,Union{$V,Nothing},typefor($V)}
    else
        @q Produce{$V,$V,typefor(eltype($V))}
    end
end
gendefault(v::VarInfo, ::Val{:Produce}) = nothing
# pre-stage: instantiate all queued productions, then recurse into children
genupdate(v::VarInfo, ::Val{:Produce}, ::PreStage; kw...) = begin
    @gensym s a P c p b
    @q let $s = $(symstate(v)),
           $a = $C.value($s),
           $P = $s.productions,
           $c = context
        for $p in $P
            $b = $p.type(; context=$c, $p.args...)
            $C.produce!($s, $b)
        end
        empty!($P)
        $C.update!($a, $C.PreStage())
    end
end
# Produce referenced in args expected to be raw state, not extracted by value(), for querying
genupdate(v::VarInfo, ::Val{:Produce}, ::PreStep; kw...) = symstate(v)
# main step: update the produced child system(s)
genupdate(v::VarInfo, ::Val{:Produce}, ::MainStep; kw...) = begin
    @gensym s a
    @q let $s = $(symstate(v)),
           $a = $C.value($s)
        $C.update!($a)
        $s
    end
end
# post step: evaluate the body (gated by `when`) to queue new productions;
# a `single` Produce only queues when it is still empty
genupdate(v::VarInfo, ::Val{:Produce}, ::PostStep; kw...) = begin
    @gensym s P
    q = if istag(v, :single)
        @q let $s = $(symstate(v))
            if isempty($s)
                let $P = $(genbody(v))
                    $C.produce!($s, $P)
                end
            end
        end
    else
        @q let $s = $(symstate(v)),
               $P = $(genbody(v))
            $C.produce!($s, $P)
        end
    end
    w = gettag(v, :when)
    isnothing(w) ? q : @q if $C.value($w); $q end
end
# post-stage: recurse into children for the closing stage pass
genupdate(v::VarInfo, ::Val{:Produce}, ::PostStage; kw...) = begin
    @gensym s a
    @q let $s = $(symstate(v)),
           $a = $C.value($s)
        $C.update!($a, $C.PostStage())
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1263 | using DataFrames: DataFrame
import CSV
# Provide: holds a DataFrame of time-indexed input data aligned with the
# simulation clock (rows at/after `init`, spaced exactly by `step`).
mutable struct Provide{V} <: State{V}
    value::V
end
Provide(; index, init, step, autounit, _value, _type, _...) = begin
    i = value(index)
    t = value(init)
    Δt = value(step)
    # accept either a CSV file path or anything DataFrame can consume
    df = DataFrame(_value isa String ? CSV.File(_value) : _value)
    df = autounit ? unitfy(df) : df
    # keep only rows aligned with the clock: at/after init, on step boundaries
    v = filter!(r -> r[i] >= t && iszero(typeof(Δt)(r[i] - t) % Δt), df)
    v[1, i] != t && error("incompatible index for initial time = $t\n$v")
    !all(isequal(Δt), diff(v[!, i])) && error("incompatible index for time step = $Δt\n$v")
    V = _type
    Provide{V}(v)
end
supportedtags(::Val{:Provide}) = (:index, :init, :step, :autounit, :parameter)
constructortags(::Val{:Provide}) = (:index, :init, :step, :autounit)
# defaults: `:index` column, clock-driven init/step, automatic unit labels
updatetags!(d, ::Val{:Provide}; _...) = begin
    !haskey(d, :index) && (d[:index] = QuoteNode(:index))
    !haskey(d, :init) && (d[:init] = :(context.clock.time))
    !haskey(d, :step) && (d[:step] = :(context.clock.step))
    !haskey(d, :autounit) && (d[:autounit] = true)
end
genvartype(v::VarInfo, ::Val{:Provide}; V, _...) = @q Provide{$V}
gendefault(v::VarInfo, ::Val{:Provide}) = istag(v, :parameter) ? genparameter(v) : genbody(v)
# the data table is fixed after construction; nothing to update
genupdate(v::VarInfo, ::Val{:Provide}, ::MainStep; kw...) = nothing
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 902 | mutable struct Remember{V} <: State{V}
value::V
done::Bool  # latched true once the value has been remembered
end
Remember(; unit, _value, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    v = _value
    Remember{V}(v, false)
end
supportedtags(::Val{:Remember}) = (:unit, :init, :when)
constructortags(::Val{:Remember}) = (:unit,)
genvartype(v::VarInfo, ::Val{:Remember}; V, _...) = @q Remember{$V}
# default value: `init` tag if given, otherwise zero in the state unit
gendefault(v::VarInfo, ::Val{:Remember}) = begin
    N = gettag(v, :_type)
    U = gettag(v, :unit)
    i = gettag(v, :init)
    if isnothing(i)
        @q zero($C.valuetype($N, $U))
    else
        @q $C.unitfy($C.value($i), $C.value($U))
    end
end
# main step: capture the body value once, the first time `when` turns true
genupdate(v::VarInfo, ::Val{:Remember}, ::MainStep; kw...) = begin
    w = gettag(v, :when)
    @gensym s
    @q let $s = $(symstate(v))
        if !($s.done) && $C.value($w)
            $(genstore(v; when=false))
            $s.done = true
        end
    end
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 3498 | mutable struct Solve{V} <: State{V}
value::V
end
Solve(; unit, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    v = zero(V)
    Solve{V}(v)
end
supportedtags(::Val{:Solve}) = (:unit, :lower, :upper, :pick)
constructortags(::Val{:Solve}) = (:unit,)
# \Equal opeartor for both solve/bisect
⩵(x, y) = x - y
export ⩵
using SymPy: SymPy, sympy
# Symbolically collect the polynomial coefficients of the equation body in
# the state variable, normalize by the leading coefficient, and return them
# (lowest degree first after the reverse) as parsed Julia expressions.
genpolynomial(v::VarInfo; scope) = begin
    x = v.name
    V = extractfuncargpair.(v.args) .|> first
    push!(V, x)
    #HACK: use scope of caller, avoiding eval into precompiled Cropbox module
    p = scope.eval(@q let $(V...)
        Cropbox.SymPy.@syms $(V...)
        Cropbox.sympy.Poly(begin
            $(v.body)
        end, $x)
    end)
    Q = p.coeffs()
    #HACK: normalize coefficient to avoid runtime unit generation
    Q = Q / Q[1] |> reverse .|> SymPy.simplify
    Q .|> repr .|> Meta.parse
end
# per-coefficient unit expressions: U^(n-1) down to U^0
genpolynomialunits(U, n) = [@q($U^$(i-1)) for i in n:-1:1]
import PolynomialRoots
# strip coefficient units, find roots numerically, keep only the real ones
solvepolynomial(p, pu) = begin
    sp = [deunitfy(q, qu) for (q, qu) in zip(p, pu)]
    r = PolynomialRoots.roots(sp)
    real.(filter!(isreal, r))
end
"""
    solvequadratic(a, b, c)

Return the real root(s) of `a*x^2 + b*x + c = 0` as a tuple.

Falls back to the linear solution `(-c/b,)` when `a == 0`. When the
discriminant is negative, the complex roots are ignored and the real part
`-b/2a` is returned with a warning.
"""
function solvequadratic(a, b, c)
    # degenerate (linear) case: b*x + c = 0
    if a == 0
        return (-c / b,)
    end
    Δ = b^2 - 4a * c
    if Δ > 0
        # two distinct real roots
        r = √Δ
        return ((-b - r) / (2a), (-b + r) / (2a))
    elseif Δ == 0
        # repeated real root
        return (-b / (2a),)
    else
        #HACK: make up a non-complex solution
        x = -b / (2a)
        @warn "ignore complex roots for quadratic equation: $a*x^2 + $b*x + $c = 0" x
        return (x,)
    end
end
# defaults: unbounded solution range, pick the maximum admissible root
updatetags!(d, ::Val{:Solve}; _...) = begin
    !haskey(d, :lower) && (d[:lower] = -Inf)
    !haskey(d, :upper) && (d[:upper] = Inf)
    !haskey(d, :pick) && (d[:pick] = QuoteNode(:maximum))
end
genvartype(v::VarInfo, ::Val{:Solve}; V, _...) = @q Solve{$V}
gendefault(v::VarInfo, ::Val{:Solve}) = nothing
# main step: solve the polynomial equation analytically (linear/quadratic)
# or numerically (higher degree), filter roots into [lower, upper] (clamping
# when none fit), and select one with the `pick` function
genupdate(v::VarInfo, ::Val{:Solve}, ::MainStep; scope, kw...) = begin
    U = gettag(v, :unit)
    isnothing(U) && (U = @q(u"NoUnits"))
    P = genpolynomial(v; scope)
    n = length(P)
    PU = genpolynomialunits(U, n)
    lower = gettag(v, :lower)
    upper = gettag(v, :upper)
    pick = gettag(v, :pick).value
    body = if n == 2 # linear
        @gensym a b xl xu
        @q let $a = $(esc(P[2])),
               $b = $(esc(P[1])),
               $xl = $C.unitfy($C.value($lower), $U),
               $xu = $C.unitfy($C.value($upper), $U)
            clamp(-$b / $a, $xl, $xu)
        end
    elseif n == 3 # quadratic
        @gensym a b c X xl xu l
        @q let $a = $C.deunitfy($(esc(P[3])), $(PU[3])),
               $b = $C.deunitfy($(esc(P[2])), $(PU[2])),
               $c = $C.deunitfy($(esc(P[1])), $(PU[1])),
               $X = $C.unitfy($C.solvequadratic($a, $b, $c), $U),
               $xl = $C.unitfy($C.value($lower), $U),
               $xu = $C.unitfy($C.value($upper), $U)
            #TODO: remove duplication
            $l = filter(x -> $xl <= x <= $xu, $X)
            isempty($l) && ($l = clamp.($X, $xl, $xu))
            $l |> $pick
        end
    else # generic polynomials (slow!)
        @gensym X xl xu l
        @q let $X = $C.unitfy($C.solvepolynomial([$(esc.(P)...)], [$(PU...)]), $U),
               $xl = $C.unitfy($C.value($lower), $U),
               $xu = $C.unitfy($C.value($upper), $U)
            $l = filter(x -> $xl <= x <= $xu, $X)
            #TODO: better report error instead of silent clamp?
            isempty($l) && ($l = clamp.($X, $xl, $xu))
            $l |> $pick
        end
    end
    val = genbody(v, body)
    genstore(v, val)
end
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 2304 | using DataStructures: OrderedDict
# Tabulation: a 1- or 2-level ordered mapping with property-style access,
# backing the Tabulate state (named rows of named columns).
abstract type Tabulation{V} end
value(t::Tabulation) = getfield(t, :value)
Base.adjoint(t::Tabulation) = value(t)
Base.getproperty(t::Tabulation, k::Symbol) = value(t)[k]
Base.getindex(t::Tabulation, k::Symbol) = getproperty(t, k)
Base.iterate(t::Tabulation) = iterate(value(t))
Base.iterate(t::Tabulation, i) = iterate(value(t), i)
# one row: column name => value
struct TabulationCols{V} <: Tabulation{V}
    value::OrderedDict{Symbol,V}
    TabulationCols{V}(x...) where V = new{V}(OrderedDict(x...))
end
# whole table: row name => row
struct TabulationRows{V} <: Tabulation{V}
    value::OrderedDict{Symbol,TabulationCols{V}}
    TabulationRows{V}(x...) where V = new{V}(OrderedDict(x...))
end
import DataFrames: DataFrames, DataFrame
DataFrames.DataFrame(t::TabulationCols) = DataFrame(value(t))
# `index=true` prepends an unnamed column of row labels
DataFrames.DataFrame(t::TabulationRows; index=true) = begin
    v = value(t)
    I = DataFrame("" => collect(keys(v)))
    C = DataFrame(value.(values(v)))
    index ? [I C] : C
end
Base.Matrix(t::TabulationRows) = Matrix(DataFrame(t; index=false))
Base.show(io::IO, t::TabulationCols) = show(io, DataFrame(t); summary=false, eltypes=false, show_row_number=false, vlines=:none)
Base.show(io::IO, t::TabulationRows) = show(io, DataFrame(t); summary=false, eltypes=false, show_row_number=false, vlines=[1])
# build TabulationRows from matrix `m` with row labels R and column labels C
tabulation(m, R, C, V) = TabulationRows{V}(R .=> [TabulationCols{V}(zip(C, V.(m[i,:]))) for i in 1:size(m, 1)])
# Tabulate: a fixed, unitfied lookup table state with labeled rows/columns.
struct Tabulate{V} <: State{V}
    value::TabulationRows{V}
    rows::Tuple{Vararg{Symbol}}
    columns::Tuple{Vararg{Symbol}}
end
Base.getproperty(t::Tabulate, k::Symbol) = t[k]
Base.show(io::IO, t::Tabulate) = show(io, Matrix(value(t)))
Base.show(io::IO, ::MIME"text/plain", t::Tabulate) = show(io, value(t))
Tabulate(; unit, rows, columns=(), _value, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    # columns default to the same labels as rows (square table)
    columns = isempty(columns) ? rows : columns
    v = tabulation(_value, rows, columns, V)
    Tabulate{V}(v, rows, columns)
end
supportedtags(::Val{:Tabulate}) = (:unit, :rows, :columns, :parameter)
constructortags(::Val{:Tabulate}) = (:unit, :rows, :columns)
genvartype(v::VarInfo, ::Val{:Tabulate}; V, _...) = @q Tabulate{$V}
gendefault(v::VarInfo, ::Val{:Tabulate}) = gendefaultvalue(v, parameter=true)
# the table is fixed after construction; nothing to update
genupdate(v::VarInfo, ::Val{:Tabulate}, ::MainStep; kw...) = nothing
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 499 | mutable struct Track{V} <: State{V}
value::V
end
Track(; unit, _value, _type, _...) = begin
    U = value(unit)
    V = valuetype(_type, U)
    v = _value
    #V = promote_type(V, typeof(v))
    Track{V}(v)
end
supportedtags(::Val{:Track}) = (:unit, :override, :extern, :ref, :skip, :init, :min, :max, :round, :when)
constructortags(::Val{:Track}) = (:unit,)
genvartype(v::VarInfo, ::Val{:Track}; V, _...) = @q Track{$V}
gendefault(v::VarInfo, ::Val{:Track}) = gendefaultvalue(v, init=true)
#NOTE(review): no genupdate method here — presumably Track relies on the
# default (Val(nothing)) MainStep codegen; confirm in the dispatch fallback
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 241 | struct Wrap{V} <: State{V}
    value::V
end
# Pass-through state: simply boxes an arbitrary value as a `State`.
Wrap(; _value, _...) = begin
    v = _value
    V = typeof(v)
    Wrap{V}(v)
end
supportedtags(::Val{:Wrap}) = ()
constructortags(::Val{:Wrap}) = ()
# convenience constructor for wrapping a value without the keyword interface
wrap(v::V) where V = Wrap{V}(v)
export wrap
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 833 | using TimeZones: TimeZones, ZonedDateTime, @tz_str
export ZonedDateTime, @tz_str
using Dates: Dates, Date
export Dates, Date
# Calendar: maps elapsed simulation clock time onto zoned wall-clock datetimes.
@system Calendar begin
    init ~ preserve::datetime(extern, parameter)
    last => nothing ~ preserve::datetime(extern, parameter, optional)
    # current datetime = start datetime + elapsed simulation time (as seconds)
    time(t0=init, t=context.clock.time) => t0 + convert(Cropbox.Dates.Second, t) ~ track::datetime
    date(time) => Cropbox.Dates.Date(time) ~ track::date
    step(context.clock.step) ~ preserve(u"hr")
    # true once the optional `last` datetime has been reached
    stop(time, last) => begin
        isnothing(last) ? false : (time >= last)
    end ~ flag
    count(init, last, step) => begin
        if isnothing(last)
            nothing
        else
            # number of update!() required to reach `last` time
            (last - init) / step
        end
    end ~ preserve::int(round, optional)
end
export Calendar
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 586 | abstract type Clock <: System end
# default simulation time unit; overridden per Clock subtype below
timeunit(::Type{<:Clock}) = u"hr"
@system Clock{timeunit = timeunit(Clock)} begin
    context ~ ::Nothing
    config ~ ::Config(override)
    init => 0 ~ preserve(unit=timeunit, parameter)
    step => 1 ~ preserve(unit=timeunit, parameter)
    # continuous simulation time plus a discrete update counter
    time => nothing ~ advance(init=init, step=step, unit=timeunit)
    tick => nothing ~ advance::int
end
abstract type DailyClock <: Clock end
timeunit(::Type{<:DailyClock}) = u"d"
@system DailyClock{timeunit = timeunit(DailyClock)}(Clock) <: Clock
# instance-level query delegates to the type-level definition
timeunit(c::C) where {C<:Clock} = timeunit(C)
export Clock
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 670 | @system Context begin
    context ~ ::Nothing
    config ~ ::Config(override)
    clock(config) ~ ::Clock
end
#TODO: remove once Context and Clock get merged
@system DailyContext{Clock => DailyClock}(Context) <: Context
timeunit(c::Context) = timeunit(c.clock)
#HACK: explicitly set up timeunit for default Context
#TODO: merge Context and Clock to remove boilerplates
timeunit(::Type{Context}) = timeunit(Clock)
timeunit(::Type{typefor(Context)}) = timeunit(Context)
#HACK: fallback when timeunit() not available for custom Context
# look up the declared type of the `clock` variable in the generated var info
# and query that clock type's time unit
timeunit(C::Type{<:Context}) = only(filter!(v -> v.name == :clock, geninfos(C))).type |> scopeof(C).eval |> timeunit
export Context
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1273 | import Random
# Mixin for top-level simulated systems: owns the configuration and the root
# Context (and through it, the Clock).
@system Controller begin
    config ~ ::Config(override)
    context(config) ~ ::Context(context)
end
"""
instance(S; <keyword arguments>) -> S
Make an instance of system `S` with an initial condition specified in configuration and additional options.
See also: [`@config`](@ref), [`simulate`](@ref)
# Arguments
- `S::Type{<:System}`: type of system to be instantiated.
# Keyword Arguments
- `config=()`: configuration containing parameter values for the system.
- `options=()`: keyword arguments passed down to the constructor of `S`; named tuple expected.
- `seed=nothing`: random seed initialized before parsing configuration and making an instance.
# Examples
```julia-repl
julia> @system S(Controller) begin
a => 1 ~ preserve(parameter)
b(a) ~ accumulate
end;
julia> instance(S)
S
context = <Context>
config = <Config>
a = 1.0
b = 0.0
```
"""
function instance(S::Type{<:System}; config=(), options=(), seed=nothing)
    if seed !== nothing
        # seed the global RNG before configuration parsing and construction
        # so instantiation is reproducible
        Random.seed!(seed)
    end
    parsed = configure(config)
    #HACK: a key of `0` acts as a placeholder for the controller system's name
    parsed = configure((((k == Symbol(0) ? namefor(S) : k) => v) for (k, v) in parsed)...)
    s = S(; config=parsed, options...)
    # run one update so the returned instance reflects its initial state
    update!(s)
end
export Controller, instance
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 2934 | using DataFrames: DataFrames, DataFrame
import Dates
using TimeZones: TimeZones, ZonedDateTime, @tz_str
using TypedTables: TypedTables, Table
import CSV
# Common base for systems backed by an external tabular data source: maps the
# simulation clock onto a row index into the loaded table.
@system StoreBase begin
    filename => "" ~ preserve::str(parameter)
    ik: indexkey => :index ~ preserve::sym(parameter)
    # tick is 0-based; stored rows are 1-based
    i(t=context.clock.tick): index => t + 1 ~ track::int
    ix: indexer ~ hold
    s: store ~ hold
end
# Loads a CSV file into a DataFrame and serves one row per clock index.
@system DataFrameStore(StoreBase) begin
    ix(; r::Cropbox.DataFrames.DataFrameRow): indexer => Cropbox.DataFrames.row(r) ~ call::int
    df(filename, ik, ix): dataframe => begin
        df = Cropbox.CSV.File(filename) |> Cropbox.DataFrames.DataFrame |> unitfy
        # attach an index column computed by the (overridable) indexer
        df[!, ik] = map(ix, eachrow(df))
        df
    end ~ preserve::Cropbox.DataFrames.DataFrame(extern, parameter)
    gdf(df, ik): grouped_dataframe => begin
        Cropbox.DataFrames.groupby(df, ik)
    end ~ preserve::Cropbox.DataFrames.GroupedDataFrame{Cropbox.DataFrame}
    # current row: group keyed by the current index, first (only) row
    s(gdf, i): store => begin
        gdf[(i,)][1, :]
    end ~ track::Cropbox.DataFrames.DataFrameRow{Cropbox.DataFrame,Cropbox.DataFrames.Index}
end
# Indexes rows by day of simulation taken from a `day` column.
@system DayStore(DataFrameStore) begin
    i(context.clock.time): index ~ track::int(u"d")
    daykey => :day ~ preserve::sym(parameter)
    ix(daykey; r::Cropbox.DataFrames.DataFrameRow): indexer => r[daykey] ~ call::int(u"d")
end
# Indexes rows by calendar date taken from a `date` column.
@system DateStore(DataFrameStore) begin
    calendar(context) ~ ::Calendar
    i(t=calendar.time): index => Cropbox.Dates.Date(t) ~ track::date
    datekey => :date ~ preserve::sym(parameter)
    ix(datekey; r::Cropbox.DataFrames.DataFrameRow): indexer => r[datekey] ~ call::date
end
# Indexes rows by zoned datetime assembled from separate date and time columns.
@system TimeStore(DataFrameStore) begin
    calendar(context) ~ ::Calendar
    i(calendar.time): index ~ track::datetime
    datekey => :date ~ preserve::sym(parameter)
    timekey => :time ~ preserve::sym(parameter)
    tz: timezone => Cropbox.tz"UTC" ~ preserve::Cropbox.TimeZones.TimeZone(parameter)
    ix(datekey, timekey, tz; r::Cropbox.DataFrames.DataFrameRow): indexer => begin
        #HACK: handle ambiguous time conversion under DST
        # a repeated wall-clock time in consecutive rows marks the second
        # occurrence of the ambiguous local time
        occurrence = 1
        i = Cropbox.DataFrames.row(r)
        if i > 1
            r0 = parent(r)[i-1, :]
            r0[timekey] == r[timekey] && (occurrence = 2)
        end
        dt = Cropbox.Dates.DateTime(r[datekey], r[timekey])
        Cropbox.ZonedDateTime(dt, tz, occurrence)
    end ~ call::datetime
end
# TypedTables-backed store variant serving rows as NamedTuples.
@system TableStore(StoreBase) begin
    #TODO: avoid dynamic dispatch on Table/NamedTuple
    ix(; i::Int, r::NamedTuple): indexer => i ~ call::int
    tb(filename, ik, ix): table => begin
        tb = Cropbox.CSV.File(filename) |> Cropbox.TypedTables.FlexTable
        # attach an index column computed row-by-row by the indexer
        setproperty!(tb, ik, map(enumerate(eachrow(tb))) do (i, r)
            ix(i, r)
        end)
        Cropbox.Table(tb)
    end ~ preserve::Cropbox.Table(extern, parameter)
    s(tb, i): store => tb[i] ~ track::NamedTuple
end
export DataFrameStore, DayStore, DateStore, TimeStore, TableStore
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1758 | @system ThermalTime begin
    # base mixin for temperature response functions: subtypes provide ΔT
    T: temperature ~ track(u"°C", override)
    Δt(context.clock.step): timestep ~ preserve(u"hr")
    ΔT: magnitude ~ track
    r(ΔT, Δt): rate => ΔT / Δt ~ track(u"hr^-1")
end
# Growing-degree response: linear above a base temperature, optionally capped
# at an optimum and zeroed at/above a maximum.
@system GrowingDegree(ThermalTime) begin
    Tb: base_temperature ~ preserve(u"°C", extern, parameter)
    To: optimal_temperature ~ preserve(u"°C", optional, extern, parameter)
    Tx: maximum_temperature ~ preserve(u"°C", optional, extern, parameter)
    ΔT(T, Tb, To, Tx): magnitude => begin
        T = !isnothing(To) ? min(T, To) : T
        # at/above the maximum the response collapses to zero (T reset to base)
        T = !isnothing(Tx) && T >= Tx ? Tb : T
        T - Tb
    end ~ track(u"K", min=0)
    r(ΔT, Δt): rate => ΔT / Δt ~ track(u"K/hr")
end
# Normalized beta-function temperature response between minimum and maximum,
# peaking at the optimum.
@system BetaFunction(ThermalTime) begin
    Tn: minimum_temperature => 0 ~ preserve(u"°C", extern, parameter)
    To: optimal_temperature ~ preserve(u"°C", extern, parameter)
    Tx: maximum_temperature ~ preserve(u"°C", extern, parameter)
    β: beta => 1 ~ preserve(parameter)
    ΔT(T, Tn, To, Tx, β): magnitude => begin
        # beta function, See Yin et al. (1995), Ag For Meteorol., Yan and Hunt (1999) AnnBot, SK
        if (Tn < T < Tx) && (Tn < To < Tx)
            Ton = To - Tn
            Txo = Tx - To
            f = (T - Tn) / Ton
            g = (Tx - T) / Txo
            α = β * (Ton / Txo)
            f^α * g^β
        else
            # outside the viable temperature range the response is zero
            0.
        end
    end ~ track
end
# Exponential Q10 temperature response relative to the optimum temperature.
@system Q10Function(ThermalTime) begin
    To: optimal_temperature ~ preserve(u"°C", extern, parameter)
    Q10 => 2 ~ preserve(extern, parameter)
    #FIXME: Q10 isn't actually a thermal function like others (not a rate, check unit)
    ΔT(T, To, Q10): magnitude => begin
        Q10^((T - To) / 10u"K")
    end ~ track
end
export GrowingDegree, BetaFunction, Q10Function
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 6348 | using DataStructures: OrderedDict
import DataFrames
using StatsBase: StatsBase, mean
import Random
import BlackBoxOptim
"""
    metricfunc(metric) -> Function

Map a metric name to a two-argument function `(E, O) -> score` comparing
estimates `E` against observations `O`. Supported symbols: `:rmse`, `:nrmse`,
`:rmspe`, `:mae`, `:mape`, `:ef` (Nash-Sutcliffe model efficiency), `:dr`
(Willmott's refined index of agreement). A non-`Symbol` argument is assumed to
already be a callable metric and is returned unchanged.
"""
function metricfunc(metric::Symbol)
    rmse(E, O) = sqrt(mean((E .- O) .^ 2))
    nrmse(E, O) = rmse(E, O) / mean(O)
    rmspe(E, O) = sqrt(mean(((E .- O) ./ O) .^ 2))
    mae(E, O) = mean(abs.(E .- O))
    mape(E, O) = mean(abs.((E .- O) ./ O))
    # Nash-Sutcliffe model efficiency coefficient (NSE)
    ef(E, O) = 1 - sum((E .- O) .^ 2) / sum((O .- mean(O)) .^ 2)
    # Willmott's refined index of agreement (d_r)
    dr(E, O) = begin
        a = sum(abs.(E .- O))
        b = 2 * sum(abs.(O .- mean(O)))
        a <= b ? 1 - a / b : b / a - 1
    end
    return metric == :rmse  ? rmse :
           metric == :nrmse ? nrmse :
           metric == :rmspe ? rmspe :
           metric == :mae   ? mae :
           metric == :mape  ? mape :
           metric == :ef    ? ef :
           metric == :dr    ? dr :
           error("unsupported metric: $metric")
end
# anything other than a Symbol is assumed to be a metric function already
metricfunc(metric) = metric
#HACK: handle index columns with non-identical, but compatible units
# https://github.com/JuliaData/DataFrames.jl/issues/2486
# Promote the eltype of shared index column(s) `on` across all data frames in
# place, so a later join does not fail on merely-compatible (e.g. unit-wrapped)
# column types.
normalize!(dfs::DataFrame...; on) = begin
    n = length(dfs)
    # a bare Symbol means "the same column name in every frame"; otherwise `i`
    # is assumed to be a per-frame collection of column names
    is(i::Symbol) = repeat([i], n)
    is(i) = collect(i)
    for i in on
        cols = getindex.(dfs, !, is(i))
        elts = eltype.(cols)
        t = promote_type(elts...)
        for (d, c, e) in zip(dfs, cols, elts)
            # NOTE(review): writes back under the original key `i`, not the
            # per-frame name — appears correct only when `i` is a Symbol;
            # confirm no caller passes per-frame name collections here.
            e != t && setindex!(d, convert.(t, c), !, i)
        end
    end
end
"""
calibrate(S, obs; <keyword arguments>) -> Config | OrderedDict
Obtain a set of parameters for the given system `S` that simulates provided observation `obs` closely as possible. A multitude of simulations are conducted with a differing combination of parameter sets specified by the range of possible values and the optimum is selected based on a choice of evaluation metric. Internally, differential evolution algorithm from BlackboxOptim.jl is used.
# Arguments
- `S::Type{<:System}`: type of system to be calibrated.
- `obs::DataFrame`: observation data to be used for calibration.
# Keyword Arguments
## Configuration
- `config=()`: a single base configuration for the system (can't be used with `configs`).
- `configs=[]`: multiple base configurations for the system (can't be used with `config`).
## Layout
- `index=nothing`: variables to construct index columns of the output; default falls back to `context.clock.time`.
- `target`: variables to construct non-index columns of the output.
## Calibration
- `parameters`: parameters with a range of boundary values to be calibrated within.
- `metric=nothing`: evaluation metric (`:rmse`, `:nrmse`, `:mae`, `:mape`, `:ef`, `:dr`); default is RMSE.
## Multi-objective
- `weight=nothing`: weights for calibrating multiple targets; default assumes equal weights.
- `pareto=false`: returns a dictionary containing Pareto frontier instead of a single solution satisfying multiple targets.
## Advanced
- `optim=()`: extra options for `BlackBoxOptim.bboptimize`.
Remaining keyword arguments are passed down to `simulate` with regard to running system `S`.
See also: [`simulate`](@ref), [`evaluate`](@ref), [`@config`](@ref)
# Examples
```julia-repl
julia> @system S(Controller) begin
a => 0 ~ preserve(parameter)
b(a) ~ accumulate
end;
julia> obs = DataFrame(time=10u"hr", b=200);
julia> p = calibrate(S, obs; target=:b, parameters=:S => :a => (0, 100), stop=10)
...
Config for 1 system:
S
a = 20.0
```
"""
# Entry point dispatching between a single base `config` and multiple
# `configs`; exactly one of the two may be supplied (empty means "not given").
function calibrate(S::Type{<:System}, obs::DataFrame; config=(), configs=[], kwargs...)
    if !isempty(config) && !isempty(configs)
        # both were given: ambiguous, refuse to guess
        @error "redundant configurations" config configs
    elseif isempty(configs)
        # fall back to a single-element configuration list
        calibrate(S, obs, [config]; kwargs...)
    else
        calibrate(S, obs, configs; kwargs...)
    end
end
# Core calibration: searches the `parameters` space with differential
# evolution (BlackBoxOptim) to minimize the chosen metric between simulated
# and observed target columns, aggregated over all base `configs`.
calibrate(S::Type{<:System}, obs::DataFrame, configs::Vector; index=nothing, target, parameters, metric=nothing, weight=nothing, pareto=false, optim=(), kwargs...) = begin
    #HACK: use copy due to normalize!
    obs = copy(obs)
    P = configure(parameters)
    K = parameterkeys(P)
    V = parametervalues(P)
    U = parameterunits(P)
    I = parseindex(index, S) |> keys |> collect
    T = parsetarget(target, S) |> keys |> collect
    n = length(T)
    multi = n > 1
    isnothing(metric) && (metric = :rmse)
    metric = metricfunc(metric)
    # snapshot predicate: keep only simulation rows whose index values occur
    # in the observation data
    IC = [t for t in zip(getproperty.(Ref(obs), I)...)]
    IV = parseindex(index, S) |> values |> Tuple
    snap(s) = getproperty.(s, IV) .|> value in IC
    # names the estimated target columns will get after join de-duplication
    NT = DataFrames.make_unique([propertynames(obs)..., T...], makeunique=true)
    T1 = NT[end-n+1:end]
    residual(c) = begin
        est = simulate(S; config=c, index, target, snap, verbose=false, kwargs...)
        # an empty simulation result is penalized with infinite residuals
        isempty(est) && return repeat([Inf], n)
        normalize!(est, obs, on=I)
        df = DataFrames.innerjoin(est, obs, on=I, makeunique=true)
        r = [metric(df[!, e], df[!, o]) for (e, o) in zip(T, T1)]
    end
    config(X) = parameterzip(K, X, U)
    cost(X) = begin
        c = config(X)
        l = length(configs)
        R = Vector(undef, l)
        # evaluate each base configuration in parallel
        Threads.@threads for i in 1:l
            R[i] = residual(configure(configs[i], c))
        end
        # sum residuals per target across configurations
        A = eachrow(hcat(R...)) .|> Iterators.flatten .|> collect |> deunitfy
        e = eachrow(hcat(A...)) |> sum
        # BlackBoxOptim expects Float64, not even Int
        e = Float64.(e)
        multi ? Tuple(e) : e[1]
    end
    range = map((p, u) -> Float64.(Tuple(deunitfy(p, u))), V, U)
    method = if multi
        # multi-objective: Borg MOEA with (weighted) mean aggregation
        agg = isnothing(weight) ? mean : let w = StatsBase.weights(weight); f -> mean(f, w) end
        (Method=:borg_moea, FitnessScheme=BlackBoxOptim.ParetoFitnessScheme{n}(aggregator=agg))
    else
        ()
    end
    optim_default = (;
        MaxSteps=5000,
        TraceInterval=10,
        RandomizeRngSeed=false,
    )
    #HACK: always initialize random seed first on our end regardless of RandomizeRngSeed option
    # https://github.com/robertfeldt/BlackBoxOptim.jl/issues/158
    Random.seed!(0)
    r = BlackBoxOptim.bboptimize(cost;
        SearchRange=range,
        method...,
        optim_default...,
        optim...
    )
    if multi && pareto
        # return the whole Pareto frontier as fitness => Config pairs
        pf = BlackBoxOptim.pareto_frontier(r)
        OrderedDict(BlackBoxOptim.fitness.(pf) .=> config.(BlackBoxOptim.params.(pf)))
    else
        config(BlackBoxOptim.best_candidate(r))
    end
end
export calibrate
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 2237 | import Crayons
import Highlights
#HACK: produce an empty string for `stylesheet()` under "text/plain"
function Highlights.Format.render(::IO, ::MIME"text/plain", ::Highlights.Themes.Theme) end
# Render highlighted tokens to a plain-text terminal using ANSI colors via
# Crayons instead of HTML markup.
function Highlights.Format.render(io::IO, ::MIME"text/plain", tokens::Highlights.Format.TokenIterator)
    for (str, id, style) in tokens
        # apply foreground/background only when the theme activates them
        fg = style.fg.active ? map(Int, (style.fg.r, style.fg.g, style.fg.b)) : nothing
        bg = style.bg.active ? map(Int, (style.bg.r, style.bg.g, style.bg.b)) : nothing
        crayon = Crayons.Crayon(
            foreground = fg,
            background = bg,
            bold = style.bold,
            italics = style.italic,
            underline = style.underline,
        )
        # inv(crayon) resets the styling after the token text
        print(io, crayon, str, inv(crayon))
    end
end
# Write `source` to `filename` as a standalone highlighted-HTML snippet with an
# embedded Fira Code web-font stylesheet.
writecodehtml(filename::AbstractString, source; lexer=Highlights.Lexers.JuliaLexer, theme=Highlights.Themes.TangoTheme) = begin
    open(filename, "w") do io
        print(io, """
        <style>
        @import url(https://cdn.jsdelivr.net/gh/tonsky/FiraCode@4/distr/fira_code.css);
        pre.hljl { font-family: 'Fira Code'; font-size: x-small }
        </style>
        """)
        Highlights.stylesheet(io, MIME("text/html"), theme)
        Highlights.highlight(io, MIME("text/html"), source, lexer, theme)
    end
end
# Token color palette used when printing system/variable listings to a
# terminal; each singleton maps to a Crayons foreground color.
abstract type TokenColor end
struct SystemColor <: TokenColor end
struct VarColor <: TokenColor end
struct StateColor <: TokenColor end
struct NonStateColor <: TokenColor end
struct MiscColor <: TokenColor end
struct NoColor <: TokenColor end
# honor the `color` flag by collapsing every color to NoColor when disabled
tokencolor(c::TokenColor; color::Bool) = tokencolor(color ? c : NoColor())
tokencolor(::SystemColor) = Crayons.Box.LIGHT_MAGENTA_FG
tokencolor(::VarColor) = Crayons.Box.LIGHT_BLUE_FG
tokencolor(::StateColor) = Crayons.Box.CYAN_FG
tokencolor(::NonStateColor) = Crayons.Box.LIGHT_GREEN_FG
tokencolor(::MiscColor) = Crayons.Box.DARK_GRAY_FG
tokencolor(::NoColor) = Crayons.Box.DEFAULT_FG
# compatibility for dive() where escapes are mandatory
system_color(s) = tokencolor(SystemColor())(s)
variable_color(s) = tokencolor(VarColor())(s)
state_color(s) = tokencolor(StateColor())(s)
non_state_color(s) = tokencolor(NonStateColor())(s)
misc_color(s) = tokencolor(MiscColor())(s)
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 3983 | import REPL.TerminalMenus
import REPL.Terminals
# One entry of the interactive `dive` menu: a display name, an optional label,
# and the underlying value the entry leads to.
struct MenuItem{V}
    name::String
    label::String
    value::V
end
name(m::MenuItem) = m.name
# label falls back to the name when empty
label(m::MenuItem) = isempty(m.label) ? m.name : m.label
value(m::MenuItem) = m.value
# one menu line: "label = value", with the value repr capped by terminal width
text(m::MenuItem) = begin
    l = label(m)
    w = Terminals.width(TerminalMenus.terminal)
    #HACK: length(l) contains escape sequences, so actual line may look shorter
    v = repr(value(m); context=:maxlength => w - length(l))
    isempty(l) ? v : "$l $(misc_color("=")) $v"
end
# breadcrumb title colored by the kind of value at each navigation level
title(m::MenuItem{<:System}) = "$(system_color(name(m)))"
title(m::MenuItem{<:State}) = "$(state_color(name(m)))"
title(m::MenuItem) = "$(non_state_color(name(m)))"
title(t::Vector{<:MenuItem}) = begin
    sep = " $(misc_color(">")) "
    join(title.(t), sep)
end
# Build menu items for each variable of a system (canonical name plus alias).
dive(s::System, t) = dive(map(zip(fieldnamesalias(s), s)) do ((n, a), v)
    k = string(n)
    cn = string(uncanonicalname(n))
    ca = string(uncanonicalname(a))
    l = "$(variable_color(cn))"
    !isnothing(a) && (l *= " $(misc_color("($ca)"))")
    MenuItem(k, l, v)
end, t)
# collections are listed by 1-based position
dive(s::Vector{<:System}, t) = dive(map(t -> MenuItem(string(t[1]), "", t[2]), enumerate(s)), t)
dive(s::State{<:System}, t) = dive([MenuItem("1", "", s')], t)
dive(s::State{<:Vector}, t) = dive(map(t -> MenuItem(string(t[1]), "", t[2]), enumerate(s')), t)
# leaf state: render the `look` output for the variable
dive(s::State, t) = dive(t) do io
    look(io, MIME("text/plain"), value(t[end-1]), Symbol(name(t[end])))
end
# interactive menu loop over the current list of items
dive(l::Vector{<:MenuItem}, t) = begin
    isempty(l) && return
    term = TerminalMenus.terminal
    o = term.out_stream
    i = 1
    while true
        println(o, title(t))
        N = length(l)
        M = TerminalMenus.RadioMenu(text.(l); charset=:unicode, pagesize=N)
        i = TerminalMenus.request(M; cursor=i)
        n = min(N, M.pagesize)
        #HACK: for single option menu?
        n == 1 && (n += 1)
        print(o, repeat("\x1b[9999D\x1b[1A", n+1)) # move up
        print(o, "\x1b[J") # clear lines below
        if i > 0
            v = l[i]
            dive(value(v), [t..., v])
        else
            # menu canceled: back up one navigation level
            break
        end
    end
end
dive(m::MenuItem, t) = dive(value(m), t)
# generic leaf: pretty-print the value itself
dive(v, t) = dive(t) do io
    show(IOContext(io, :limit => true), MIME("text/plain"), v)
end
# render `f`'s output, then wait for one key: enter throws the selected value
# back out of dive(); ctrl-c aborts the whole session
dive(f::Function, t) = begin
    term = TerminalMenus.terminal
    i = term.in_stream
    o = term.out_stream
    b = IOBuffer()
    #HACK: dive() assumes color terminal
    x = IOContext(b, :color => get(o, :color, true))
    println(x, title(t))
    f(x)
    println(x)
    n = countlines(seekstart(b))
    print(o, String(take!(b)))
    Terminals.raw!(term, true) && print(o, "\x1b[?25l") # hide cursor
    c = TerminalMenus.readkey(i)
    Terminals.raw!(term, false) && print(o, "\x1b[?25h") # unhide cursor
    print(o, repeat("\x1b[9999D\x1b[1A", n)) # move up
    print(o, "\x1b[J") # clear lines below
    if c == 13 # enter
        throw(value(t[end]))
    elseif c == 3 # ctrl-c
        throw(InterruptException())
    end
end
"""
dive(s)
Inspect an instance of system `s` by navigating hierarchy of variables displayed in a tree structure.
Pressing up/down arrow keys allows navigation. Press 'enter' to dive into a deeper level and press 'q' to come back. A leaf node of the tree shows an output of `look` regarding the variable. Pressing 'enter' again would return a variable itself and exit to REPL.
Only works in a terminal environment; not working on Jupyter Notebook.
See also: [`look`](@ref)
# Arguments
- `s::System`: instance of target system.
# Examples
```julia-repl
julia> @system S(Controller) begin
a => 1 ~ preserve(parameter)
end;
julia> s = instance(S);
julia> dive(s)
S
→ context = <Context>
config = <Config>
a = 1.0
```
"""
dive(s::System) = begin
if isdefined(Main, :IJulia) && Main.IJulia.inited
return look(s)
end
try
dive(s, [MenuItem(string(namefor(s)), "", s)])
catch e
!isa(e, InterruptException) && return e
end
nothing
end
export dive
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 5062 | import DataFrames
#TODO: share code with calibrate()
"""
evaluate(S, obs; <keyword arguments>) -> Number | Tuple
Compare output of simulation results for the given system `S` and observation data `obs` with a choice of evaluation metric.
# Arguments
- `S::Type{<:System}`: type of system to be evaluated.
- `obs::DataFrame`: observation data to be used for evaluation.
# Keyword Arguments
## Configuration
- `config=()`: a single configuration for the system (can't be used with `configs`).
- `configs=[]`: multiple configurations for the system (can't be used with `config`).
## Layout
- `index=nothing`: variables to construct index columns of the output; default falls back to `context.clock.time`.
- `target`: variables to construct non-index columns of the output.
## Evaluation
- `metric=nothing`: evaluation metric (`:rmse`, `:nrmse`, `:mae`, `:mape`, `:ef`, `:dr`); default is RMSE.
Remaining keyword arguments are passed down to `simulate` with regard to running system `S`.
See also: [`simulate`](@ref), [`calibrate`](@ref), [`@config`](@ref)
# Examples
```julia-repl
julia> @system S(Controller) begin
a => 19 ~ preserve(u"m/hr", parameter)
b(a) ~ accumulate(u"m")
end;
julia> obs = DataFrame(time=10u"hr", b=200u"m");
julia> configs = @config !(:S => :a => [19, 21]);
julia> evaluate(S, obs; configs, target=:b, stop=10u"hr")
10.0 m
```
"""
# Entry point dispatching between a single `config` and multiple `configs`;
# exactly one of the two may be supplied (empty means "not given").
function evaluate(S::Type{<:System}, obs; config=(), configs=[], kwargs...)
    if !isempty(config) && !isempty(configs)
        # both were given: ambiguous, refuse to guess
        @error "redundant configurations" config configs
    elseif isempty(configs)
        # fall back to a single-element configuration list
        evaluate(S, obs, [config]; kwargs...)
    else
        evaluate(S, obs, configs; kwargs...)
    end
end
# Core evaluation: run simulations for every configuration, join estimated and
# observed target columns on the index, then score with the metric.
evaluate(S::Type{<:System}, obs, configs; index=nothing, target, metric=nothing, kwargs...) = begin
    #HACK: use copy due to normalize!
    obs = copy(obs)
    I = parseindex(index, S) |> keys |> collect
    T = parsetarget(target, S) |> keys |> collect
    n = length(T)
    multi = n > 1
    isnothing(metric) && (metric = :rmse)
    metric = metricfunc(metric)
    # snapshot predicate: keep only rows whose index values occur in `obs`
    IC = [t for t in zip(getproperty.(Ref(obs), I)...)]
    IV = parseindex(index, S) |> values |> Tuple
    snap(s) = getproperty.(s, IV) .|> value in IC
    # names the estimated target columns will get after join de-duplication
    NT = DataFrames.make_unique([propertynames(obs)..., T...], makeunique=true)
    T1 = NT[end-n+1:end]
    residual(c) = begin
        est = simulate(S; config=c, index, target, snap, verbose=false, kwargs...)
        # an empty simulation result is penalized with infinite residuals
        isempty(est) && return repeat([Inf], n)
        normalize!(est, obs, on=I)
        df = DataFrames.innerjoin(est, obs, on=I, makeunique=true)
        r = [(df[!, e], df[!, o]) for (e, o) in zip(T, T1)]
    end
    cost() = begin
        l = length(configs)
        R = Vector(undef, l)
        # evaluate configurations in parallel
        Threads.@threads for i in 1:l
            R[i] = residual(configs[i])
        end
        # concatenate residual pairs per target across configurations, then score
        e = map(getindex.(R, i) for i in 1:n) do r
            metric(vcat(first.(r)...), vcat(last.(r)...))
        end
        multi ? Tuple(e) : only(e)
    end
    cost()
end
"""
evaluate(obs, est; <keyword arguments>) -> Number | Tuple
Compare observation data `obs` and estimation data `est` with a choice of evaluation metric.
# Arguments
- `obs::DataFrame`: observation data to be used for evaluation.
- `est::DataFrame`: estimated data from simulation.
# Keyword Arguments
## Layout
- `index`: variables referring to index columns of the output.
- `target`: variables referring to non-index columns of the output.
## Evaluation
- `metric=nothing`: evaluation metric (`:rmse`, `:nrmse`, `:mae`, `:mape`, `:ef`, `:dr`); default is RMSE.
See also: [`evaluate`](@ref)
# Examples
```julia-repl
julia> obs = DataFrame(time = [1, 2, 3]u"hr", b = [10, 20, 30]u"g");
julia> est = DataFrame(time = [1, 2, 3]u"hr", b = [10, 20, 30]u"g", c = [11, 19, 31]u"g");
julia> evaluate(obs, est; index = :time, target = :b)
0.0 g
julia> evaluate(obs, est; index = :time, target = :b => :c)
1.0 g
```
"""
# Direct comparison of observation vs. estimation data frames, joined on the
# index columns and scored per target with the metric.
evaluate(obs::DataFrame, est::DataFrame; index=nothing, target, metric=nothing, kwargs...) = begin
    # no system involved; parseindex/parsetarget receive `nothing`
    S = nothing
    #HACK: use copy due to normalize!
    obs = copy(obs)
    I = parseindex(index, S) |> collect
    IO = parseindex(index, S) |> keys |> collect
    TO = parsetarget(target, S) |> keys |> collect
    TE = parsetarget(target, S) |> values |> collect
    n = length(TO)
    multi = n > 1
    isnothing(metric) && (metric = :rmse)
    metric = metricfunc(metric)
    # names the estimated target columns will get after join de-duplication
    NT = DataFrames.make_unique([propertynames(obs)..., TE...], makeunique=true)
    TE1 = NT[end-n+1:end]
    residual() = begin
        isempty(est) && return repeat([Inf], n)
        normalize!(obs, est, on=I)
        df = DataFrames.innerjoin(obs, est, on=I, makeunique=true)
        df = df[!, [IO..., TO..., TE1...]]
        # compare only rows where both sides have data
        DataFrames.dropmissing!(df)
        r = [(df[!, e], df[!, o]) for (e, o) in zip(TO, TE1)]
    end
    cost() = begin
        R = [residual()]
        e = map(getindex.(R, i) for i in 1:n) do r
            metric(vcat(first.(r)...), vcat(last.(r)...))
        end
        multi ? Tuple(e) : only(e)
    end
    cost()
end
export evaluate
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
|
[
"MIT"
] | 0.3.50 | 92f7c427254bee725db295408ed3af6d9e2072cd | code | 1067 | struct Gather{T,S,F}
    # visitor state: dispatch types, an accumulator store, and the callback
    types::T
    store::S
    callback::F
end
# calling a Gather forwards to its callback
(g::Gather)(a...; k...) = g.callback(a...; k...)
Base.broadcastable(g::Gather) = Ref(g)
Base.push!(g::Gather, v...) = push!(g.store, v...)
Base.append!(g::Gather, v...) = append!(g.store, v...)
value(g::Gather) = g.store
Base.getindex(g::Gather) = value(g)
Base.adjoint(g::Gather) = value(g)
# dispatch a visited value against the gather's registered mixin types
mixindispatch(s, g::Gather) = mixindispatch(s, g.types...)
# entry point: recursively walk system `s`, collecting matches into `store`
gather!(s::System, SS::Type{<:System}...; store=[], callback=visit!, kwargs=()) = gather!(Gather(SS, store, callback), s; kwargs...)
gather!(g::Gather, v; kwargs...) = g(g, mixindispatch(v, g)...; kwargs...)
visit!(g::Gather, s::System, ::Val; k...) = (gather!.(g, value.(collect(s)); k...); g')
visit!(g::Gather, V::Vector{<:System}, ::Val; k...) = (gather!.(g, V; k...); g')
visit!(g::Gather, s, ::Val; k...) = g'
visit!(g::Gather, s; k...) = visit!(g, s, Val(nothing); k...)
# example callback: collect every matched System instance itself
gathersystem!(g::Gather, s::System, ::Val{:System}) = (push!(g, s); visit!(g, s))
gathersystem!(g::Gather, a...) = visit!(g, a...)
export Gather, gather!, visit!
| Cropbox | https://github.com/cropbox/Cropbox.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.